Posted to commits@lucene.apache.org by da...@apache.org on 2018/10/23 00:05:22 UTC

[01/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Repository: lucene-solr
Updated Branches:
  refs/heads/jira/gradle [created] c9cb4fe96


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java b/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java
deleted file mode 100644
index 6e0323c..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java
+++ /dev/null
@@ -1,870 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.spell.SuggestMode;
-import org.apache.lucene.search.spell.SuggestWord;
-import org.apache.solr.client.solrj.response.SpellCheckResponse;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.params.SpellingParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.SolrEventListener;
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.solr.schema.FieldType;
-import org.apache.solr.schema.IndexSchema;
-import org.apache.solr.search.DocSet;
-import org.apache.solr.search.QParser;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.search.SyntaxError;
-import org.apache.solr.spelling.AbstractLuceneSpellChecker;
-import org.apache.solr.spelling.ConjunctionSolrSpellChecker;
-import org.apache.solr.spelling.IndexBasedSpellChecker;
-import org.apache.solr.spelling.QueryConverter;
-import org.apache.solr.spelling.SolrSpellChecker;
-import org.apache.solr.spelling.SpellCheckCollation;
-import org.apache.solr.spelling.SpellCheckCollator;
-import org.apache.solr.spelling.SpellingOptions;
-import org.apache.solr.spelling.SpellingQueryConverter;
-import org.apache.solr.spelling.SpellingResult;
-import org.apache.solr.spelling.Token;
-import org.apache.solr.util.plugin.SolrCoreAware;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A SearchComponent implementation which provides support for spell checking
- * and suggestions using the Lucene contributed SpellChecker.
- *
- * <p>
- * Refer to http://wiki.apache.org/solr/SpellCheckComponent for more details
- * </p>
- *
- * @since solr 1.3
- */
-public class SpellCheckComponent extends SearchComponent implements SolrCoreAware, SpellingParams {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final boolean DEFAULT_ONLY_MORE_POPULAR = false;
-
-  /**
-   * Base name for all spell checker query parameters. This name is also used to
-   * register this component with SearchHandler.
-   */
-  public static final String COMPONENT_NAME = "spellcheck";
-
-  @SuppressWarnings("unchecked")
-  protected NamedList initParams;
-
-
-  /**
-   * Key is the dictionary name; value is the SpellChecker registered for that dictionary
-   */
-  protected Map<String, SolrSpellChecker> spellCheckers = new ConcurrentHashMap<>();
-
-  protected QueryConverter queryConverter;
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public void init(NamedList args) {
-    super.init(args);
-    this.initParams = args;
-  }
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public void prepare(ResponseBuilder rb) throws IOException {
-
-    SolrParams params = rb.req.getParams();
-    if (!params.getBool(COMPONENT_NAME, false)) {
-      return;
-    }
-    SolrSpellChecker spellChecker = getSpellChecker(params);
-    if (params.getBool(SPELLCHECK_BUILD, false)) {
-      spellChecker.build(rb.req.getCore(), rb.req.getSearcher());
-      rb.rsp.add("command", "build");
-    } else if (params.getBool(SPELLCHECK_RELOAD, false)) {
-      spellChecker.reload(rb.req.getCore(), rb.req.getSearcher());
-      rb.rsp.add("command", "reload");
-    }
-  }
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public void process(ResponseBuilder rb) throws IOException {
-    SolrParams params = rb.req.getParams();
-    if (!params.getBool(COMPONENT_NAME, false) || spellCheckers.isEmpty()) {
-      return;
-    }
-    boolean shardRequest = "true".equals(params.get(ShardParams.IS_SHARD));
-    String q = params.get(SPELLCHECK_Q);
-    SolrSpellChecker spellChecker = getSpellChecker(params);
-    Collection<Token> tokens = null;
-
-    if (q != null) {
-      //we have a spell check param, tokenize it with the query analyzer applicable for this spellchecker
-      tokens = getTokens(q, spellChecker.getQueryAnalyzer());
-    } else {
-      q = rb.getQueryString();
-      if (q == null) {
-        q = params.get(CommonParams.Q);
-      }
-      tokens = queryConverter.convert(q);
-    }
-    if (tokens != null && !tokens.isEmpty()) {
-      if (spellChecker != null) {
-        int count = params.getInt(SPELLCHECK_COUNT, 1);
-        boolean onlyMorePopular = params.getBool(SPELLCHECK_ONLY_MORE_POPULAR, DEFAULT_ONLY_MORE_POPULAR);
-        boolean extendedResults = params.getBool(SPELLCHECK_EXTENDED_RESULTS, false);
-        boolean collate = params.getBool(SPELLCHECK_COLLATE, false);
-        float accuracy = params.getFloat(SPELLCHECK_ACCURACY, Float.MIN_VALUE);
-        int alternativeTermCount = params.getInt(SpellingParams.SPELLCHECK_ALTERNATIVE_TERM_COUNT, 0);
-        //If specified, this can be a discrete # of results, or a percentage of fq results.
-        Integer maxResultsForSuggest = maxResultsForSuggest(rb);
-        
-        ModifiableSolrParams customParams = new ModifiableSolrParams();
-        for (String checkerName : getDictionaryNames(params)) {
-          customParams.add(getCustomParams(checkerName, params));
-        }
-
-        Number hitsLong = (Number) rb.rsp.getToLog().get("hits");
-        long hits = 0;
-        if (hitsLong == null) {
-          hits = rb.getNumberDocumentsFound();
-        } else {
-          hits = hitsLong.longValue();
-        }
-        
-        SpellingResult spellingResult = null;
-        if (maxResultsForSuggest == null || hits <= maxResultsForSuggest) {
-          SuggestMode suggestMode = SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX;
-          if (onlyMorePopular) {
-            suggestMode = SuggestMode.SUGGEST_MORE_POPULAR;
-          } else if (alternativeTermCount > 0) {
-            suggestMode = SuggestMode.SUGGEST_ALWAYS;
-          }
-
-          IndexReader reader = rb.req.getSearcher().getIndexReader();
-          SpellingOptions options = new SpellingOptions(tokens, reader, count,
-              alternativeTermCount, suggestMode, extendedResults, accuracy,
-              customParams);
-          spellingResult = spellChecker.getSuggestions(options);
-        } else {
-          spellingResult = new SpellingResult();
-        }
-        boolean isCorrectlySpelled = hits > (maxResultsForSuggest==null ? 0 : maxResultsForSuggest);
-
-        NamedList response = new SimpleOrderedMap();
-        NamedList suggestions = toNamedList(shardRequest, spellingResult, q, extendedResults);
-        response.add("suggestions", suggestions);
-
-        if (extendedResults) {
-          response.add("correctlySpelled", isCorrectlySpelled);
-        }
-        if (collate) {
-          addCollationsToResponse(params, spellingResult, rb, q, response, spellChecker.isSuggestionsMayOverlap());
-        }
-        if (shardRequest) {
-          addOriginalTermsToResponse(response, tokens);
-        }
-
-        rb.rsp.add("spellcheck", response);
-
-      } else {
-        throw new SolrException(SolrException.ErrorCode.NOT_FOUND,
-            "Specified dictionaries do not exist: " + getDictionaryNameAsSingleString(getDictionaryNames(params)));
-      }
-    }
-  }
-  
-  private Integer maxResultsForSuggest(ResponseBuilder rb) {
-    SolrParams params = rb.req.getParams();
-    float maxResultsForSuggestParamValue = params.getFloat(SpellingParams.SPELLCHECK_MAX_RESULTS_FOR_SUGGEST, 0.0f);
-    Integer maxResultsForSuggest = null;
-    
-    if (maxResultsForSuggestParamValue > 0.0f) {
-      if (maxResultsForSuggestParamValue == (int) maxResultsForSuggestParamValue) {
-        // If a whole number was passed in, this is a discrete number of documents
-        maxResultsForSuggest = (int) maxResultsForSuggestParamValue;
-      } else {
-        // If a fractional value was passed in, this is the % of documents returned by the specified filter
-        // If no specified filter, we use the most restrictive filter of the fq parameters
-        String maxResultsFilterQueryString = params.get(SpellingParams.SPELLCHECK_MAX_RESULTS_FOR_SUGGEST_FQ);
-        
-        int maxResultsByFilters = Integer.MAX_VALUE;
-        SolrIndexSearcher searcher = rb.req.getSearcher();
-        
-        try {
-          if (maxResultsFilterQueryString != null) {
-            // Get the default Lucene query parser
-            QParser parser = QParser.getParser(maxResultsFilterQueryString, rb.req);
-            DocSet s = searcher.getDocSet(parser.getQuery());
-            maxResultsByFilters = s.size();
-          } else {
-            List<Query> filters = rb.getFilters();
-
-            // Get the maximum possible hits within these filters (size of most restrictive filter). 
-            if (filters != null) {
-              for (Query query : filters) {
-                DocSet s = searcher.getDocSet(query);
-                if (s != null) {
-                  maxResultsByFilters = Math.min(s.size(), maxResultsByFilters);
-                }
-              }
-            }
-          }
-        } catch (IOException | SyntaxError e) {
-          log.error(e.toString());
-          return null;
-        }
-        
-        // Recalculate maxResultsForSuggest if filters were specified
-        if (maxResultsByFilters != Integer.MAX_VALUE) {
-          maxResultsForSuggest = Math.round(maxResultsByFilters * maxResultsForSuggestParamValue);
-        }
-      }
-    }
-    return maxResultsForSuggest;
-  }
-  
-  @SuppressWarnings("unchecked")
-  protected void addCollationsToResponse(SolrParams params, SpellingResult spellingResult, ResponseBuilder rb, String q,
-      NamedList response, boolean suggestionsMayOverlap) {
-    int maxCollations = params.getInt(SPELLCHECK_MAX_COLLATIONS, 1);
-    int maxCollationTries = params.getInt(SPELLCHECK_MAX_COLLATION_TRIES, 0);
-    int maxCollationEvaluations = params.getInt(SPELLCHECK_MAX_COLLATION_EVALUATIONS, 10000);
-    boolean collationExtendedResults = params.getBool(SPELLCHECK_COLLATE_EXTENDED_RESULTS, false);
-    int maxCollationCollectDocs = params.getInt(SPELLCHECK_COLLATE_MAX_COLLECT_DOCS, 0);
-    // If not reporting hits counts, don't bother collecting more than 1 document per try.
-    if (!collationExtendedResults) {
-      maxCollationCollectDocs = 1;
-    }
-    boolean shard = params.getBool(ShardParams.IS_SHARD, false);
-    SpellCheckCollator collator = new SpellCheckCollator()
-        .setMaxCollations(maxCollations)
-        .setMaxCollationTries(maxCollationTries)
-        .setMaxCollationEvaluations(maxCollationEvaluations)
-        .setSuggestionsMayOverlap(suggestionsMayOverlap)
-        .setDocCollectionLimit(maxCollationCollectDocs);
-    List<SpellCheckCollation> collations = collator.collate(spellingResult, q, rb);
-    //by sorting here we guarantee a non-distributed request returns all 
-    //results in the same order as a distributed request would,
-    //even in cases when the internal rank is the same.
-    Collections.sort(collations);
-
-    NamedList collationList = new NamedList();
-    for (SpellCheckCollation collation : collations) {
-      if (collationExtendedResults) {
-        NamedList extendedResult = new SimpleOrderedMap();
-        extendedResult.add("collationQuery", collation.getCollationQuery());
-        extendedResult.add("hits", collation.getHits());
-        extendedResult.add("misspellingsAndCorrections", collation.getMisspellingsAndCorrections());
-        if (maxCollationTries > 0 && shard) {
-          extendedResult.add("collationInternalRank", collation.getInternalRank());
-        }
-        collationList.add("collation", extendedResult);
-      } else {
-        collationList.add("collation", collation.getCollationQuery());
-        if (maxCollationTries>0 && shard) {
-          collationList.add("collationInternalRank", collation.getInternalRank());
-        }
-      }
-    }
-    response.add("collations", collationList);
-  }
-
-  private void addOriginalTermsToResponse(NamedList response, Collection<Token> originalTerms) {
-    List<String> originalTermStr = new ArrayList<String>();
-    for(Token t : originalTerms) {
-      originalTermStr.add(t.toString());
-    }
-    response.add("originalTerms", originalTermStr);
-  }
-
-  /**
-   * For every param of the form "spellcheck.[dictionary name].XXXX=YYYY", add
-   * XXXX=YYYY as a param to the custom param list
-   * @param dictionary The dictionary name whose prefixed params are extracted
-   * @param params The original SolrParams
-   * @return The new Params
-   */
-  protected SolrParams getCustomParams(String dictionary, SolrParams params) {
-    ModifiableSolrParams result = new ModifiableSolrParams();
-    Iterator<String> iter = params.getParameterNamesIterator();
-    String prefix = SpellingParams.SPELLCHECK_PREFIX + dictionary + ".";
-    while (iter.hasNext()) {
-      String nxt = iter.next();
-      if (nxt.startsWith(prefix)) {
-        result.add(nxt.substring(prefix.length()), params.getParams(nxt));
-      }
-    }
-    return result;
-  }
-
-
-  @Override
-  public void modifyRequest(ResponseBuilder rb, SearchComponent who, ShardRequest sreq) {
-    SolrParams params = rb.req.getParams();
-    if (!params.getBool(COMPONENT_NAME, false)) return;
-    int purpose = rb.grouping() ? ShardRequest.PURPOSE_GET_TOP_GROUPS : ShardRequest.PURPOSE_GET_TOP_IDS;
-    if ((sreq.purpose & purpose) != 0) {
-      // fetch at least 5 suggestions from each shard
-      int count = sreq.params.getInt(SPELLCHECK_COUNT, 1);
-      if (count < 5)  count = 5;
-      sreq.params.set(SPELLCHECK_COUNT, count);
-      sreq.params.set("spellcheck", "true");
-    } else  {
-      sreq.params.set("spellcheck", "false");
-    }
-  }
-
-  @Override
-  @SuppressWarnings({"unchecked", "deprecation"})
-  public void finishStage(ResponseBuilder rb) {
-    SolrParams params = rb.req.getParams();
-    if (!params.getBool(COMPONENT_NAME, false) || rb.stage != ResponseBuilder.STAGE_GET_FIELDS)
-      return;
-
-    boolean extendedResults = params.getBool(SPELLCHECK_EXTENDED_RESULTS, false);
-    boolean collate = params.getBool(SPELLCHECK_COLLATE, false);
-    boolean collationExtendedResults = params.getBool(SPELLCHECK_COLLATE_EXTENDED_RESULTS, false);
-    int maxCollationTries = params.getInt(SPELLCHECK_MAX_COLLATION_TRIES, 0);
-    int maxCollations = params.getInt(SPELLCHECK_MAX_COLLATIONS, 1);
-    Integer maxResultsForSuggest = maxResultsForSuggest(rb);
-    int count = rb.req.getParams().getInt(SPELLCHECK_COUNT, 1);
-    int numSug = Math.max(count, AbstractLuceneSpellChecker.DEFAULT_SUGGESTION_COUNT);
-
-    String origQuery = params.get(SPELLCHECK_Q);
-    if (origQuery == null) {
-      origQuery = rb.getQueryString();
-      if (origQuery == null) {
-        origQuery = params.get(CommonParams.Q);
-      }
-    }
-    
-    long hits = rb.grouping() ? rb.totalHitCount : rb.getNumberDocumentsFound();
-    boolean isCorrectlySpelled = hits > (maxResultsForSuggest==null ? 0 : maxResultsForSuggest);
-
-    SpellCheckMergeData mergeData = new SpellCheckMergeData();
-    if (maxResultsForSuggest==null || !isCorrectlySpelled) {
-      for (ShardRequest sreq : rb.finished) {
-        for (ShardResponse srsp : sreq.responses) {
-          NamedList nl = null;
-          try {
-            nl = (NamedList) srsp.getSolrResponse().getResponse().get("spellcheck");
-          } catch (Exception e) {
-            if (ShardParams.getShardsTolerantAsBool(rb.req.getParams())) {
-              continue; // looks like a shard did not return anything
-            }
-            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-                "Unable to read spelling info for shard: " + srsp.getShard(), e);
-          }
-          log.info("{} {}", srsp.getShard(), nl);
-          if (nl != null) {
-            mergeData.totalNumberShardResponses++;
-            collectShardSuggestions(nl, mergeData);
-            collectShardCollations(mergeData, nl, maxCollationTries);
-          }
-        }
-      }
-    }
-
-    // all shard responses have been collected
-    // create token and get top suggestions
-    SolrSpellChecker checker = getSpellChecker(rb.req.getParams());
-    SpellingResult result = checker.mergeSuggestions(mergeData, numSug, count, extendedResults);
-
-    NamedList response = new SimpleOrderedMap();
-
-    NamedList suggestions = toNamedList(false, result, origQuery, extendedResults);
-    response.add("suggestions", suggestions);
-
-    if (extendedResults) {
-      response.add("correctlySpelled", isCorrectlySpelled);
-    }
-
-    if (collate) {
-      SpellCheckCollation[] sortedCollations = mergeData.collations.values()
-          .toArray(new SpellCheckCollation[mergeData.collations.size()]);
-      Arrays.sort(sortedCollations);
-
-      NamedList collations = new NamedList();
-      int i = 0;
-      while (i < maxCollations && i < sortedCollations.length) {
-        SpellCheckCollation collation = sortedCollations[i];
-        i++;
-        if (collationExtendedResults) {
-          SimpleOrderedMap extendedResult = new SimpleOrderedMap();
-          extendedResult.add("collationQuery", collation.getCollationQuery());
-          extendedResult.add("hits", collation.getHits());
-          extendedResult.add("misspellingsAndCorrections", collation
-              .getMisspellingsAndCorrections());
-          collations.add("collation", extendedResult);
-        } else {
-          collations.add("collation", collation.getCollationQuery());
-        }
-      }
-
-      response.add("collations", collations);
-    }
-
-    rb.rsp.add("spellcheck", response);
-  }
-
-  @SuppressWarnings("unchecked")
-  private void collectShardSuggestions(NamedList nl, SpellCheckMergeData mergeData) {
-    SpellCheckResponse spellCheckResp = new SpellCheckResponse(nl);
-    Iterable<Object> originalTermStrings = (Iterable<Object>) nl.get("originalTerms");
-    if(originalTermStrings!=null) {
-      mergeData.originalTerms = new HashSet<>();
-      for (Object originalTermObj : originalTermStrings) {
-        mergeData.originalTerms.add(originalTermObj.toString());
-      }
-    }
-    for (SpellCheckResponse.Suggestion suggestion : spellCheckResp.getSuggestions()) {
-      mergeData.origVsSuggestion.put(suggestion.getToken(), suggestion);
-      HashSet<String> suggested = mergeData.origVsSuggested.get(suggestion.getToken());
-      if (suggested == null) {
-        suggested = new HashSet<>();
-        mergeData.origVsSuggested.put(suggestion.getToken(), suggested);
-      }
-
-      // sum up original frequency          
-      int origFreq = 0;
-      Integer o = mergeData.origVsFreq.get(suggestion.getToken());
-      if (o != null)  origFreq += o;
-      origFreq += suggestion.getOriginalFrequency();
-      mergeData.origVsFreq.put(suggestion.getToken(), origFreq);
-
-      //# shards reporting
-      Integer origShards = mergeData.origVsShards.get(suggestion.getToken());
-      if(origShards==null) {
-        mergeData.origVsShards.put(suggestion.getToken(), 1);
-      } else {
-        mergeData.origVsShards.put(suggestion.getToken(), ++origShards);
-      }
-
-      // find best suggestions
-      for (int i = 0; i < suggestion.getNumFound(); i++) {
-        String alternative = suggestion.getAlternatives().get(i);
-        suggested.add(alternative);
-        SuggestWord sug = mergeData.suggestedVsWord.get(alternative);
-        if (sug == null)  {
-          sug = new SuggestWord();
-          mergeData.suggestedVsWord.put(alternative, sug);
-        }
-        sug.string = alternative;
-        // alternative frequency is present only for extendedResults=true
-        if (suggestion.getAlternativeFrequencies() != null
-            && suggestion.getAlternativeFrequencies().size() > 0) {
-          Integer freq = suggestion.getAlternativeFrequencies().get(i);
-          if (freq != null) sug.freq += freq;
-        }
-      }
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  private void collectShardCollations(SpellCheckMergeData mergeData, NamedList spellCheckResponse, int maxCollationTries) {
-    Map<String, SpellCheckCollation> collations = mergeData.collations;
-    NamedList collationHolder = (NamedList) spellCheckResponse.get("collations");
-    if (collationHolder != null) {
-      List<Object> collationList = collationHolder.getAll("collation");
-      List<Object> collationRankList = collationHolder.getAll("collationInternalRank");
-      int i = 0;
-      if (collationList != null) {
-        for (Object o : collationList) {
-          if (o instanceof String) {
-            SpellCheckCollation coll = new SpellCheckCollation();
-            coll.setCollationQuery((String) o);
-            if (collationRankList != null && collationRankList.size() > 0) {
-              coll.setInternalRank((Integer) collationRankList.get(i));
-              i++;
-            }
-            SpellCheckCollation priorColl = collations.get(coll.getCollationQuery());
-            if (priorColl != null) {
-              coll.setInternalRank(Math.max(coll.getInternalRank(), priorColl.getInternalRank()));
-            }
-            collations.put(coll.getCollationQuery(), coll);
-          } else {
-            NamedList expandedCollation = (NamedList) o;
-            SpellCheckCollation coll = new SpellCheckCollation();
-            coll.setCollationQuery((String) expandedCollation.get("collationQuery"));
-            coll.setHits(((Number) expandedCollation.get("hits")).longValue());
-            if (maxCollationTries > 0) {
-              coll.setInternalRank((Integer) expandedCollation.get("collationInternalRank"));
-            }
-            coll.setMisspellingsAndCorrections((NamedList) expandedCollation.get("misspellingsAndCorrections"));
-            SpellCheckCollation priorColl = collations.get(coll.getCollationQuery());
-            if (priorColl != null) {
-              coll.setHits(coll.getHits() + priorColl.getHits());
-              coll.setInternalRank(Math.max(coll.getInternalRank(), priorColl.getInternalRank()));
-            }
-            collations.put(coll.getCollationQuery(), coll);
-          }
-        }
-      }
-    }
-  }
-
-  private Collection<Token> getTokens(String q, Analyzer analyzer) throws IOException {
-    Collection<Token> result = new ArrayList<>();
-    assert analyzer != null;
-    try (TokenStream ts = analyzer.tokenStream("", q)) {
-      ts.reset();
-      // TODO: support custom attributes
-      CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
-      OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
-      TypeAttribute typeAtt = ts.addAttribute(TypeAttribute.class);
-      FlagsAttribute flagsAtt = ts.addAttribute(FlagsAttribute.class);
-      PayloadAttribute payloadAtt = ts.addAttribute(PayloadAttribute.class);
-      PositionIncrementAttribute posIncAtt = ts.addAttribute(PositionIncrementAttribute.class);
-
-      while (ts.incrementToken()){
-        Token token = new Token();
-        token.copyBuffer(termAtt.buffer(), 0, termAtt.length());
-        token.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
-        token.setType(typeAtt.type());
-        token.setFlags(flagsAtt.getFlags());
-        token.setPayload(payloadAtt.getPayload());
-        token.setPositionIncrement(posIncAtt.getPositionIncrement());
-        result.add(token);
-      }
-      ts.end();
-      return result;
-    }
-  }
-
-  protected SolrSpellChecker getSpellChecker(SolrParams params) {
-    String[] dictName = getDictionaryNames(params);
-    if (dictName.length == 1) {
-      return spellCheckers.get(dictName[0]);
-    } else {
-      String singleStr = getDictionaryNameAsSingleString(dictName);
-      SolrSpellChecker ssc = spellCheckers.get(singleStr);
-      if (ssc == null) {
-        ConjunctionSolrSpellChecker cssc = new ConjunctionSolrSpellChecker();
-        for (String dn : dictName) {
-          cssc.addChecker(spellCheckers.get(dn));
-        }
-        ssc = cssc;
-      }
-      return ssc;
-    }
-  }
-
-  private String getDictionaryNameAsSingleString(String[] dictName) {
-    StringBuilder sb = new StringBuilder();
-    for (String dn : dictName) {
-      if (sb.length() > 0) {
-        sb.append(" ");
-      }
-      sb.append(dn);
-    }
-    return sb.toString();
-  }
-
-  private String[] getDictionaryNames(SolrParams params) {
-    String[] dictName = params.getParams(SPELLCHECK_DICT);
-    if (dictName == null) {
-      return new String[] {SolrSpellChecker.DEFAULT_DICTIONARY_NAME};
-    }
-    return dictName;
-  }
-
-  /**
-   * @return the spellchecker registered to a given name
-   */
-  public SolrSpellChecker getSpellChecker(String name) {
-    return spellCheckers.get(name);
-  }
-
-  protected NamedList toNamedList(boolean shardRequest,
-      SpellingResult spellingResult, String origQuery, boolean extendedResults) {
-    NamedList result = new NamedList();
-    Map<Token,LinkedHashMap<String,Integer>> suggestions = spellingResult
-        .getSuggestions();
-    boolean hasFreqInfo = spellingResult.hasTokenFrequencyInfo();
-    boolean hasSuggestions = false;
-    boolean hasZeroFrequencyToken = false;
-    for (Map.Entry<Token,LinkedHashMap<String,Integer>> entry : suggestions
-        .entrySet()) {
-      Token inputToken = entry.getKey();
-      String tokenString = new String(inputToken.buffer(), 0, inputToken
-          .length());
-      Map<String,Integer> theSuggestions = new LinkedHashMap<>(
-          entry.getValue());
-      Iterator<String> sugIter = theSuggestions.keySet().iterator();
-      while (sugIter.hasNext()) {
-        String sug = sugIter.next();
-        if (sug.equals(tokenString)) {
-          sugIter.remove();
-        }
-      }
-      if (theSuggestions.size() > 0) {
-        hasSuggestions = true;
-      }
-      if (theSuggestions != null && (theSuggestions.size() > 0 || shardRequest)) {
-        SimpleOrderedMap suggestionList = new SimpleOrderedMap();
-        suggestionList.add("numFound", theSuggestions.size());
-        suggestionList.add("startOffset", inputToken.startOffset());
-        suggestionList.add("endOffset", inputToken.endOffset());
-
-        // Logical structure of normal (non-extended) results:
-        // "suggestion":["alt1","alt2"]
-        //
-        // Logical structure of the extended results:
-        // "suggestion":[
-        // {"word":"alt1","freq":7},
-        // {"word":"alt2","freq":4}
-        // ]
-        if (extendedResults && hasFreqInfo) {
-          suggestionList.add("origFreq", spellingResult
-              .getTokenFrequency(inputToken));
-
-          ArrayList<SimpleOrderedMap> sugs = new ArrayList<>();
-          suggestionList.add("suggestion", sugs);
-          for (Map.Entry<String,Integer> suggEntry : theSuggestions.entrySet()) {
-            SimpleOrderedMap sugEntry = new SimpleOrderedMap();
-            sugEntry.add("word", suggEntry.getKey());
-            sugEntry.add("freq", suggEntry.getValue());
-            sugs.add(sugEntry);
-          }
-        } else {
-          suggestionList.add("suggestion", theSuggestions.keySet());
-        }
-
-        if (hasFreqInfo) {
-          Integer tokenFrequency = spellingResult.getTokenFrequency(inputToken);
-          if (tokenFrequency==null || tokenFrequency == 0) {
-            hasZeroFrequencyToken = true;
-          }
-        }
-        result.add(tokenString, suggestionList);
-      }
-    }
-    return result;
-  }
-
-  @Override
-  public void inform(SolrCore core) {
-    if (initParams != null) {
-      log.info("Initializing spell checkers");
-      boolean hasDefault = false;
-      for (int i = 0; i < initParams.size(); i++) {
-        if (initParams.getName(i).equals("spellchecker")) {
-          Object cfg = initParams.getVal(i);
-          if (cfg instanceof NamedList) {
-            // capture the returned flag so a second default dictionary is detected
-            hasDefault = addSpellChecker(core, hasDefault, (NamedList) cfg);
-          } else if (cfg instanceof Map) {
-            hasDefault = addSpellChecker(core, hasDefault, new NamedList((Map) cfg));
-          } else if (cfg instanceof List) {
-            for (Object o : (List) cfg) {
-              if (o instanceof Map) {
-                hasDefault = addSpellChecker(core, hasDefault, new NamedList((Map) o));
-              }
-            }
-          }
-        }
-      }
-
-      Map<String, QueryConverter> queryConverters = new HashMap<>();
-      core.initPlugins(queryConverters,QueryConverter.class);
-
-      //ensure that there is at least one query converter defined
-      if (queryConverters.size() == 0) {
-        log.trace("No queryConverter defined, using default converter");
-        queryConverters.put("queryConverter", new SpellingQueryConverter());
-      }
-
-      //there should only be one
-      if (queryConverters.size() == 1) {
-        queryConverter = queryConverters.values().iterator().next();
-        IndexSchema schema = core.getLatestSchema();
-        String fieldTypeName = (String) initParams.get("queryAnalyzerFieldType");
-        FieldType fieldType = schema.getFieldTypes().get(fieldTypeName);
-        Analyzer analyzer = fieldType == null ? new WhitespaceAnalyzer()
-                : fieldType.getQueryAnalyzer();
-        //TODO: There's got to be a better way!  Where's Spring when you need it?
-        queryConverter.setAnalyzer(analyzer);
-      }
-    }
-  }
-
-  private boolean addSpellChecker(SolrCore core, boolean hasDefault, NamedList spellchecker) {
-    String className = (String) spellchecker.get("classname");
-    if (className == null) className = (String) spellchecker.get("class");
-    // TODO: this is a little bit sneaky: warn if class isn't supplied
-    // so that it's mandatory in a future release?
-    if (className == null)
-      className = IndexBasedSpellChecker.class.getName();
-    SolrResourceLoader loader = core.getResourceLoader();
-    SolrSpellChecker checker = loader.newInstance(className, SolrSpellChecker.class);
-    if (checker != null) {
-      String dictionary = checker.init(spellchecker, core);
-      if (dictionary != null) {
-        boolean isDefault = dictionary.equals(SolrSpellChecker.DEFAULT_DICTIONARY_NAME);
-        if (isDefault && !hasDefault) {
-          hasDefault = true;
-        } else if (isDefault && hasDefault) {
-          throw new RuntimeException("More than one dictionary is using the default name.");
-        }
-        spellCheckers.put(dictionary, checker);
-      } else {
-        if (!hasDefault) {
-          spellCheckers.put(SolrSpellChecker.DEFAULT_DICTIONARY_NAME, checker);
-          hasDefault = true;
-        } else {
-          throw new RuntimeException("More than one dictionary is missing a name.");
-        }
-      }
-      // Register event listeners for this SpellChecker
-      core.registerFirstSearcherListener(new SpellCheckerListener(core, checker, false, false));
-      boolean buildOnCommit = Boolean.parseBoolean((String) spellchecker.get("buildOnCommit"));
-      boolean buildOnOptimize = Boolean.parseBoolean((String) spellchecker.get("buildOnOptimize"));
-      if (buildOnCommit || buildOnOptimize) {
-        log.info("Registering newSearcher listener for spellchecker: " + checker.getDictionaryName());
-        core.registerNewSearcherListener(new SpellCheckerListener(core, checker, buildOnCommit, buildOnOptimize));
-      }
-    } else {
-      throw new RuntimeException("Can't load spell checker: " + className);
-    }
-    return hasDefault;
-  }
-
-  private static class SpellCheckerListener implements SolrEventListener {
-    private final SolrCore core;
-    private final SolrSpellChecker checker;
-    private final boolean buildOnCommit;
-    private final boolean buildOnOptimize;
-
-    public SpellCheckerListener(SolrCore core, SolrSpellChecker checker, boolean buildOnCommit, boolean buildOnOptimize) {
-      this.core = core;
-      this.checker = checker;
-      this.buildOnCommit = buildOnCommit;
-      this.buildOnOptimize = buildOnOptimize;
-    }
-
-    @Override
-    public void init(NamedList args) {
-    }
-
-    @Override
-    public void newSearcher(SolrIndexSearcher newSearcher,
-                            SolrIndexSearcher currentSearcher) {
-      if (currentSearcher == null) {
-        // firstSearcher event
-        try {
-          log.info("Loading spell index for spellchecker: "
-                  + checker.getDictionaryName());
-          checker.reload(core, newSearcher);
-        } catch (IOException e) {
-          log.error( "Exception in reloading spell check index for spellchecker: " + checker.getDictionaryName(), e);
-        }
-      } else {
-        // newSearcher event
-        if (buildOnCommit)  {
-          buildSpellIndex(newSearcher);
-        } else if (buildOnOptimize) {
-          if (newSearcher.getIndexReader().leaves().size() == 1)  {
-            buildSpellIndex(newSearcher);
-          } else  {
-            log.info("Index is not optimized therefore skipping building spell check index for: " + checker.getDictionaryName());
-          }
-        }
-      }
-
-    }
-
-    private void buildSpellIndex(SolrIndexSearcher newSearcher) {
-      try {
-        log.info("Building spell index for spell checker: " + checker.getDictionaryName());
-        checker.build(core, newSearcher);
-      } catch (Exception e) {
-        log.error(
-                "Exception in building spell check index for spellchecker: " + checker.getDictionaryName(), e);
-      }
-    }
-
-    @Override
-    public void postCommit() {
-    }
-
-    @Override
-    public void postSoftCommit() {
-    }
-  }
-
-  public Map<String, SolrSpellChecker> getSpellCheckers() {
-    return Collections.unmodifiableMap(spellCheckers);
-  }
-
-  // ///////////////////////////////////////////
-  // / SolrInfoBean
-  // //////////////////////////////////////////
-
-  @Override
-  public String getDescription() {
-    return "A Spell Checker component";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.SPELLCHECKER;
-  }
-}
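
For context, here is a minimal SolrJ sketch of a client exercising the request
parameters the deleted component parses (spellcheck, spellcheck.q,
spellcheck.count, spellcheck.collate, spellcheck.extendedResults). The base
URL, core name, and misspelled query are illustrative assumptions, not taken
from this commit:

// Minimal SolrJ sketch; the base URL, core name ("techproducts") and the query
// string are assumptions. It reads back the structure built by toNamedList(...)
// through SolrJ's SpellCheckResponse, which the component itself imports.
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.client.solrj.response.SpellCheckResponse;

public class SpellCheckClientSketch {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client =
             new HttpSolrClient.Builder("http://localhost:8983/solr/techproducts").build()) {
      SolrQuery query = new SolrQuery("name:hopital");   // deliberately misspelled
      query.set("spellcheck", "true");                   // turns the component on (COMPONENT_NAME)
      query.set("spellcheck.q", "hopital");              // tokenized with the checker's query analyzer
      query.set("spellcheck.count", 5);                  // SPELLCHECK_COUNT
      query.set("spellcheck.collate", "true");           // SPELLCHECK_COLLATE
      query.set("spellcheck.extendedResults", "true");   // SPELLCHECK_EXTENDED_RESULTS

      QueryResponse rsp = client.query(query);
      SpellCheckResponse spell = rsp.getSpellCheckResponse();
      if (spell != null) {
        for (SpellCheckResponse.Suggestion s : spell.getSuggestions()) {
          System.out.println(s.getToken() + " -> " + s.getAlternatives());
        }
        System.out.println("collation: " + spell.getCollatedResult());
      }
    }
  }
}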


[10/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/ZookeeperInfoHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ZookeeperInfoHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/ZookeeperInfoHandler.java
deleted file mode 100644
index 0616ac8..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/ZookeeperInfoHandler.java
+++ /dev/null
@@ -1,857 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStreamWriter;
-import java.io.Reader;
-import java.io.Writer;
-import java.lang.invoke.MethodHandles;
-import java.net.URLEncoder;
-import java.nio.charset.StandardCharsets;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.lucene.util.BytesRef;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.OnReconnect;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.MapSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.ContentStream;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.JSONResponseWriter;
-import org.apache.solr.response.RawResponseWriter;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.util.SimplePostTool.BAOS;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.data.Stat;
-import org.apache.zookeeper.server.ByteBufferInputStream;
-import org.noggit.CharArr;
-import org.noggit.JSONWriter;
-import org.noggit.ObjectBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.OMIT_HEADER;
-import static org.apache.solr.common.params.CommonParams.PATH;
-import static org.apache.solr.common.params.CommonParams.WT;
-
-
-/**
- * Zookeeper Info
- *
- * @since solr 4.0
- */
-public final class ZookeeperInfoHandler extends RequestHandlerBase {
-  private final CoreContainer cores;
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  // used for custom sorting collection names looking like prefix##
-  // only go out to 7 digits (which safely fits in an int)
-  private static final Pattern endsWithDigits = Pattern.compile("^(\\D*)(\\d{1,7}?)$");
-
-  public ZookeeperInfoHandler(CoreContainer cc) {
-    this.cores = cc;
-  }
-
-
-  @Override
-  public String getDescription() {
-    return "Fetch Zookeeper contents";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-
-  /**
-   * Enumeration of ways to filter collections on the graph panel.
-   */
-  enum FilterType {
-    none, name, status
-  }
-
-  /**
-   * Holds state of a single page of collections requested from the cloud panel.
-   */
-  static final class PageOfCollections {
-    List<String> selected;
-    int numFound = 0; // total number of matches (across all pages)
-    int start = 0;
-    int rows = -1;
-    FilterType filterType;
-    String filter;
-
-    PageOfCollections(int start, int rows, FilterType filterType, String filter) {
-      this.start = start;
-      this.rows = rows;
-      this.filterType = filterType;
-      this.filter = filter;
-    }
-
-    void selectPage(List<String> collections) {
-      numFound = collections.size();
-      // start with full set and then find the sublist for the desired selected
-      selected = collections;
-
-      if (rows > 0) { // paging desired
-        if (start > numFound)
-          start = 0; // this might happen if they applied a new filter
-
-        int lastIndex = Math.min(start + rows, numFound);
-        if (start > 0 || lastIndex < numFound)
-          selected = collections.subList(start, lastIndex);
-      }
-    }
-
-    /**
-     * Filters a list of collections by name if applicable.
-     */
-    List<String> applyNameFilter(List<String> collections) {
-
-      if (filterType != FilterType.name || filter == null)
-        return collections; // name filter doesn't apply
-
-      // typically, a user will type a prefix and then *, e.g. tj*
-      // when they really mean tj.*
-      String regexFilter = (!filter.endsWith(".*") && filter.endsWith("*"))
-          ? filter.substring(0, filter.length() - 1) + ".*" : filter;
-
-      // case-insensitive
-      if (!regexFilter.startsWith("(?i)"))
-        regexFilter = "(?i)" + regexFilter;
-
-      Pattern filterRegex = Pattern.compile(regexFilter);
-      List<String> filtered = new ArrayList<String>();
-      for (String next : collections) {
-        if (matches(filterRegex, next))
-          filtered.add(next);
-      }
-
-      return filtered;
-    }
-
-    /**
-     * Walk the collection state JSON object to see if it has any replicas that match
-     * the state the user is filtering by.
-     */
-    @SuppressWarnings("unchecked")
-    final boolean matchesStatusFilter(Map<String, Object> collectionState, Set<String> liveNodes) {
-
-      if (filterType != FilterType.status || filter == null || filter.length() == 0)
-        return true; // no status filter, so all match
-
-      boolean isHealthy = true; // means all replicas for all shards active
-      boolean hasDownedShard = false; // means one or more shards is down
-      boolean replicaInRecovery = false;
-
-      Map<String, Object> shards = (Map<String, Object>) collectionState.get("shards");
-      for (String shardId : shards.keySet()) {
-        boolean hasActive = false;
-        Map<String, Object> shard = (Map<String, Object>) shards.get(shardId);
-        Map<String, Object> replicas = (Map<String, Object>) shard.get("replicas");
-        for (String replicaId : replicas.keySet()) {
-          Map<String, Object> replicaState = (Map<String, Object>) replicas.get(replicaId);
-          Replica.State coreState = Replica.State.getState((String) replicaState.get(ZkStateReader.STATE_PROP));
-          String nodeName = (String) replicaState.get("node_name");
-
-          // state can lie to you if the node is offline, so need to reconcile with live_nodes too
-          if (!liveNodes.contains(nodeName))
-            coreState = Replica.State.DOWN; // not on a live node, so must be down
-
-          if (coreState == Replica.State.ACTIVE) {
-            hasActive = true; // found at least one active replica for this shard
-          } else {
-            if (coreState == Replica.State.RECOVERING) {
-              replicaInRecovery = true;
-            }
-            isHealthy = false; // assumed healthy and found one replica that is not
-          }
-        }
-
-        if (!hasActive)
-          hasDownedShard = true; // this is bad
-      }
-
-      if ("healthy".equals(filter)) {
-        return isHealthy;
-      } else if ("degraded".equals(filter)) {
-        return !hasDownedShard && !isHealthy; // means no shards offline but not 100% healthy either
-      } else if ("downed_shard".equals(filter)) {
-        return hasDownedShard;
-      } else if (Replica.State.getState(filter) == Replica.State.RECOVERING) {
-        return !isHealthy && replicaInRecovery;
-      }
-
-      return true;
-    }
-
-    final boolean matches(final Pattern filter, final String collName) {
-      return filter.matcher(collName).matches();
-    }
-
-    String getPagingHeader() {
-      return start + "|" + rows + "|" + numFound + "|" + (filterType != null ? filterType.toString() : "") + "|" + (filter != null ? filter : "");
-    }
-
-    public String toString() {
-      return getPagingHeader();
-    }
-
-  }
-
-  /**
-   * Supports paged navigation of collections on the cloud panel. To avoid serving
-   * stale collection data, this object watches the /collections znode, which will
-   * change if a collection is added or removed.
-   */
-  static final class PagedCollectionSupport implements Watcher, Comparator<String>, OnReconnect {
-
-    // this is the full merged list of collections from ZooKeeper
-    private List<String> cachedCollections;
-
-    /**
-     * If the list of collections changes, mark the cache as stale.
-     */
-    @Override
-    public void process(WatchedEvent event) {
-      // session events are not change events, and do not remove the watcher
-      if (Event.EventType.None.equals(event.getType())) {
-        return;
-      }
-      synchronized (this) {
-        cachedCollections = null;
-      }
-    }
-
-    /**
-     * Create a merged view of all collections (internal from /clusterstate.json and external from /collections/?/state.json).
-     */
-    private synchronized List<String> getCollections(SolrZkClient zkClient) throws KeeperException, InterruptedException {
-      if (cachedCollections == null) {
-        // cache is stale, rebuild the full list ...
-        cachedCollections = new ArrayList<String>();
-
-        List<String> fromZk = zkClient.getChildren("/collections", this, true);
-        if (fromZk != null)
-          cachedCollections.addAll(fromZk);
-
-        // sort the final merged set of collections
-        Collections.sort(cachedCollections, this);
-      }
-
-      return cachedCollections;
-    }
-
-    /**
-     * Gets the requested page of collections after applying filters and offsets.
-     */
-    public PageOfCollections fetchPage(PageOfCollections page, SolrZkClient zkClient)
-        throws KeeperException, InterruptedException {
-
-
-      List<String> children = getCollections(zkClient);
-      page.selected = children; // start with the page being the full list
-
-      // activate paging (if disabled) for large collection sets
-      if (page.start == 0 && page.rows == -1 && page.filter == null && children.size() > 10) {
-        page.rows = 20;
-        page.start = 0;
-      }
-
-      // apply the name filter if supplied (we don't need to pull state
-      // data from ZK to do name filtering)
-      if (page.filterType == FilterType.name && page.filter != null)
-        children = page.applyNameFilter(children);
-
-      // a little hacky ... we can't select the page when filtering by
-      // status until reading all status objects from ZK
-      if (page.filterType != FilterType.status)
-        page.selectPage(children);
-
-      return page;
-    }
-
-    @Override
-    public int compare(String left, String right) {
-      if (left == null)
-        return -1;
-
-      if (left.equals(right))
-        return 0;
-
-      // sort lexically unless the two collection names start with the same base prefix
-      // and end in a number (which is a common enough naming scheme to have direct 
-      // support for it)
-      Matcher leftMatcher = endsWithDigits.matcher(left);
-      if (leftMatcher.matches()) {
-        Matcher rightMatcher = endsWithDigits.matcher(right);
-        if (rightMatcher.matches()) {
-          String leftGroup1 = leftMatcher.group(1);
-          String rightGroup1 = rightMatcher.group(1);
-          if (leftGroup1.equals(rightGroup1)) {
-            // both start with the same prefix ... compare the numeric suffixes
-            // (the pattern caps the suffix at 7 digits, so it safely fits in an int)
-            int leftGroup2 = Integer.parseInt(leftMatcher.group(2));
-            int rightGroup2 = Integer.parseInt(rightMatcher.group(2));
-            return Integer.compare(leftGroup2, rightGroup2);
-          }
-        }
-      }
-      return left.compareTo(right);
-    }
-
-    /**
-     * Called after a ZooKeeper session expiration occurs
-     */
-    @Override
-    public void command() {
-      // we need to re-establish the watcher on the collections list after session expires
-      synchronized (this) {
-        cachedCollections = null;
-      }
-    }
-  }
-
-  private PagedCollectionSupport pagingSupport;
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    final SolrParams params = req.getParams();
-    Map<String, String> map = new HashMap<>(1);
-    map.put(WT, "raw");
-    map.put(OMIT_HEADER, "true");
-    req.setParams(SolrParams.wrapDefaults(new MapSolrParams(map), params));
-    synchronized (this) {
-      if (pagingSupport == null) {
-        pagingSupport = new PagedCollectionSupport();
-        ZkController zkController = cores.getZkController();
-        if (zkController != null) {
-          // get notified when the ZK session expires (so we can clear the cached collections and rebuild)
-          zkController.addOnReconnectListener(pagingSupport);
-        }
-      }
-    }
-
-    String path = params.get(PATH);
-    String addr = params.get("addr");
-
-    if (addr != null && addr.length() == 0) {
-      addr = null;
-    }
-
-    String detailS = params.get("detail");
-    boolean detail = detailS != null && detailS.equals("true");
-
-    String dumpS = params.get("dump");
-    boolean dump = dumpS != null && dumpS.equals("true");
-
-    int start = params.getInt("start", 0);
-    int rows = params.getInt("rows", -1);
-
-    String filterType = params.get("filterType");
-    if (filterType != null) {
-      filterType = filterType.trim().toLowerCase(Locale.ROOT);
-      if (filterType.length() == 0)
-        filterType = null;
-    }
-    FilterType type = (filterType != null) ? FilterType.valueOf(filterType) : FilterType.none;
-
-    String filter = (type != FilterType.none) ? params.get("filter") : null;
-    if (filter != null) {
-      filter = filter.trim();
-      if (filter.length() == 0)
-        filter = null;
-    }
-
-    ZKPrinter printer = new ZKPrinter(cores.getZkController(), addr);
-    printer.detail = detail;
-    printer.dump = dump;
-    boolean isGraphView = "graph".equals(params.get("view"));
-    printer.page = (isGraphView && "/clusterstate.json".equals(path))
-        ? new PageOfCollections(start, rows, type, filter) : null;
-    printer.pagingSupport = pagingSupport;
-
-    try {
-      printer.print(path);
-    } finally {
-      printer.close();
-    }
-    rsp.getValues().add(RawResponseWriter.CONTENT,printer);
-  }
-
-  //--------------------------------------------------------------------------------------
-  //
-  //--------------------------------------------------------------------------------------
-
-  static class ZKPrinter implements ContentStream {
-    static boolean FULLPATH_DEFAULT = false;
-
-    boolean indent = true;
-    boolean fullpath = FULLPATH_DEFAULT;
-    boolean detail = false;
-    boolean dump = false;
-
-    String addr; // the address passed to us
-    String keeperAddr; // the address we're connected to
-
-    boolean doClose;  // close the client after done if we opened it
-
-    final BAOS baos = new BAOS();
-    final Writer out = new OutputStreamWriter(baos,  StandardCharsets.UTF_8);
-    SolrZkClient zkClient;
-
-    int level;
-    int maxData = 95;
-
-    PageOfCollections page;
-    PagedCollectionSupport pagingSupport;
-    ZkController zkController;
-
-    public ZKPrinter(ZkController controller, String addr) throws IOException {
-      this.zkController = controller;
-      this.addr = addr;
-
-      if (addr == null) {
-        if (controller != null) {
-          // this core is zk enabled
-          keeperAddr = controller.getZkServerAddress();
-          zkClient = controller.getZkClient();
-          if (zkClient != null && zkClient.isConnected()) {
-            return;
-          } else {
-            // try a different client with this address
-            addr = keeperAddr;
-          }
-        }
-      }
-
-      keeperAddr = addr;
-      if (addr == null) {
-        writeError(404, "Zookeeper is not configured for this Solr Core. Please try connecting to an alternate zookeeper address.");
-        return;
-      }
-
-      try {
-        zkClient = new SolrZkClient(addr, 10000);
-        doClose = true;
-      } catch (Exception e) {
-        writeError(503, "Could not connect to zookeeper at '" + addr + "'");
-        zkClient = null;
-        return;
-      }
-
-    }
-
-    public void close() {
-      if (doClose) {
-        zkClient.close();
-      }
-      try {
-        out.flush();
-      } catch (Exception e) {
-        throw new RuntimeException(e);
-      }
-    }
-
-    // main entry point
-    void print(String path) throws IOException {
-      if (zkClient == null) {
-        return;
-      }
-
-      // normalize path
-      if (path == null) {
-        path = "/";
-      } else {
-        path = path.trim();
-        if (path.length() == 0) {
-          path = "/";
-        }
-      }
-
-      if (path.endsWith("/") && path.length() > 1) {
-        path = path.substring(0, path.length() - 1);
-      }
-
-      int idx = path.lastIndexOf('/');
-      String parent = idx >= 0 ? path.substring(0, idx) : path;
-      if (parent.length() == 0) {
-        parent = "/";
-      }
-
-      CharArr chars = new CharArr();
-      JSONWriter json = new JSONWriter(chars, 2);
-      json.startObject();
-
-      if (detail) {
-        if (!printZnode(json, path)) {
-          return;
-        }
-        json.writeValueSeparator();
-      }
-
-      json.writeString("tree");
-      json.writeNameSeparator();
-      json.startArray();
-      if (!printTree(json, path)) {
-        return; // there was an error
-      }
-      json.endArray();
-      json.endObject();
-      out.write(chars.toString());
-    }
-
-    void writeError(int code, String msg) throws IOException {
-      throw new SolrException(ErrorCode.getErrorCode(code), msg);
-      /*response.setStatus(code);
-
-      CharArr chars = new CharArr();
-      JSONWriter w = new JSONWriter(chars, 2);
-      w.startObject();
-      w.indent();
-      w.writeString("status");
-      w.writeNameSeparator();
-      w.write(code);
-      w.writeValueSeparator();
-      w.indent();
-      w.writeString("error");
-      w.writeNameSeparator();
-      w.writeString(msg);
-      w.endObject();
-
-      out.write(chars.toString());*/
-    }
-
-
-    boolean printTree(JSONWriter json, String path) throws IOException {
-      String label = path;
-      if (!fullpath) {
-        int idx = path.lastIndexOf('/');
-        label = idx > 0 ? path.substring(idx + 1) : path;
-      }
-      json.startObject();
-      //writeKeyValue(json, "data", label, true );
-      json.writeString("data");
-      json.writeNameSeparator();
-
-      json.startObject();
-      writeKeyValue(json, "title", label, true);
-      json.writeValueSeparator();
-      json.writeString("attr");
-      json.writeNameSeparator();
-      json.startObject();
-      writeKeyValue(json, "href", "admin/zookeeper?detail=true&path=" + URLEncoder.encode(path, "UTF-8"), true);
-      json.endObject();
-      json.endObject();
-
-      Stat stat = new Stat();
-      try {
-        // Trickily, the call to zkClient.getData fills in the stat variable
-        byte[] data = zkClient.getData(path, null, stat, true);
-
-        if (stat.getEphemeralOwner() != 0) {
-          writeKeyValue(json, "ephemeral", true, false);
-          writeKeyValue(json, "version", stat.getVersion(), false);
-        }
-
-        if (dump) {
-          json.writeValueSeparator();
-          printZnode(json, path);
-        }
-
-      } catch (IllegalArgumentException e) {
-        // path doesn't exist (must have been removed)
-        writeKeyValue(json, "warning", "(path gone)", false);
-      } catch (KeeperException e) {
-        writeKeyValue(json, "warning", e.toString(), false);
-        log.warn("Keeper Exception", e);
-      } catch (InterruptedException e) {
-        writeKeyValue(json, "warning", e.toString(), false);
-        log.warn("InterruptedException", e);
-      }
-
-      if (stat.getNumChildren() > 0) {
-        json.writeValueSeparator();
-        if (indent) {
-          json.indent();
-        }
-        json.writeString("children");
-        json.writeNameSeparator();
-        json.startArray();
-
-        try {
-          List<String> children = zkClient.getChildren(path, null, true);
-          java.util.Collections.sort(children);
-
-          boolean first = true;
-          for (String child : children) {
-            if (!first) {
-              json.writeValueSeparator();
-            }
-
-            String childPath = path + (path.endsWith("/") ? "" : "/") + child;
-            if (!printTree(json, childPath)) {
-              return false;
-            }
-            first = false;
-          }
-        } catch (KeeperException e) {
-          writeError(500, e.toString());
-          return false;
-        } catch (InterruptedException e) {
-          writeError(500, e.toString());
-          return false;
-        } catch (IllegalArgumentException e) {
-          // path doesn't exist (must have been removed)
-          json.writeString("(children gone)");
-        }
-
-        json.endArray();
-      }
-
-      json.endObject();
-      return true;
-    }
-
-    String time(long ms) {
-      return (new Date(ms)).toString() + " (" + ms + ")";
-    }
-
-    public void writeKeyValue(JSONWriter json, String k, Object v, boolean isFirst) {
-      if (!isFirst) {
-        json.writeValueSeparator();
-      }
-      if (indent) {
-        json.indent();
-      }
-      json.writeString(k);
-      json.writeNameSeparator();
-      json.write(v);
-    }
-
-    @SuppressWarnings("unchecked")
-    boolean printZnode(JSONWriter json, String path) throws IOException {
-      try {
-        String dataStr = null;
-        String dataStrErr = null;
-        Stat stat = new Stat();
-        // Trickily, the call to zkClient.getData fills in the stat variable
-        byte[] data = zkClient.getData(path, null, stat, true);
-        if (null != data) {
-          try {
-            dataStr = (new BytesRef(data)).utf8ToString();
-          } catch (Exception e) {
-            dataStrErr = "data is not parsable as a utf8 String: " + e.toString();
-          }
-        }
-        // support paging of the collections graph view (in case there are many collections)
-        if (page != null) {
-          // we've already pulled the data for /clusterstate.json from ZooKeeper above,
-          // but it needs to be parsed into a map so we can look up collection states before
-          // trying to find them in the /collections/?/state.json znode
-          Map<String, Object> clusterstateJsonMap = null;
-          if (dataStr != null) {
-            try {
-              clusterstateJsonMap = (Map<String, Object>) ObjectBuilder.fromJSON(dataStr);
-            } catch (Exception e) {
-              throw new SolrException(ErrorCode.SERVER_ERROR,
-                  "Failed to parse /clusterstate.json from ZooKeeper due to: " + e, e);
-            }
-          } else {
-            clusterstateJsonMap = Utils.makeMap();
-          }
-
-          // fetch the requested page of collections and then retrieve the state for each 
-          page = pagingSupport.fetchPage(page, zkClient);
-          // keep track of how many collections match the filter
-          boolean applyStatusFilter =
-              (page.filterType == FilterType.status && page.filter != null);
-          List<String> matchesStatusFilter = applyStatusFilter ? new ArrayList<String>() : null;
-          Set<String> liveNodes = applyStatusFilter ?
-              zkController.getZkStateReader().getClusterState().getLiveNodes() : null;
-
-          SortedMap<String, Object> collectionStates = new TreeMap<String, Object>(pagingSupport);
-          for (String collection : page.selected) {
-            Object collectionState = clusterstateJsonMap.get(collection);
-            if (collectionState != null) {
-              // collection state was in /clusterstate.json
-              if (applyStatusFilter) {
-                // verify this collection matches the status filter
-                if (page.matchesStatusFilter((Map<String, Object>) collectionState, liveNodes)) {
-                  matchesStatusFilter.add(collection);
-                  collectionStates.put(collection, collectionState);
-                }
-              } else {
-                collectionStates.put(collection, collectionState);
-              }
-            } else {
-              // looks like an external collection ...
-              String collStatePath = String.format(Locale.ROOT, "/collections/%s/state.json", collection);
-              String childDataStr = null;
-              try {
-                byte[] childData = zkClient.getData(collStatePath, null, null, true);
-                if (childData != null)
-                  childDataStr = (new BytesRef(childData)).utf8ToString();
-              } catch (KeeperException.NoNodeException nne) {
-                log.warn("State for collection " + collection +
-                    " not found in /clusterstate.json or /collections/" + collection + "/state.json!");
-              } catch (Exception childErr) {
-                log.error("Failed to get " + collStatePath + " due to: " + childErr);
-              }
-
-              if (childDataStr != null) {
-                Map<String, Object> extColl = (Map<String, Object>) ObjectBuilder.fromJSON(childDataStr);
-                collectionState = extColl.get(collection);
-
-                if (applyStatusFilter) {
-                  // verify this collection matches the filtered state
-                  if (page.matchesStatusFilter((Map<String, Object>) collectionState, liveNodes)) {
-                    matchesStatusFilter.add(collection);
-                    collectionStates.put(collection, collectionState);
-                  }
-                } else {
-                  collectionStates.put(collection, collectionState);
-                }
-              }
-            }
-          }
-
-          if (applyStatusFilter) {
-            // update the paged navigation info after applying the status filter
-            page.selectPage(matchesStatusFilter);
-
-            // rebuild the Map of state data
-            SortedMap<String, Object> map = new TreeMap<String, Object>(pagingSupport);
-            for (String next : page.selected)
-              map.put(next, collectionStates.get(next));
-            collectionStates = map;
-          }
-
-          if (collectionStates != null) {
-            CharArr out = new CharArr();
-            new JSONWriter(out, 2).write(collectionStates);
-            dataStr = out.toString();
-          }
-        }
-
-        json.writeString("znode");
-        json.writeNameSeparator();
-        json.startObject();
-
-        writeKeyValue(json, PATH, path, true);
-
-        json.writeValueSeparator();
-        json.writeString("prop");
-        json.writeNameSeparator();
-        json.startObject();
-        writeKeyValue(json, "version", stat.getVersion(), true);
-        writeKeyValue(json, "aversion", stat.getAversion(), false);
-        writeKeyValue(json, "children_count", stat.getNumChildren(), false);
-        writeKeyValue(json, "ctime", time(stat.getCtime()), false);
-        writeKeyValue(json, "cversion", stat.getCversion(), false);
-        writeKeyValue(json, "czxid", stat.getCzxid(), false);
-        writeKeyValue(json, "ephemeralOwner", stat.getEphemeralOwner(), false);
-        writeKeyValue(json, "mtime", time(stat.getMtime()), false);
-        writeKeyValue(json, "mzxid", stat.getMzxid(), false);
-        writeKeyValue(json, "pzxid", stat.getPzxid(), false);
-        writeKeyValue(json, "dataLength", stat.getDataLength(), false);
-        if (null != dataStrErr) {
-          writeKeyValue(json, "dataNote", dataStrErr, false);
-        }
-        json.endObject();
-
-        if (null != dataStr) {
-          writeKeyValue(json, "data", dataStr, false);
-        }
-
-        if (page != null) {
-          writeKeyValue(json, "paging", page.getPagingHeader(), false);
-        }
-
-        json.endObject();
-      } catch (KeeperException e) {
-        writeError(500, e.toString());
-        return false;
-      } catch (InterruptedException e) {
-        writeError(500, e.toString());
-        return false;
-      }
-      return true;
-    }
-
-   /* @Override
-    public void write(OutputStream os) throws IOException {
-      ByteBuffer bytes = baos.getByteBuffer();
-      os.write(bytes.array(),0,bytes.limit());
-    }
-*/
-    @Override
-    public String getName() {
-      return null;
-    }
-
-    @Override
-    public String getSourceInfo() {
-      return null;
-    }
-
-    @Override
-    public String getContentType() {
-      return JSONResponseWriter.CONTENT_TYPE_JSON_UTF8;
-    }
-
-    @Override
-    public Long getSize() {
-      return null;
-    }
-
-    @Override
-    public InputStream getStream() throws IOException {
-      return new ByteBufferInputStream(baos.getByteBuffer());
-    }
-
-    @Override
-    public Reader getReader() throws IOException {
-      return null;
-    }
-  }
-}
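
For context on the read path that the deleted ZKPrinter relied on: below is a
minimal standalone sketch of walking a znode tree with SolrZkClient, using only
the client calls visible in the diff above (getData filling in the Stat as a
side effect, getChildren, close). The ZooKeeper address and starting path are
placeholders, not values taken from this commit.

    import java.util.List;

    import org.apache.solr.common.cloud.SolrZkClient;
    import org.apache.zookeeper.data.Stat;

    public class ZkTreeWalk {
      public static void main(String[] args) throws Exception {
        // Placeholder address; ZKPrinter obtained this from ZkController
        // or from the addr request parameter.
        SolrZkClient zkClient = new SolrZkClient("localhost:2181", 10000);
        try {
          walk(zkClient, "/");
        } finally {
          zkClient.close();
        }
      }

      static void walk(SolrZkClient zkClient, String path) throws Exception {
        Stat stat = new Stat();
        // As in ZKPrinter: getData populates the stat argument, so the call
        // is useful even when only the node's metadata is needed.
        byte[] data = zkClient.getData(path, null, stat, true);
        System.out.println(path + " (version=" + stat.getVersion()
            + ", children=" + stat.getNumChildren()
            + ", bytes=" + (data == null ? 0 : data.length) + ")");
        if (stat.getNumChildren() > 0) {
          List<String> children = zkClient.getChildren(path, null, true);
          for (String child : children) {
            walk(zkClient, path + (path.endsWith("/") ? "" : "/") + child);
          }
        }
      }
    }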

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/ZookeeperStatusHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ZookeeperStatusHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/ZookeeperStatusHandler.java
deleted file mode 100644
index 8842437..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/ZookeeperStatusHandler.java
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
-import java.io.PrintWriter;
-import java.io.Writer;
-import java.lang.invoke.MethodHandles;
-import java.net.Socket;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-/**
- * Zookeeper Status handler, talks to ZK using sockets and four-letter words
- *
- * @since solr 7.5
- */
-public final class ZookeeperStatusHandler extends RequestHandlerBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private static final int ZOOKEEPER_DEFAULT_PORT = 2181;
-  private static final String STATUS_RED = "red";
-  private static final String STATUS_GREEN = "green";
-  private static final String STATUS_YELLOW = "yellow";
-  private static final String STATUS_NA = "N/A";
-  private CoreContainer cores;
-
-  public ZookeeperStatusHandler(CoreContainer cc) {
-    this.cores = cc;
-  }
-  
-  @Override
-  public String getDescription() {
-    return "Fetch Zookeeper status";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    final SolrParams params = req.getParams();
-    Map<String, String> map = new HashMap<>(1);
-    NamedList values = rsp.getValues();
-    values.add("zkStatus", getZkStatus(cores.getZkController().getZkServerAddress()));
-  }
-
-  /*
-   Gets all info from the ZK API and returns it as a map
-   */
-  protected Map<String, Object> getZkStatus(String zkHost) {
-    Map<String, Object> zkStatus = new HashMap<>();
-    List<String> zookeepers = Arrays.asList(zkHost.split("/")[0].split(","));
-    List<Object> details = new ArrayList<>();
-    int numOk = 0;
-    String status = STATUS_NA;
-    int standalone = 0;
-    int followers = 0;
-    int reportedFollowers = 0;
-    int leaders = 0;
-    List<String> errors = new ArrayList<>();
-    for (String zk : zookeepers) {
-      try {
-        Map<String, Object> stat = monitorZookeeper(zk);
-        details.add(stat);
-        if ("true".equals(String.valueOf(stat.get("ok")))) {
-          numOk++;
-        }
-        String state = String.valueOf(stat.get("zk_server_state"));
-        if ("follower".equals(state)) {
-          followers++;
-        } else if ("leader".equals(state)) {
-          leaders++;
-          reportedFollowers = Integer.parseInt(String.valueOf(stat.get("zk_followers")));
-        } else if ("standalone".equals(state)) {
-          standalone++;
-        }
-      } catch (SolrException se) {
-        log.warn("Failed talking to zookeeper" + zk, se);
-        errors.add(se.getMessage());
-        Map<String, Object> stat = new HashMap<>();
-        stat.put("host", zk);
-        stat.put("ok", false);
-        details.add(stat);
-      }       
-    }
-    zkStatus.put("ensembleSize", zookeepers.size());
-    zkStatus.put("zkHost", zkHost);
-    zkStatus.put("details", details);
-    if (followers+leaders > 0 && standalone > 0) {
-      status = STATUS_RED;
-      errors.add("The zk nodes do not agree on their mode, check details");
-    }
-    if (standalone > 1) {
-      status = STATUS_RED;
-      errors.add("Only one zk allowed in standalone mode");
-    }
-    if (leaders > 1) {
-      zkStatus.put("mode", "ensemble");
-      status = STATUS_RED;
-      errors.add("Only one leader allowed, got " + leaders);
-    }
-    if (followers > 0 && leaders == 0) {
-      zkStatus.put("mode", "ensemble");
-      status = STATUS_RED;
-      errors.add("We do not have a leader");
-    }
-    if (leaders > 0 && followers != reportedFollowers) {
-      zkStatus.put("mode", "ensemble");
-      status = STATUS_RED;
-      errors.add("Leader reports " + reportedFollowers + " followers, but we only found " + followers + 
-        ". Please check zkHost configuration");
-    }
-    if (followers+leaders == 0 && standalone == 1) {
-      zkStatus.put("mode", "standalone");
-    }
-    if (followers+leaders > 0 && (zookeepers.size())%2 == 0) {
-      if (!STATUS_RED.equals(status)) {
-        status = STATUS_YELLOW;
-      }
-      errors.add("We have an even number of zookeepers which is not recommended");
-    }
-    if (followers+leaders > 0 && standalone == 0) {
-      zkStatus.put("mode", "ensemble");
-    }
-    if (status.equals(STATUS_NA)) {
-      if (numOk == zookeepers.size()) {
-        status = STATUS_GREEN;
-      } else if (numOk < zookeepers.size() && numOk > zookeepers.size() / 2) {
-        status = STATUS_YELLOW;
-        errors.add("Some zookeepers are down: " + numOk + "/" + zookeepers.size());
-      } else {
-        status = STATUS_RED;
-        errors.add("Mismatch in number of zookeeper nodes live. numOK=" + numOk + ", expected " + zookeepers.size());
-      }
-    }
-    zkStatus.put("status", status);
-    if (!errors.isEmpty()) {
-      zkStatus.put("errors", errors);
-    }
-    return zkStatus;
-  }
-
-  private Map<String, Object> monitorZookeeper(String zkHostPort) {
-    List<String> lines = getZkRawResponse(zkHostPort, "mntr");
-    Map<String, Object> obj = new HashMap<>();
-    obj.put("host", zkHostPort);
-    obj.put("ok", "imok".equals(getZkRawResponse(zkHostPort, "ruok").get(0)));
-    for (String line : lines) {
-      obj.put(line.split("\t")[0], line.split("\t")[1]);
-    }
-    lines = getZkRawResponse(zkHostPort, "conf");
-    for (String line : lines) {
-      obj.put(line.split("=")[0], line.split("=")[1]);
-    }
-    return obj;
-  }
-  
-  /**
-   * Sends a four-letter-word command to one particular Zookeeper server and returns the response as a list of strings
-   * @param zkHostPort the host:port for one zookeeper server to access
-   * @param fourLetterWordCommand the custom 4-letter command to send to Zookeeper
-   * @return a list of lines returned from Zookeeper
-   */
-  private List<String> getZkRawResponse(String zkHostPort, String fourLetterWordCommand) {
-    String[] hostPort = zkHostPort.split(":");
-    String host = hostPort[0];
-    int port = ZOOKEEPER_DEFAULT_PORT;
-    if (hostPort.length > 1) {
-      port = Integer.parseInt(hostPort[1]);
-    }
-    try (
-        Socket socket = new Socket(host, port);
-        Writer writer = new OutputStreamWriter(socket.getOutputStream(), "utf-8");
-        PrintWriter out = new PrintWriter(writer, true);
-        BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), "utf-8"));) {
-      out.println(fourLetterWordCommand);
-      List<String> response = in.lines().collect(Collectors.toList());
-      log.debug("Got response from ZK on host {} and port {}: {}", host, port, response);
-      if (response == null || response.isEmpty()) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Empty response from Zookeeper " + zkHostPort);
-      }
-      return response;
-    } catch (IOException e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Failed talking to Zookeeper " + zkHostPort, e);
-    }
-  }
-}
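
The deleted handler gathered everything over ZooKeeper's four-letter-word
protocol (ruok, mntr, conf) on a plain socket, as getZkRawResponse above shows.
A minimal sketch of that probe, assuming a server on localhost and the default
port 2181 (both placeholders):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.io.OutputStreamWriter;
    import java.io.PrintWriter;
    import java.net.Socket;

    public class ZkRuokProbe {
      public static void main(String[] args) throws Exception {
        // Placeholder host/port; 2181 matches ZOOKEEPER_DEFAULT_PORT above.
        try (Socket socket = new Socket("localhost", 2181);
             PrintWriter out = new PrintWriter(
                 new OutputStreamWriter(socket.getOutputStream(), "utf-8"), true);
             BufferedReader in = new BufferedReader(
                 new InputStreamReader(socket.getInputStream(), "utf-8"))) {
          out.println("ruok"); // four-letter health check
          // A healthy server answers "imok" and then closes the connection.
          System.out.println(in.readLine());
        }
      }
    }

A healthy node replies "imok"; "mntr" and "conf" return key/value lines that
monitorZookeeper split on tab and '=' respectively.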

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/package-info.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/package-info.java b/solr/core/src/java/org/apache/solr/handler/admin/package-info.java
deleted file mode 100644
index bce6448..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- 
-/** 
- * {@link org.apache.solr.request.SolrRequestHandler} implementations for powering the Solr Admin UI
- */
-package org.apache.solr.handler.admin;
-
-

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/DebugComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/DebugComponent.java b/solr/core/src/java/org/apache/solr/handler/component/DebugComponent.java
deleted file mode 100644
index 1f398a9..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/DebugComponent.java
+++ /dev/null
@@ -1,394 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.lucene.search.Query;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.SuppressForbidden;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.search.DocList;
-import org.apache.solr.search.QueryParsing;
-import org.apache.solr.search.facet.FacetDebugInfo;
-import org.apache.solr.util.SolrPluginUtils;
-
-import static org.apache.solr.common.params.CommonParams.FQ;
-import static org.apache.solr.common.params.CommonParams.JSON;
-
-/**
- * Adds debugging information to a request.
- * 
- *
- * @since solr 1.3
- */
-public class DebugComponent extends SearchComponent
-{
-  public static final String COMPONENT_NAME = "debug";
-  
-  /**
-   * A counter to ensure that no two RIDs are equal, even if they are generated in the same millisecond
-   */
-  private static final AtomicLong ridCounter = new AtomicLong();
-  
-  /**
-   * Map containing all the possible stages as keys and
-   * the corresponding human-readable purpose as values
-   */
-  private static final Map<Integer, String> stages;
-
-  static {
-      Map<Integer, String> map = new TreeMap<>();
-      map.put(ResponseBuilder.STAGE_START, "START");
-      map.put(ResponseBuilder.STAGE_PARSE_QUERY, "PARSE_QUERY");
-      map.put(ResponseBuilder.STAGE_TOP_GROUPS, "TOP_GROUPS");
-      map.put(ResponseBuilder.STAGE_EXECUTE_QUERY, "EXECUTE_QUERY");
-      map.put(ResponseBuilder.STAGE_GET_FIELDS, "GET_FIELDS");
-      map.put(ResponseBuilder.STAGE_DONE, "DONE");
-      stages = Collections.unmodifiableMap(map);
-  }
-  
-  @Override
-  public void prepare(ResponseBuilder rb) throws IOException
-  {
-    if(rb.isDebugTrack() && rb.isDistrib) {
-      rb.setNeedDocList(true);
-      doDebugTrack(rb);
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  @Override
-  public void process(ResponseBuilder rb) throws IOException
-  {
-    if( rb.isDebug() ) {
-      DocList results = null;
-      //some internal grouping requests won't have results value set
-      if(rb.getResults() != null) {
-        results = rb.getResults().docList;
-      }
-
-      NamedList stdinfo = SolrPluginUtils.doStandardDebug( rb.req,
-          rb.getQueryString(), rb.wrap(rb.getQuery()), results, rb.isDebugQuery(), rb.isDebugResults());
-      
-      NamedList info = rb.getDebugInfo();
-      if( info == null ) {
-        rb.setDebugInfo( stdinfo );
-        info = stdinfo;
-      }
-      else {
-        info.addAll( stdinfo );
-      }
-
-      FacetDebugInfo fdebug = (FacetDebugInfo)(rb.req.getContext().get("FacetDebugInfo"));
-      if (fdebug != null) {
-        info.add("facet-trace", fdebug.getFacetDebugInfo());
-      }
-
-      fdebug = (FacetDebugInfo)(rb.req.getContext().get("FacetDebugInfo-nonJson"));
-      if (fdebug != null) {
-        info.add("facet-debug", fdebug.getFacetDebugInfo());
-      }
-      
-      if (rb.req.getJSON() != null) {
-        info.add(JSON, rb.req.getJSON());
-      }
-
-      if (rb.isDebugQuery() && rb.getQparser() != null) {
-        rb.getQparser().addDebugInfo(rb.getDebugInfo());
-      }
-      
-      if (null != rb.getDebugInfo() ) {
-        if (rb.isDebugQuery() && null != rb.getFilters() ) {
-          info.add("filter_queries",rb.req.getParams().getParams(FQ));
-          List<String> fqs = new ArrayList<>(rb.getFilters().size());
-          for (Query fq : rb.getFilters()) {
-            fqs.add(QueryParsing.toString(fq, rb.req.getSchema()));
-          }
-          info.add("parsed_filter_queries",fqs);
-        }
-        
-        // Add this directly here?
-        rb.rsp.add("debug", rb.getDebugInfo() );
-      }
-    }
-  }
-
-
-  private void doDebugTrack(ResponseBuilder rb) {
-    SolrQueryRequest req = rb.req;
-    String rid = req.getParams().get(CommonParams.REQUEST_ID);
-    if(rid == null || "".equals(rid)) {
-      rid = generateRid(rb);
-      ModifiableSolrParams params = new ModifiableSolrParams(req.getParams());
-      params.add(CommonParams.REQUEST_ID, rid);//add rid to the request so that shards see it
-      req.setParams(params);
-    }
-    rb.addDebug(rid, "track", CommonParams.REQUEST_ID);//to see it in the response
-    rb.rsp.addToLog(CommonParams.REQUEST_ID, rid); //to see it in the logs of the landing core
-    
-  }
-
-  @SuppressForbidden(reason = "Need currentTimeMillis, only used for naming")
-  private String generateRid(ResponseBuilder rb) {
-    String hostName = rb.req.getCore().getCoreContainer().getHostName();
-    return hostName + "-" + rb.req.getCore().getName() + "-" + System.currentTimeMillis() + "-" + ridCounter.getAndIncrement();
-  }
-
-  @Override
-  public void modifyRequest(ResponseBuilder rb, SearchComponent who, ShardRequest sreq) {
-    if (!rb.isDebug()) return;
-    
-    // Turn on debug to get explain only when retrieving fields
-    if ((sreq.purpose & ShardRequest.PURPOSE_GET_FIELDS) != 0) {
-      sreq.purpose |= ShardRequest.PURPOSE_GET_DEBUG;
-      if (rb.isDebugAll()) {
-        sreq.params.set(CommonParams.DEBUG_QUERY, "true");
-      } else {
-        if (rb.isDebugQuery()){
-          sreq.params.add(CommonParams.DEBUG, CommonParams.QUERY);
-        }
-        if (rb.isDebugResults()){
-          sreq.params.add(CommonParams.DEBUG, CommonParams.RESULTS);
-        }
-      }
-    } else {
-      sreq.params.set(CommonParams.DEBUG_QUERY, "false");
-      sreq.params.set(CommonParams.DEBUG, "false");
-    }
-    if (rb.isDebugTimings()) {
-      sreq.params.add(CommonParams.DEBUG, CommonParams.TIMING);
-    } 
-    if (rb.isDebugTrack()) {
-      sreq.params.add(CommonParams.DEBUG, CommonParams.TRACK);
-      sreq.params.set(CommonParams.REQUEST_ID, rb.req.getParams().get(CommonParams.REQUEST_ID));
-      sreq.params.set(CommonParams.REQUEST_PURPOSE, SolrPluginUtils.getRequestPurpose(sreq.purpose));
-    }
-  }
-
-  @Override
-  public void handleResponses(ResponseBuilder rb, ShardRequest sreq) {
-    if (rb.isDebugTrack() && rb.isDistrib && !rb.finished.isEmpty()) {
-      @SuppressWarnings("unchecked")
-      NamedList<Object> stageList = (NamedList<Object>) ((NamedList<Object>)rb.getDebugInfo().get("track")).get(stages.get(rb.stage));
-      if(stageList == null) {
-        stageList = new SimpleOrderedMap<>();
-        rb.addDebug(stageList, "track", stages.get(rb.stage));
-      }
-      for(ShardResponse response: sreq.responses) {
-        stageList.add(response.getShard(), getTrackResponse(response));
-      }
-    }
-  }
-
-  private final static Set<String> EXCLUDE_SET = Collections.unmodifiableSet(new HashSet<>(Arrays.asList("explain")));
-
-  @Override
-  public void finishStage(ResponseBuilder rb) {
-    if (rb.isDebug() && rb.stage == ResponseBuilder.STAGE_GET_FIELDS) {
-      NamedList<Object> info = rb.getDebugInfo();
-      NamedList<Object> explain = new SimpleOrderedMap<>();
-
-      Map.Entry<String, Object>[]  arr =  new NamedList.NamedListEntry[rb.resultIds.size()];
-      // Will be set to true if there is at least one response with PURPOSE_GET_DEBUG
-      boolean hasGetDebugResponses = false;
-
-      for (ShardRequest sreq : rb.finished) {
-        for (ShardResponse srsp : sreq.responses) {
-          if (srsp.getException() != null) {
-            // can't expect the debug content if there was an exception for this request
-            // this should only happen when using shards.tolerant=true
-            continue;
-          }
-          NamedList sdebug = (NamedList)srsp.getSolrResponse().getResponse().get("debug");
-          info = (NamedList)merge(sdebug, info, EXCLUDE_SET);
-          if ((sreq.purpose & ShardRequest.PURPOSE_GET_DEBUG) != 0) {
-            hasGetDebugResponses = true;
-            if (rb.isDebugResults()) {
-              NamedList sexplain = (NamedList)sdebug.get("explain");
-              SolrPluginUtils.copyNamedListIntoArrayByDocPosInResponse(sexplain, rb.resultIds, arr);
-            }
-          }
-        }
-      }
-
-      if (rb.isDebugResults()) {
-         explain = SolrPluginUtils.removeNulls(arr, new SimpleOrderedMap<>());
-      }
-
-      if (!hasGetDebugResponses) {
-        if (info == null) {
-          info = new SimpleOrderedMap<>();
-        }
-        // No responses were received from shards. Show local query info.
-        SolrPluginUtils.doStandardQueryDebug(
-                rb.req, rb.getQueryString(),  rb.wrap(rb.getQuery()), rb.isDebugQuery(), info);
-        if (rb.isDebugQuery() && rb.getQparser() != null) {
-          rb.getQparser().addDebugInfo(info);
-        }
-      }
-      if (rb.isDebugResults()) {
-        int idx = info.indexOf("explain",0);
-        if (idx>=0) {
-          info.setVal(idx, explain);
-        } else {
-          info.add("explain", explain);
-        }
-      }
-
-      rb.setDebugInfo(info);
-      rb.rsp.add("debug", rb.getDebugInfo() );
-    }
-    
-  }
-
-
-  private NamedList<String> getTrackResponse(ShardResponse shardResponse) {
-    NamedList<String> namedList = new SimpleOrderedMap<>();
-    if (shardResponse.getException() != null) {
-      namedList.add("Exception", shardResponse.getException().getMessage());
-      return namedList;
-    }
-    NamedList<Object> responseNL = shardResponse.getSolrResponse().getResponse();
-    @SuppressWarnings("unchecked")
-    NamedList<Object> responseHeader = (NamedList<Object>)responseNL.get("responseHeader");
-    if(responseHeader != null) {
-      namedList.add("QTime", responseHeader.get("QTime").toString());
-    }
-    namedList.add("ElapsedTime", String.valueOf(shardResponse.getSolrResponse().getElapsedTime()));
-    namedList.add("RequestPurpose", shardResponse.getShardRequest().params.get(CommonParams.REQUEST_PURPOSE));
-    SolrDocumentList docList = (SolrDocumentList)shardResponse.getSolrResponse().getResponse().get("response");
-    if(docList != null) {
-      namedList.add("NumFound", String.valueOf(docList.getNumFound()));
-    }
-    namedList.add("Response", String.valueOf(responseNL));
-    return namedList;
-  }
-
-  protected Object merge(Object source, Object dest, Set<String> exclude) {
-    if (source == null) return dest;
-    if (dest == null) {
-      if (source instanceof NamedList) {
-        dest = source instanceof SimpleOrderedMap ? new SimpleOrderedMap() : new NamedList();
-      } else {
-        return source;
-      }
-    } else {
-
-      if (dest instanceof Collection) {
-        // merge as Set
-        if (!(dest instanceof Set)) {
-          dest = new LinkedHashSet<>((Collection<?>) dest);
-        }
-        if (source instanceof Collection) {
-          ((Collection)dest).addAll((Collection)source);
-        } else {
-          ((Collection)dest).add(source);
-        }
-        return dest;
-      } else if (source instanceof Number) {
-        if (dest instanceof Number) {
-          if (source instanceof Double || dest instanceof Double) {
-            return ((Number)source).doubleValue() + ((Number)dest).doubleValue();
-          }
-          return ((Number)source).longValue() + ((Number)dest).longValue();
-        }
-        // fall through
-      } else if (source instanceof String) {
-        if (source.equals(dest)) {
-          return dest;
-        }
-        // fall through
-      }
-    }
-
-
-    if (source instanceof NamedList && dest instanceof NamedList) {
-      NamedList<Object> tmp = new NamedList<>();
-      @SuppressWarnings("unchecked")
-      NamedList<Object> sl = (NamedList<Object>)source;
-      @SuppressWarnings("unchecked")
-      NamedList<Object> dl = (NamedList<Object>)dest;
-      for (int i=0; i<sl.size(); i++) {
-        String skey = sl.getName(i);
-        if (exclude.contains(skey)) continue;
-        Object sval = sl.getVal(i);
-        int didx = -1;
-
-        // optimize case where elements are in same position
-        if (i < dl.size()) {
-          String dkey = dl.getName(i);
-          if (skey == dkey || (skey!=null && skey.equals(dkey))) {
-            didx = i;
-          }
-        }
-
-        if (didx == -1) {
-          didx = dl.indexOf(skey, 0);
-        }
-
-        if (didx == -1) {
-          tmp.add(skey, merge(sval, null, Collections.emptySet()));
-        } else {
-          dl.setVal(didx, merge(sval, dl.getVal(didx), Collections.emptySet()));
-        }
-      }
-      dl.addAll(tmp);
-      return dl;
-    }
-
-    // only add to list if JSON is different
-    if (source.equals(dest)) return source;
-
-    // merge unlike elements in a list
-    List<Object> t = new ArrayList<>();
-    t.add(dest);
-    t.add(source);
-    return t;
-  }
-
-
-  
-  /////////////////////////////////////////////
-  ///  SolrInfoBean
-  ////////////////////////////////////////////
-
-  @Override
-  public String getDescription() {
-    return "Debug Information";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.OTHER;
-  }
-}
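
The debug modes this component consumed (debug=query|results|timing|track) can
be exercised from SolrJ. A minimal sketch against a SolrCloud collection; the
base URL and collection name are placeholders, and the "track" section is only
produced for distributed requests:

    import org.apache.solr.client.solrj.SolrQuery;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.response.QueryResponse;

    public class DebugTrackExample {
      public static void main(String[] args) throws Exception {
        // Placeholder Solr URL and collection name.
        try (HttpSolrClient client = new HttpSolrClient.Builder(
                "http://localhost:8983/solr/mycollection").build()) {
          SolrQuery query = new SolrQuery("*:*");
          // "track" records per-shard, per-stage progress keyed by rid;
          // "query" and "results" request parsed-query and explain output.
          query.set("debug", "track", "query", "results");
          QueryResponse rsp = client.query(query);
          // The merged "debug" section assembled in finishStage above.
          System.out.println(rsp.getDebugMap());
        }
      }
    }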

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
deleted file mode 100644
index 2cbe703..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
+++ /dev/null
@@ -1,828 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-
-import com.carrotsearch.hppc.IntHashSet;
-import com.carrotsearch.hppc.IntObjectHashMap;
-import com.carrotsearch.hppc.LongHashSet;
-import com.carrotsearch.hppc.LongObjectHashMap;
-import com.carrotsearch.hppc.LongObjectMap;
-import com.carrotsearch.hppc.cursors.IntObjectCursor;
-import com.carrotsearch.hppc.cursors.LongCursor;
-import com.carrotsearch.hppc.cursors.LongObjectCursor;
-import com.carrotsearch.hppc.cursors.ObjectCursor;
-import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.FilterLeafReader;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.MultiDocValues;
-import org.apache.lucene.index.NumericDocValues;
-import org.apache.lucene.index.OrdinalMap;
-import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.LeafCollector;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Scorable;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.ScoreMode;
-import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.TermInSetQuery;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.search.TopDocsCollector;
-import org.apache.lucene.search.TopFieldCollector;
-import org.apache.lucene.search.TopScoreDocCollector;
-import org.apache.lucene.search.TotalHits;
-import org.apache.lucene.util.BitSetIterator;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.CharsRefBuilder;
-import org.apache.lucene.util.FixedBitSet;
-import org.apache.lucene.util.LongValues;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.params.ExpandParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.core.PluginInfo;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.schema.FieldType;
-import org.apache.solr.schema.NumberType;
-import org.apache.solr.schema.SchemaField;
-import org.apache.solr.schema.StrField;
-import org.apache.solr.search.CollapsingQParserPlugin;
-import org.apache.solr.search.DocIterator;
-import org.apache.solr.search.DocList;
-import org.apache.solr.search.DocSlice;
-import org.apache.solr.search.QParser;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.search.SortSpecParsing;
-import org.apache.solr.uninverting.UninvertingReader;
-import org.apache.solr.util.plugin.PluginInfoInitialized;
-import org.apache.solr.util.plugin.SolrCoreAware;
-
-/**
- * The ExpandComponent is designed to work with the CollapsingPostFilter.
- * The CollapsingPostFilter collapses a result set on a field.
- * <p>
- * The ExpandComponent expands the collapsed groups for a single page.
- * <p>
- * http parameters:
- * <p>
- * expand=true <br>
- * expand.rows=5 <br>
- * expand.sort=field asc|desc<br>
- * expand.q=*:* (optional, overrides the main query)<br>
- * expand.fq=type:child (optional, overrides the main filter queries)<br>
- * expand.field=field (mandatory if not used with the CollapsingQParserPlugin)<br>
- */
-public class ExpandComponent extends SearchComponent implements PluginInfoInitialized, SolrCoreAware {
-  public static final String COMPONENT_NAME = "expand";
-  private static final int finishingStage = ResponseBuilder.STAGE_GET_FIELDS;
-  private PluginInfo info = PluginInfo.EMPTY_INFO;
-
-  @Override
-  public void init(PluginInfo info) {
-    this.info = info;
-  }
-
-  @Override
-  public void prepare(ResponseBuilder rb) throws IOException {
-    if (rb.req.getParams().getBool(ExpandParams.EXPAND, false)) {
-      rb.doExpand = true;
-    }
-  }
-
-  @Override
-  public void inform(SolrCore core) {
-
-  }
-
-  @SuppressWarnings("unchecked")
-  @Override
-  public void process(ResponseBuilder rb) throws IOException {
-
-    if (!rb.doExpand) {
-      return;
-    }
-
-    SolrQueryRequest req = rb.req;
-    SolrParams params = req.getParams();
-
-    String field = params.get(ExpandParams.EXPAND_FIELD);
-    String hint = null;
-    if (field == null) {
-      List<Query> filters = rb.getFilters();
-      if (filters != null) {
-        for (Query q : filters) {
-          if (q instanceof CollapsingQParserPlugin.CollapsingPostFilter) {
-            CollapsingQParserPlugin.CollapsingPostFilter cp = (CollapsingQParserPlugin.CollapsingPostFilter) q;
-            field = cp.getField();
-            hint = cp.hint;
-          }
-        }
-      }
-    }
-
-    if (field == null) {
-      throw new IOException("Expand field is null.");
-    }
-
-    String sortParam = params.get(ExpandParams.EXPAND_SORT);
-    String[] fqs = params.getParams(ExpandParams.EXPAND_FQ);
-    String qs = params.get(ExpandParams.EXPAND_Q);
-    int limit = params.getInt(ExpandParams.EXPAND_ROWS, 5);
-
-    Sort sort = null;
-
-    if (sortParam != null) {
-      sort = SortSpecParsing.parseSortSpec(sortParam, rb.req).getSort();
-    }
-
-    Query query;
-    if (qs == null) {
-      query = rb.getQuery();
-    } else {
-      try {
-        QParser parser = QParser.getParser(qs, req);
-        query = parser.getQuery();
-      } catch (Exception e) {
-        throw new IOException(e);
-      }
-    }
-
-    List<Query> newFilters = new ArrayList<>();
-
-    if (fqs == null) {
-      List<Query> filters = rb.getFilters();
-      if (filters != null) {
-        for (Query q : filters) {
-          if (!(q instanceof CollapsingQParserPlugin.CollapsingPostFilter)) {
-            newFilters.add(q);
-          }
-        }
-      }
-    } else {
-      try {
-        for (String fq : fqs) {
-          if (fq != null && fq.trim().length() != 0 && !fq.equals("*:*")) {
-            QParser fqp = QParser.getParser(fq, req);
-            newFilters.add(fqp.getQuery());
-          }
-        }
-      } catch (Exception e) {
-        throw new IOException(e);
-      }
-    }
-
-    SolrIndexSearcher searcher = req.getSearcher();
-    LeafReader reader = searcher.getSlowAtomicReader();
-
-    SchemaField schemaField = searcher.getSchema().getField(field);
-    FieldType fieldType = schemaField.getType();
-
-    SortedDocValues values = null;
-    long nullValue = 0L;
-
-    if(fieldType instanceof StrField) {
-      //Get The Top Level SortedDocValues
-      if(CollapsingQParserPlugin.HINT_TOP_FC.equals(hint)) {
-        @SuppressWarnings("resource") LeafReader uninvertingReader = UninvertingReader.wrap(
-            new ReaderWrapper(searcher.getSlowAtomicReader(), field),
-            Collections.singletonMap(field, UninvertingReader.Type.SORTED)::get);
-        values = uninvertingReader.getSortedDocValues(field);
-      } else {
-        values = DocValues.getSorted(reader, field);
-      }
-    } else {
-      //Get the nullValue for the numeric collapse field
-      String defaultValue = searcher.getSchema().getField(field).getDefaultValue();
-      
-      final NumberType numType = fieldType.getNumberType();
-
-      // Since the expand component depends on the operation of the collapse component, 
-      // which validates that numeric field types are 32-bit,
-      // we don't need to handle invalid 64-bit field types here.
-      if (defaultValue != null) {
-        if (numType == NumberType.INTEGER) {
-          nullValue = Long.parseLong(defaultValue);
-        } else if (numType == NumberType.FLOAT) {
-          nullValue = Float.floatToIntBits(Float.parseFloat(defaultValue));
-        }
-      } else if (NumberType.FLOAT.equals(numType)) { // Integer case already handled by nullValue defaulting to 0
-        nullValue = Float.floatToIntBits(0.0f);
-      }
-    }
-
-    FixedBitSet groupBits = null;
-    LongHashSet groupSet = null;
-    DocList docList = rb.getResults().docList;
-    IntHashSet collapsedSet = new IntHashSet(docList.size() * 2);
-
-    //Gather the groups for the current page of documents
-    DocIterator idit = docList.iterator();
-    int[] globalDocs = new int[docList.size()];
-    int docsIndex = -1;
-    while (idit.hasNext()) {
-      globalDocs[++docsIndex] = idit.nextDoc();
-    }
-
-    Arrays.sort(globalDocs);
-    Query groupQuery = null;
-
-    /*
-    * This code gathers the group information for the current page.
-    */
-    List<LeafReaderContext> contexts = searcher.getTopReaderContext().leaves();
-
-    if(contexts.size() == 0) {
-      //When no context is available we can skip expanding
-      return;
-    }
-
-    int currentContext = 0;
-    int currentDocBase = contexts.get(currentContext).docBase;
-    int nextDocBase = (currentContext+1)<contexts.size() ? contexts.get(currentContext+1).docBase : Integer.MAX_VALUE;
-    IntObjectHashMap<BytesRef> ordBytes = null;
-    if(values != null) {
-      groupBits = new FixedBitSet(values.getValueCount());
-      OrdinalMap ordinalMap = null;
-      SortedDocValues[] sortedDocValues = null;
-      LongValues segmentOrdinalMap = null;
-      SortedDocValues currentValues = null;
-      if(values instanceof MultiDocValues.MultiSortedDocValues) {
-        ordinalMap = ((MultiDocValues.MultiSortedDocValues)values).mapping;
-        sortedDocValues = ((MultiDocValues.MultiSortedDocValues)values).values;
-        currentValues = sortedDocValues[currentContext];
-        segmentOrdinalMap = ordinalMap.getGlobalOrds(currentContext);
-      }
-      int count = 0;
-
-      ordBytes = new IntObjectHashMap<>();
-
-      for(int i=0; i<globalDocs.length; i++) {
-        int globalDoc = globalDocs[i];
-        while(globalDoc >= nextDocBase) {
-          currentContext++;
-          currentDocBase = contexts.get(currentContext).docBase;
-          nextDocBase = (currentContext+1) < contexts.size() ? contexts.get(currentContext+1).docBase : Integer.MAX_VALUE;
-          if(ordinalMap != null) {
-            currentValues = sortedDocValues[currentContext];
-            segmentOrdinalMap = ordinalMap.getGlobalOrds(currentContext);
-          }
-        }
-
-        int contextDoc = globalDoc - currentDocBase;
-        if(ordinalMap != null) {
-          if (contextDoc > currentValues.docID()) {
-            currentValues.advance(contextDoc);
-          }
-          if (contextDoc == currentValues.docID()) {
-            int ord = currentValues.ordValue();
-            ++count;
-            BytesRef ref = currentValues.lookupOrd(ord);
-            ord = (int)segmentOrdinalMap.get(ord);
-            ordBytes.put(ord, BytesRef.deepCopyOf(ref));
-            groupBits.set(ord);
-            collapsedSet.add(globalDoc);
-          }
-        } else {
-          if (globalDoc > values.docID()) {
-            values.advance(globalDoc);
-          }
-          if (globalDoc == values.docID()) {
-            int ord = values.ordValue();
-            ++count;
-            BytesRef ref = values.lookupOrd(ord);
-            ordBytes.put(ord, BytesRef.deepCopyOf(ref));
-            groupBits.set(ord);
-            collapsedSet.add(globalDoc);
-          }
-        }
-      }
-
-      if(count > 0 && count < 200) {
-        try {
-          groupQuery = getGroupQuery(field, count, ordBytes);
-        } catch(Exception e) {
-          throw new IOException(e);
-        }
-      }
-    } else {
-      groupSet = new LongHashSet(docList.size());
-      NumericDocValues collapseValues = contexts.get(currentContext).reader().getNumericDocValues(field);
-      int count = 0;
-      for(int i=0; i<globalDocs.length; i++) {
-        int globalDoc = globalDocs[i];
-        while(globalDoc >= nextDocBase) {
-          currentContext++;
-          currentDocBase = contexts.get(currentContext).docBase;
-          nextDocBase = currentContext+1 < contexts.size() ? contexts.get(currentContext+1).docBase : Integer.MAX_VALUE;
-          collapseValues = contexts.get(currentContext).reader().getNumericDocValues(field);
-        }
-        int contextDoc = globalDoc - currentDocBase;
-        int valueDocID = collapseValues.docID();
-        if (valueDocID < contextDoc) {
-          valueDocID = collapseValues.advance(contextDoc);
-        }
-        long value;
-        if (valueDocID == contextDoc) {
-          value = collapseValues.longValue();
-        } else {
-          value = 0;
-        }
-        if(value != nullValue) {
-          ++count;
-          groupSet.add(value);
-          collapsedSet.add(globalDoc);
-        }
-      }
-
-      if(count > 0 && count < 200) {
-        if (fieldType.isPointField()) {
-          groupQuery = getPointGroupQuery(schemaField, count, groupSet);
-        } else {
-          groupQuery = getGroupQuery(field, fieldType, count, groupSet);
-        }
-      }
-    }
-
-    Collector collector;
-    if (sort != null)
-      sort = sort.rewrite(searcher);
-
-
-    Collector groupExpandCollector = null;
-
-    if(values != null) {
-      //Get The Top Level SortedDocValues again so we can re-iterate:
-      if(CollapsingQParserPlugin.HINT_TOP_FC.equals(hint)) {
-        @SuppressWarnings("resource") LeafReader uninvertingReader = UninvertingReader.wrap(
-            new ReaderWrapper(searcher.getSlowAtomicReader(), field),
-            Collections.singletonMap(field, UninvertingReader.Type.SORTED)::get);
-        values = uninvertingReader.getSortedDocValues(field);
-      } else {
-        values = DocValues.getSorted(reader, field);
-      }
-      
-      groupExpandCollector = new GroupExpandCollector(values, groupBits, collapsedSet, limit, sort);
-    } else {
-      groupExpandCollector = new NumericGroupExpandCollector(field, nullValue, groupSet, collapsedSet, limit, sort);
-    }
-
-    if(groupQuery !=  null) {
-      //Limits the results to documents that are in the same group as the documents in the page.
-      newFilters.add(groupQuery);
-    }
-
-    SolrIndexSearcher.ProcessedFilter pfilter = searcher.getProcessedFilter(null, newFilters);
-    if (pfilter.postFilter != null) {
-      pfilter.postFilter.setLastDelegate(groupExpandCollector);
-      collector = pfilter.postFilter;
-    } else {
-      collector = groupExpandCollector;
-    }
-
-    if (pfilter.filter == null) {
-      searcher.search(query, collector);
-    } else {
-      Query q = new BooleanQuery.Builder()
-          .add(query, Occur.MUST)
-          .add(pfilter.filter, Occur.FILTER)
-          .build();
-      searcher.search(q, collector);
-    }
-    LongObjectMap<Collector> groups = ((GroupCollector) groupExpandCollector).getGroups();
-    NamedList outMap = new SimpleOrderedMap();
-    CharsRefBuilder charsRef = new CharsRefBuilder();
-    for (LongObjectCursor<Collector> cursor : groups) {
-      long groupValue = cursor.key;
-      TopDocsCollector<?> topDocsCollector = TopDocsCollector.class.cast(cursor.value);
-      TopDocs topDocs = topDocsCollector.topDocs();
-      ScoreDoc[] scoreDocs = topDocs.scoreDocs;
-      if (scoreDocs.length > 0) {
-        int[] docs = new int[scoreDocs.length];
-        float[] scores = new float[scoreDocs.length];
-        for (int i = 0; i < docs.length; i++) {
-          ScoreDoc scoreDoc = scoreDocs[i];
-          docs[i] = scoreDoc.doc;
-          scores[i] = scoreDoc.score;
-        }
-        assert topDocs.totalHits.relation == TotalHits.Relation.EQUAL_TO;
-        DocSlice slice = new DocSlice(0, docs.length, docs, scores, topDocs.totalHits.value, Float.NaN);
-
-        if(fieldType instanceof StrField) {
-          final BytesRef bytesRef = ordBytes.get((int)groupValue);
-          fieldType.indexedToReadable(bytesRef, charsRef);
-          String group = charsRef.toString();
-          outMap.add(group, slice);
-        } else {
-          outMap.add(numericToString(fieldType, groupValue), slice);
-        }
-      }
-    }
-
-    rb.rsp.add("expanded", outMap);
-  }
-
-  @Override
-  public int distributedProcess(ResponseBuilder rb) throws IOException {
-    if (rb.doExpand && rb.stage < finishingStage) {
-      return finishingStage;
-    }
-    return ResponseBuilder.STAGE_DONE;
-  }
-    
-  @Override
-  public void modifyRequest(ResponseBuilder rb, SearchComponent who, ShardRequest sreq) {
-    SolrParams params = rb.req.getParams();
-    if (!params.getBool(COMPONENT_NAME, false)) return;
-    if (!rb.onePassDistributedQuery && (sreq.purpose & ShardRequest.PURPOSE_GET_FIELDS) == 0) {
-      sreq.params.set(COMPONENT_NAME, "false");
-    } else {
-      sreq.params.set(COMPONENT_NAME, "true");
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  @Override
-  public void handleResponses(ResponseBuilder rb, ShardRequest sreq) {
-
-    if (!rb.doExpand) {
-      return;
-    }
-    if ((sreq.purpose & ShardRequest.PURPOSE_GET_FIELDS) != 0) {
-      SolrQueryRequest req = rb.req;
-      NamedList expanded = (NamedList) req.getContext().get("expanded");
-      if (expanded == null) {
-        expanded = new SimpleOrderedMap();
-        req.getContext().put("expanded", expanded);
-      }
-
-      for (ShardResponse srsp : sreq.responses) {
-        NamedList response = srsp.getSolrResponse().getResponse();
-        NamedList ex = (NamedList) response.get("expanded");
-        for (int i=0; i<ex.size(); i++) {
-          String name = ex.getName(i);
-          SolrDocumentList val = (SolrDocumentList) ex.getVal(i);
-          expanded.add(name, val);
-        }
-      }
-    }
-  }
-
-  @Override
-  public void finishStage(ResponseBuilder rb) {
-
-    if (!rb.doExpand) {
-      return;
-    }
-
-    if (rb.stage != finishingStage) {
-      return;
-    }
-
-    NamedList expanded = (NamedList) rb.req.getContext().get("expanded");
-    if (expanded == null) {
-      expanded = new SimpleOrderedMap();
-    }
-
-    rb.rsp.add("expanded", expanded);
-  }
-
-  private static class GroupExpandCollector implements Collector, GroupCollector {
-    private SortedDocValues docValues;
-    private OrdinalMap ordinalMap;
-    private SortedDocValues segmentValues;
-    private LongValues segmentOrdinalMap;
-    private MultiDocValues.MultiSortedDocValues multiSortedDocValues;
-
-    private LongObjectMap<Collector> groups;
-    private FixedBitSet groupBits;
-    private IntHashSet collapsedSet;
-
-    public GroupExpandCollector(SortedDocValues docValues, FixedBitSet groupBits, IntHashSet collapsedSet, int limit, Sort sort) throws IOException {
-      int numGroups = collapsedSet.size();
-      groups = new LongObjectHashMap<>(numGroups);
-      DocIdSetIterator iterator = new BitSetIterator(groupBits, 0); // cost is not useful here
-      int group;
-      while ((group = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
-        Collector collector = (sort == null) ? TopScoreDocCollector.create(limit, Integer.MAX_VALUE) : TopFieldCollector.create(sort, limit, Integer.MAX_VALUE);
-        groups.put(group, collector);
-      }
-
-      this.collapsedSet = collapsedSet;
-      this.groupBits = groupBits;
-      this.docValues = docValues;
-      if(docValues instanceof MultiDocValues.MultiSortedDocValues) {
-        this.multiSortedDocValues = (MultiDocValues.MultiSortedDocValues)docValues;
-        this.ordinalMap = multiSortedDocValues.mapping;
-      }
-    }
-
-    public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
-      final int docBase = context.docBase;
-
-      if(ordinalMap != null) {
-        this.segmentValues = this.multiSortedDocValues.values[context.ord];
-        this.segmentOrdinalMap = ordinalMap.getGlobalOrds(context.ord);
-      }
-
-      final LongObjectMap<LeafCollector> leafCollectors = new LongObjectHashMap<>();
-      for (LongObjectCursor<Collector> entry : groups) {
-        leafCollectors.put(entry.key, entry.value.getLeafCollector(context));
-      }
-      return new LeafCollector() {
-
-        @Override
-        public void setScorer(Scorable scorer) throws IOException {
-          for (ObjectCursor<LeafCollector> c : leafCollectors.values()) {
-            c.value.setScorer(scorer);
-          }
-        }
-
-        @Override
-        public void collect(int docId) throws IOException {
-          int globalDoc = docId + docBase;
-          int ord = -1;
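-          // on multi-segment indexes, map the segment-local ordinal to its global ordinal before checking group membership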
-          if(ordinalMap != null) {
-            if (docId > segmentValues.docID()) {
-              segmentValues.advance(docId);
-            }
-            if (docId == segmentValues.docID()) {
-              ord = (int)segmentOrdinalMap.get(segmentValues.ordValue());
-            } else {
-              ord = -1;
-            }
-          } else {
-            if (docValues.advanceExact(globalDoc)) {
-              ord = docValues.ordValue();
-            } else {
-              ord = -1;
-            }
-          }
-
-          if (ord > -1 && groupBits.get(ord) && !collapsedSet.contains(globalDoc)) {
-            LeafCollector c = leafCollectors.get(ord);
-            c.collect(docId);
-          }
-        }
-      };
-    }
-
-    public LongObjectMap<Collector> getGroups() {
-      return groups;
-    }
-  }
-
-  private static class NumericGroupExpandCollector implements Collector, GroupCollector {
-    private NumericDocValues docValues;
-
-    private String field;
-    private LongObjectHashMap<Collector> groups;
-
-    private IntHashSet collapsedSet;
-    private long nullValue;
-
-    public NumericGroupExpandCollector(String field, long nullValue, LongHashSet groupSet, IntHashSet collapsedSet, int limit, Sort sort) throws IOException {
-      int numGroups = collapsedSet.size();
-      this.nullValue = nullValue;
-      groups = new LongObjectHashMap<>(numGroups);
-      Iterator<LongCursor> iterator = groupSet.iterator();
-      while (iterator.hasNext()) {
-        LongCursor cursor = iterator.next();
-        Collector collector = (sort == null) ? TopScoreDocCollector.create(limit, Integer.MAX_VALUE) : TopFieldCollector.create(sort, limit, Integer.MAX_VALUE);
-        groups.put(cursor.value, collector);
-      }
-
-      this.field = field;
-      this.collapsedSet = collapsedSet;
-    }
-
-    public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
-      final int docBase = context.docBase;
-      this.docValues = context.reader().getNumericDocValues(this.field);
-
-      final LongObjectHashMap<LeafCollector> leafCollectors = new LongObjectHashMap<>();
-
-      for (LongObjectCursor<Collector> entry : groups) {
-        leafCollectors.put(entry.key, entry.value.getLeafCollector(context));
-      }
-
-      return new LeafCollector() {
-
-        @Override
-        public void setScorer(Scorable scorer) throws IOException {
-          for (ObjectCursor<LeafCollector> c : leafCollectors.values()) {
-            c.value.setScorer(scorer);
-          }
-        }
-
-        @Override
-        public void collect(int docId) throws IOException {
-          long value;
-          if (docValues.advanceExact(docId)) {
-            value = docValues.longValue();
-          } else {
-            value = 0;
-          }
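-          // collect only docs with a real (non-null) group value that is one of the collapsed groups, skipping the collapsed head docs themselves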
-          final int index;
-          if (value != nullValue && 
-              (index = leafCollectors.indexOf(value)) >= 0 && 
-              !collapsedSet.contains(docId + docBase)) {
-            leafCollectors.indexGet(index).collect(docId);
-          }
-        }
-      };
-    }
-
-    public LongObjectHashMap<Collector> getGroups() {
-      return groups;
-    }
-
-  }
-
-  //TODO let's just do a simple abstract base class -- a fine use of inheritance
-  private interface GroupCollector extends Collector {
-    public LongObjectMap<Collector> getGroups();
-
-    @Override
-    default ScoreMode scoreMode() {
-      final LongObjectMap<Collector> groups = getGroups();
-      if (groups.isEmpty()) {
-        return ScoreMode.COMPLETE; // doesn't matter?
-      } else {
-        return groups.iterator().next().value.scoreMode(); // assume all per-group collectors share the same score mode
-      }
-    }
-  }
-
-  private Query getGroupQuery(String fname,
-                           FieldType ft,
-                           int size,
-                           LongHashSet groupSet) {
-
-    BytesRef[] bytesRefs = new BytesRef[size];
-    BytesRefBuilder term = new BytesRefBuilder();
-    Iterator<LongCursor> it = groupSet.iterator();
-    int index = -1;
-
-    while (it.hasNext()) {
-      LongCursor cursor = it.next();
-      String stringVal = numericToString(ft, cursor.value);
-      ft.readableToIndexed(stringVal, term);
-      bytesRefs[++index] = term.toBytesRef();
-    }
-
-    return new TermInSetQuery(fname, bytesRefs);
-  }
-
-  private Query getPointGroupQuery(SchemaField sf,
-                                   int size,
-                                   LongHashSet groupSet) {
-
-    Iterator<LongCursor> it = groupSet.iterator();
-    List<String> values = new ArrayList<>(size);
-    FieldType ft = sf.getType();
-    while (it.hasNext()) {
-      LongCursor cursor = it.next();
-      values.add(numericToString(ft, cursor.value));
-    }
-
-    return sf.getType().getSetQuery(null, sf, values);
-  }
-
-  private String numericToString(FieldType fieldType, long val) {
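-    // group values are carried as raw long bits; FLOAT and DOUBLE are decoded from their IEEE 754 bit patterns (DATE is unsupported here)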
-    if (fieldType.getNumberType() != null) {
-      switch (fieldType.getNumberType()) {
-        case INTEGER:
-        case LONG:
-          return Long.toString(val);
-        case FLOAT:
-          return Float.toString(Float.intBitsToFloat((int)val));
-        case DOUBLE:
-          return Double.toString(Double.longBitsToDouble(val));
-        case DATE:
-          break;
-      }
-    }
-    throw new IllegalArgumentException("FieldType must be INT,LONG,FLOAT,DOUBLE found " + fieldType);
-  }
-
-  private Query getGroupQuery(String fname,
-                              int size,
-                              IntObjectHashMap<BytesRef> ordBytes) throws Exception {
-    BytesRef[] bytesRefs = new BytesRef[size];
-    int index = -1;
-    Iterator<IntObjectCursor<BytesRef>>it = ordBytes.iterator();
-    while (it.hasNext()) {
-      IntObjectCursor<BytesRef> cursor = it.next();
-      bytesRefs[++index] = cursor.value;
-    }
-    return new TermInSetQuery(fname, bytesRefs);
-  }
-
-
-  ////////////////////////////////////////////
-  ///  SolrInfoBean
-  ////////////////////////////////////////////
-
-  @Override
-  public String getDescription() {
-    return "Expand Component";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.QUERY;
-  }
-
-  // this reader alters the content of the given reader, so it should not
-  // blindly delegate the cache helpers (see the NOTE on the overrides below)
-  private static class ReaderWrapper extends FilterLeafReader {
-
-    private String field;
-
-    public ReaderWrapper(LeafReader leafReader, String field) {
-      super(leafReader);
-      this.field = field;
-    }
-
-    public SortedDocValues getSortedDocValues(String field) {
-      return null;
-    }
-
-    public FieldInfos getFieldInfos() {
-      Iterator<FieldInfo> it = in.getFieldInfos().iterator();
-      List<FieldInfo> newInfos = new ArrayList<>();
-      while(it.hasNext()) {
-        FieldInfo fieldInfo = it.next();
-
-        if(fieldInfo.name.equals(field)) {
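-          // republish this field with DocValuesType.NONE so an UninvertingReader can regenerate its doc values (see NOTE below)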
-          FieldInfo f = new FieldInfo(fieldInfo.name,
-              fieldInfo.number,
-              fieldInfo.hasVectors(),
-              fieldInfo.hasNorms(),
-              fieldInfo.hasPayloads(),
-              fieldInfo.getIndexOptions(),
-              DocValuesType.NONE,
-              fieldInfo.getDocValuesGen(),
-              fieldInfo.attributes(),
-              fieldInfo.getPointDataDimensionCount(),
-              fieldInfo.getPointIndexDimensionCount(),
-              fieldInfo.getPointNumBytes(),
-              fieldInfo.isSoftDeletesField());
-          newInfos.add(f);
-
-        } else {
-          newInfos.add(fieldInfo);
-        }
-      }
-      FieldInfos infos = new FieldInfos(newInfos.toArray(new FieldInfo[newInfos.size()]));
-      return infos;
-    }
-
-    // NOTE: delegating the caches is wrong here as we are altering the content
-    // of the reader, this should ONLY be used under an uninvertingreader which
-    // will restore doc values back using uninversion, otherwise all sorts of
-    // crazy things could happen.
-
-    @Override
-    public CacheHelper getCoreCacheHelper() {
-      return in.getCoreCacheHelper();
-    }
-
-    @Override
-    public CacheHelper getReaderCacheHelper() {
-      return in.getReaderCacheHelper();
-    }
-  }
-
-}
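
For reference, the "expanded" section assembled above maps each collapsed group value to the DocSlice of documents for that group, and SolrJ surfaces it as a map. A minimal sketch of consuming it, assuming a local node with the example techproducts configset (the URL, collection name, and manu_id_s field are illustrative):

import java.util.Map;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocumentList;

public class ExpandExample {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/techproducts").build()) {
      SolrQuery q = new SolrQuery("*:*");
      q.setFilterQueries("{!collapse field=manu_id_s}"); // one head document per group
      q.set("expand", "true");                           // ask ExpandComponent for the group members
      QueryResponse rsp = client.query(q);
      Map<String, SolrDocumentList> expanded = rsp.getExpandedResults(); // group value -> expanded docs
      expanded.forEach((group, docs) -> System.out.println(group + " -> " + docs.getNumFound()));
    }
  }
}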


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerStatusCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerStatusCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerStatusCmd.java
deleted file mode 100644
index 6f0bbfd..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerStatusCmd.java
+++ /dev/null
@@ -1,113 +0,0 @@
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.api.collections;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import com.codahale.metrics.Timer;
-import org.apache.solr.cloud.OverseerTaskProcessor;
-import org.apache.solr.cloud.Stats;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.util.stats.MetricUtils;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class OverseerStatusCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public OverseerStatusCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    String leaderNode = OverseerTaskProcessor.getLeaderNode(zkStateReader.getZkClient());
-    results.add("leader", leaderNode);
-    Stat stat = new Stat();
-    zkStateReader.getZkClient().getData("/overseer/queue",null, stat, true);
-    results.add("overseer_queue_size", stat.getNumChildren());
-    stat = new Stat();
-    zkStateReader.getZkClient().getData("/overseer/queue-work",null, stat, true);
-    results.add("overseer_work_queue_size", stat.getNumChildren());
-    stat = new Stat();
-    zkStateReader.getZkClient().getData("/overseer/collection-queue-work",null, stat, true);
-    results.add("overseer_collection_queue_size", stat.getNumChildren());
-
-    NamedList overseerStats = new NamedList();
-    NamedList collectionStats = new NamedList();
-    NamedList stateUpdateQueueStats = new NamedList();
-    NamedList workQueueStats = new NamedList();
-    NamedList collectionQueueStats = new NamedList();
-    Stats stats = ocmh.stats;
-    for (Map.Entry<String, Stats.Stat> entry : stats.getStats().entrySet()) {
-      String key = entry.getKey();
-      NamedList<Object> lst = new SimpleOrderedMap<>();
-      if (key.startsWith("collection_"))  {
-        collectionStats.add(key.substring(11), lst);
-        int successes = stats.getSuccessCount(entry.getKey());
-        int errors = stats.getErrorCount(entry.getKey());
-        lst.add("requests", successes);
-        lst.add("errors", errors);
-        List<Stats.FailedOp> failureDetails = stats.getFailureDetails(key);
-        if (failureDetails != null) {
-          List<SimpleOrderedMap<Object>> failures = new ArrayList<>();
-          for (Stats.FailedOp failedOp : failureDetails) {
-            SimpleOrderedMap<Object> fail = new SimpleOrderedMap<>();
-            fail.add("request", failedOp.req.getProperties());
-            fail.add("response", failedOp.resp.getResponse());
-            failures.add(fail);
-          }
-          lst.add("recent_failures", failures);
-        }
-      } else if (key.startsWith("/overseer/queue_"))  {
-        stateUpdateQueueStats.add(key.substring(16), lst);
-      } else if (key.startsWith("/overseer/queue-work_"))  {
-        workQueueStats.add(key.substring(21), lst);
-      } else if (key.startsWith("/overseer/collection-queue-work_"))  {
-        collectionQueueStats.add(key.substring(32), lst);
-      } else  {
-        // overseer stats
-        overseerStats.add(key, lst);
-        int successes = stats.getSuccessCount(entry.getKey());
-        int errors = stats.getErrorCount(entry.getKey());
-        lst.add("requests", successes);
-        lst.add("errors", errors);
-      }
-      Timer timer = entry.getValue().requestTime;
-      MetricUtils.addMetrics(lst, timer);
-    }
-    results.add("overseer_operations", overseerStats);
-    results.add("collection_operations", collectionStats);
-    results.add("overseer_queue", stateUpdateQueueStats);
-    results.add("overseer_internal_queue", workQueueStats);
-    results.add("collection_queue", collectionQueueStats);
-
-  }
-}
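
The sections built by this command back the OVERSEERSTATUS collections API action. A minimal SolrJ sketch of invoking it, assuming the getOverseerStatus helper of this era of SolrJ (the URL is illustrative):

import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;

public class OverseerStatusExample {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      CollectionAdminResponse rsp = CollectionAdminRequest.getOverseerStatus().process(client);
      System.out.println("leader: " + rsp.getResponse().get("leader"));
      System.out.println("queue size: " + rsp.getResponse().get("overseer_queue_size"));
    }
  }
}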

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
deleted file mode 100644
index c622f0f..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.api.collections;
-
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.cloud.ActiveReplicaWatcher;
-import org.apache.solr.common.SolrCloseableLatch;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.CollectionStateWatcher;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.CommonAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public ReplaceNodeCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    String source = message.getStr(CollectionParams.SOURCE_NODE, message.getStr("source"));
-    String target = message.getStr(CollectionParams.TARGET_NODE, message.getStr("target"));
-    boolean waitForFinalState = message.getBool(CommonAdminParams.WAIT_FOR_FINAL_STATE, false);
-    if (source == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "sourceNode is a required param");
-    }
-    String async = message.getStr("async");
-    int timeout = message.getInt("timeout", 10 * 60); // 10 minutes
-    boolean parallel = message.getBool("parallel", false);
-    ClusterState clusterState = zkStateReader.getClusterState();
-
-    if (!clusterState.liveNodesContain(source)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Source Node: " + source + " is not live");
-    }
-    if (target != null && !clusterState.liveNodesContain(target)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Target Node: " + target + " is not live");
-    }
-    List<ZkNodeProps> sourceReplicas = getReplicasOfNode(source, clusterState);
-    // how many leaders are we moving? for these replicas we have to make sure that either:
-    // * another existing replica can become a leader, or
-    // * we wait until the newly created replica completes recovery (and can become the new leader)
-    // If waitForFinalState=true we wait for all replicas
-    int numLeaders = 0;
-    for (ZkNodeProps props : sourceReplicas) {
-      if (props.getBool(ZkStateReader.LEADER_PROP, false) || waitForFinalState) {
-        numLeaders++;
-      }
-    }
-    // map of collectionName_coreNodeName to watchers
-    Map<String, CollectionStateWatcher> watchers = new HashMap<>();
-    List<ZkNodeProps> createdReplicas = new ArrayList<>();
-
-    AtomicBoolean anyOneFailed = new AtomicBoolean(false);
-    SolrCloseableLatch countDownLatch = new SolrCloseableLatch(sourceReplicas.size(), ocmh);
-
-    SolrCloseableLatch replicasToRecover = new SolrCloseableLatch(numLeaders, ocmh);
-    AtomicReference<PolicyHelper.SessionWrapper> sessionWrapperRef = new AtomicReference<>();
-    try {
-      for (ZkNodeProps sourceReplica : sourceReplicas) {
-        NamedList nl = new NamedList();
-        String sourceCollection = sourceReplica.getStr(COLLECTION_PROP);
-        log.info("Going to create replica for collection={} shard={} on node={}", sourceCollection, sourceReplica.getStr(SHARD_ID_PROP), target);
-        String targetNode = target;
-        if (targetNode == null) {
-          Replica.Type replicaType = Replica.Type.get(sourceReplica.getStr(ZkStateReader.REPLICA_TYPE));
-          int numNrtReplicas = replicaType == Replica.Type.NRT ? 1 : 0;
-          int numTlogReplicas = replicaType == Replica.Type.TLOG ? 1 : 0;
-          int numPullReplicas = replicaType == Replica.Type.PULL ? 1 : 0;
-          Assign.AssignRequest assignRequest = new Assign.AssignRequestBuilder()
-              .forCollection(sourceCollection)
-              .forShard(Collections.singletonList(sourceReplica.getStr(SHARD_ID_PROP)))
-              .assignNrtReplicas(numNrtReplicas)
-              .assignTlogReplicas(numTlogReplicas)
-              .assignPullReplicas(numPullReplicas)
-              .onNodes(new ArrayList<>(ocmh.cloudManager.getClusterStateProvider().getLiveNodes()))
-              .build();
-          Assign.AssignStrategyFactory assignStrategyFactory = new Assign.AssignStrategyFactory(ocmh.cloudManager);
-          Assign.AssignStrategy assignStrategy = assignStrategyFactory.create(clusterState, clusterState.getCollection(sourceCollection));
-          targetNode = assignStrategy.assign(ocmh.cloudManager, assignRequest).get(0).node;
-          sessionWrapperRef.set(PolicyHelper.getLastSessionWrapper(true));
-        }
-        ZkNodeProps msg = sourceReplica.plus("parallel", String.valueOf(parallel)).plus(CoreAdminParams.NODE, targetNode);
-        if (async != null) msg.getProperties().put(ASYNC, async);
-        final ZkNodeProps addedReplica = ocmh.addReplica(clusterState,
-            msg, nl, () -> {
-              countDownLatch.countDown();
-              if (nl.get("failure") != null) {
-                String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
-                    " on node=%s", sourceCollection, sourceReplica.getStr(SHARD_ID_PROP), target);
-                log.warn(errorString);
-                // one replica creation failed. Make the best attempt to
-                // delete all the replicas created so far in the target
-                // and exit
-                synchronized (results) {
-                  results.add("failure", errorString);
-                  anyOneFailed.set(true);
-                }
-              } else {
-                log.debug("Successfully created replica for collection={} shard={} on node={}",
-                    sourceCollection, sourceReplica.getStr(SHARD_ID_PROP), target);
-              }
-            }).get(0);
-
-        if (addedReplica != null) {
-          createdReplicas.add(addedReplica);
-          if (sourceReplica.getBool(ZkStateReader.LEADER_PROP, false) || waitForFinalState) {
-            String shardName = sourceReplica.getStr(SHARD_ID_PROP);
-            String replicaName = sourceReplica.getStr(ZkStateReader.REPLICA_PROP);
-            String collectionName = sourceCollection;
-            String key = collectionName + "_" + replicaName;
-            CollectionStateWatcher watcher;
-            if (waitForFinalState) {
-              watcher = new ActiveReplicaWatcher(collectionName, null,
-                  Collections.singletonList(addedReplica.getStr(ZkStateReader.CORE_NAME_PROP)), replicasToRecover);
-            } else {
-              watcher = new LeaderRecoveryWatcher(collectionName, shardName, replicaName,
-                  addedReplica.getStr(ZkStateReader.CORE_NAME_PROP), replicasToRecover);
-            }
-            watchers.put(key, watcher);
-            log.debug("--- adding " + key + ", " + watcher);
-            zkStateReader.registerCollectionStateWatcher(collectionName, watcher);
-          } else {
-            log.debug("--- not waiting for " + addedReplica);
-          }
-        }
-      }
-
-      log.debug("Waiting for replicas to be added");
-      if (!countDownLatch.await(timeout, TimeUnit.SECONDS)) {
-        log.info("Timed out waiting for replicas to be added");
-        anyOneFailed.set(true);
-      } else {
-        log.debug("Finished waiting for replicas to be added");
-      }
-    } finally {
-      PolicyHelper.SessionWrapper sw = sessionWrapperRef.get();
-      if (sw != null) sw.release();
-    }
-    // now wait for leader replicas to recover
-    log.debug("Waiting for " + numLeaders + " leader replicas to recover");
-    if (!replicasToRecover.await(timeout, TimeUnit.SECONDS)) {
-      log.info("Timed out waiting for " + replicasToRecover.getCount() + " leader replicas to recover");
-      anyOneFailed.set(true);
-    } else {
-      log.debug("Finished waiting for leader replicas to recover");
-    }
-    // remove the watchers, we're done either way
-    for (Map.Entry<String, CollectionStateWatcher> e : watchers.entrySet()) {
-      zkStateReader.removeCollectionStateWatcher(e.getKey(), e.getValue());
-    }
-    if (anyOneFailed.get()) {
-      log.info("Failed to create some replicas. Cleaning up all replicas on target node");
-      SolrCloseableLatch cleanupLatch = new SolrCloseableLatch(createdReplicas.size(), ocmh);
-      for (ZkNodeProps createdReplica : createdReplicas) {
-        NamedList deleteResult = new NamedList();
-        try {
-          ocmh.deleteReplica(zkStateReader.getClusterState(), createdReplica.plus("parallel", "true"), deleteResult, () -> {
-            cleanupLatch.countDown();
-            if (deleteResult.get("failure") != null) {
-              synchronized (results) {
-                results.add("failure", "Could not cleanup, because of : " + deleteResult.get("failure"));
-              }
-            }
-          });
-        } catch (KeeperException e) {
-          cleanupLatch.countDown();
-          log.warn("Error deleting replica ", e);
-        } catch (Exception e) {
-          log.warn("Error deleting replica ", e);
-          cleanupLatch.countDown();
-          throw e;
-        }
-      }
-      cleanupLatch.await(5, TimeUnit.MINUTES);
-      return;
-    }
-
-
-    // we have reached this far means all replicas could be recreated
-    //now cleanup the replicas in the source node
-    DeleteNodeCmd.cleanupReplicas(results, state, sourceReplicas, ocmh, source, async);
-    results.add("success", "REPLACENODE action completed successfully from  : " + source + " to : " + target);
-  }
-
-  static List<ZkNodeProps> getReplicasOfNode(String source, ClusterState state) {
-    List<ZkNodeProps> sourceReplicas = new ArrayList<>();
-    for (Map.Entry<String, DocCollection> e : state.getCollectionsMap().entrySet()) {
-      for (Slice slice : e.getValue().getSlices()) {
-        for (Replica replica : slice.getReplicas()) {
-          if (source.equals(replica.getNodeName())) {
-            ZkNodeProps props = new ZkNodeProps(
-                COLLECTION_PROP, e.getKey(),
-                SHARD_ID_PROP, slice.getName(),
-                ZkStateReader.CORE_NAME_PROP, replica.getCoreName(),
-                ZkStateReader.REPLICA_PROP, replica.getName(),
-                ZkStateReader.REPLICA_TYPE, replica.getType().name(),
-                ZkStateReader.LEADER_PROP, String.valueOf(replica.equals(slice.getLeader())),
-                CoreAdminParams.NODE, source);
-            sourceReplicas.add(props);
-          }
-        }
-      }
-    }
-    return sourceReplicas;
-  }
-
-}
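
The command above backs the REPLACENODE collections API action. A minimal SolrJ sketch of triggering it, assuming the replaceNode helper of this era of SolrJ (the URL and node names are illustrative):

import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class ReplaceNodeExample {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      // a null target lets the assign strategy pick destination nodes, as in the command above
      CollectionAdminRequest.replaceNode("host1:8983_solr", "host2:8983_solr").process(client);
    }
  }
}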

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
deleted file mode 100644
index d100ce0..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
+++ /dev/null
@@ -1,395 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.api.collections;
-
-
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.Properties;
-import java.util.Set;
-
-import org.apache.solr.client.solrj.cloud.DistributedQueue;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.ImplicitDocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ReplicaPosition;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.backup.BackupManager;
-import org.apache.solr.core.backup.repository.BackupRepository;
-import org.apache.solr.handler.component.ShardHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.DocCollection.STATE_FORMAT;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
-import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_TYPE;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-public class RestoreCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public RestoreCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    // TODO maybe we can inherit createCollection's options/code
-
-    String restoreCollectionName = message.getStr(COLLECTION_PROP);
-    String backupName = message.getStr(NAME); // of backup
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-    String asyncId = message.getStr(ASYNC);
-    String repo = message.getStr(CoreAdminParams.BACKUP_REPOSITORY);
-    Map<String, String> requestMap = new HashMap<>();
-
-    CoreContainer cc = ocmh.overseer.getCoreContainer();
-    BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
-
-    URI location = repository.createURI(message.getStr(CoreAdminParams.BACKUP_LOCATION));
-    URI backupPath = repository.resolve(location, backupName);
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    BackupManager backupMgr = new BackupManager(repository, zkStateReader);
-
-    Properties properties = backupMgr.readBackupProperties(location, backupName);
-    String backupCollection = properties.getProperty(BackupManager.COLLECTION_NAME_PROP);
-    DocCollection backupCollectionState = backupMgr.readCollectionState(location, backupName, backupCollection);
-
-    // Get the Solr nodes to restore a collection.
-    final List<String> nodeList = Assign.getLiveOrLiveAndCreateNodeSetList(
-        zkStateReader.getClusterState().getLiveNodes(), message, OverseerCollectionMessageHandler.RANDOM);
-
-    int numShards = backupCollectionState.getActiveSlices().size();
-
-    int numNrtReplicas;
-    if (message.get(REPLICATION_FACTOR) != null) {
-      numNrtReplicas = message.getInt(REPLICATION_FACTOR, 0);
-    } else if (message.get(NRT_REPLICAS) != null) {
-      numNrtReplicas = message.getInt(NRT_REPLICAS, 0);
-    } else {
-      //replicationFactor and nrtReplicas are always in sync after SOLR-11676
-      //pick from cluster state of the backed up collection
-      numNrtReplicas = backupCollectionState.getReplicationFactor();
-    }
-    int numTlogReplicas = getInt(message, TLOG_REPLICAS, backupCollectionState.getNumTlogReplicas(), 0);
-    int numPullReplicas = getInt(message, PULL_REPLICAS, backupCollectionState.getNumPullReplicas(), 0);
-    int totalReplicasPerShard = numNrtReplicas + numTlogReplicas + numPullReplicas;
-    assert totalReplicasPerShard > 0;
-    
-    int maxShardsPerNode = message.getInt(MAX_SHARDS_PER_NODE, backupCollectionState.getMaxShardsPerNode());
-    int availableNodeCount = nodeList.size();
-    if (maxShardsPerNode != -1 && (numShards * totalReplicasPerShard) > (availableNodeCount * maxShardsPerNode)) {
-      throw new SolrException(ErrorCode.BAD_REQUEST,
-          String.format(Locale.ROOT, "Solr cloud with available number of nodes:%d is insufficient for"
-              + " restoring a collection with %d shards, total replicas per shard %d and maxShardsPerNode %d."
-              + " Consider increasing maxShardsPerNode value OR number of available nodes.",
-              availableNodeCount, numShards, totalReplicasPerShard, maxShardsPerNode));
-    }
-
-    //Upload the configs
-    String configName = (String) properties.get(CollectionAdminParams.COLL_CONF);
-    String restoreConfigName = message.getStr(CollectionAdminParams.COLL_CONF, configName);
-    if (zkStateReader.getConfigManager().configExists(restoreConfigName)) {
-      log.info("Using existing config {}", restoreConfigName);
-      //TODO add overwrite option?
-    } else {
-      log.info("Uploading config {}", restoreConfigName);
-      backupMgr.uploadConfigDir(location, backupName, configName, restoreConfigName);
-    }
-
-    log.info("Starting restore into collection={} with backup_name={} at location={}", restoreCollectionName, backupName,
-        location);
-
-    //Create core-less collection
-    {
-      Map<String, Object> propMap = new HashMap<>();
-      propMap.put(Overseer.QUEUE_OPERATION, CREATE.toString());
-      propMap.put("fromApi", "true"); // mostly true.  Prevents autoCreated=true in the collection state.
-      if (properties.get(STATE_FORMAT) == null) {
-        propMap.put(STATE_FORMAT, "2");
-      }
-      propMap.put(REPLICATION_FACTOR, numNrtReplicas);
-      propMap.put(NRT_REPLICAS, numNrtReplicas);
-      propMap.put(TLOG_REPLICAS, numTlogReplicas);
-      propMap.put(PULL_REPLICAS, numPullReplicas);
-      propMap.put(MAX_SHARDS_PER_NODE, maxShardsPerNode); // on propMap, not the backup properties, so the CREATE call below sees the validated value
-
-      // inherit settings from input API, defaulting to the backup's setting.  Ex: replicationFactor
-      for (String collProp : OverseerCollectionMessageHandler.COLLECTION_PROPS_AND_DEFAULTS.keySet()) {
-        Object val = message.getProperties().getOrDefault(collProp, backupCollectionState.get(collProp));
-        if (val != null && propMap.get(collProp) == null) {
-          propMap.put(collProp, val);
-        }
-      }
-
-      propMap.put(NAME, restoreCollectionName);
-      propMap.put(OverseerCollectionMessageHandler.CREATE_NODE_SET, OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY); //no cores
-      propMap.put(CollectionAdminParams.COLL_CONF, restoreConfigName);
-
-      // router.*
-      @SuppressWarnings("unchecked")
-      Map<String, Object> routerProps = (Map<String, Object>) backupCollectionState.getProperties().get(DocCollection.DOC_ROUTER);
-      for (Map.Entry<String, Object> pair : routerProps.entrySet()) {
-        propMap.put(DocCollection.DOC_ROUTER + "." + pair.getKey(), pair.getValue());
-      }
-
-      Set<String> sliceNames = backupCollectionState.getActiveSlicesMap().keySet();
-      if (backupCollectionState.getRouter() instanceof ImplicitDocRouter) {
-        propMap.put(OverseerCollectionMessageHandler.SHARDS_PROP, StrUtils.join(sliceNames, ','));
-      } else {
-        propMap.put(OverseerCollectionMessageHandler.NUM_SLICES, sliceNames.size());
-        // ClusterStateMutator.createCollection detects that "slices" is in fact a slice structure instead of a
-        //   list of names, and if so uses this instead of building it.  We clear the replica list.
-        Collection<Slice> backupSlices = backupCollectionState.getActiveSlices();
-        Map<String, Slice> newSlices = new LinkedHashMap<>(backupSlices.size());
-        for (Slice backupSlice : backupSlices) {
-          newSlices.put(backupSlice.getName(),
-              new Slice(backupSlice.getName(), Collections.emptyMap(), backupSlice.getProperties()));
-        }
-        propMap.put(OverseerCollectionMessageHandler.SHARDS_PROP, newSlices);
-      }
-
-      ocmh.commandMap.get(CREATE).call(zkStateReader.getClusterState(), new ZkNodeProps(propMap), new NamedList());
-      // note: when createCollection() returns, the collection exists (no race)
-    }
-
-    // Restore collection properties
-    backupMgr.uploadCollectionProperties(location, backupName, restoreCollectionName);
-
-    DocCollection restoreCollection = zkStateReader.getClusterState().getCollection(restoreCollectionName);
-
-    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
-
-    //Mark all shards in CONSTRUCTION STATE while we restore the data
-    {
-      //TODO: could createCollection instead accept an initial state?  Is there a race?
-      Map<String, Object> propMap = new HashMap<>();
-      propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-      for (Slice shard : restoreCollection.getSlices()) {
-        propMap.put(shard.getName(), Slice.State.CONSTRUCTION.toString());
-      }
-      propMap.put(ZkStateReader.COLLECTION_PROP, restoreCollectionName);
-      inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
-    }
-
-    // TODO how do we leverage the RULE / SNITCH logic in createCollection?
-
-    ClusterState clusterState = zkStateReader.getClusterState();
-
-    List<String> sliceNames = new ArrayList<>();
-    restoreCollection.getSlices().forEach(x -> sliceNames.add(x.getName()));
-    PolicyHelper.SessionWrapper sessionWrapper = null;
-
-    try {
-      Assign.AssignRequest assignRequest = new Assign.AssignRequestBuilder()
-          .forCollection(restoreCollectionName)
-          .forShard(sliceNames)
-          .assignNrtReplicas(numNrtReplicas)
-          .assignTlogReplicas(numTlogReplicas)
-          .assignPullReplicas(numPullReplicas)
-          .onNodes(nodeList)
-          .build();
-      Assign.AssignStrategyFactory assignStrategyFactory = new Assign.AssignStrategyFactory(ocmh.cloudManager);
-      Assign.AssignStrategy assignStrategy = assignStrategyFactory.create(clusterState, restoreCollection);
-      List<ReplicaPosition> replicaPositions = assignStrategy.assign(ocmh.cloudManager, assignRequest);
-      sessionWrapper = PolicyHelper.getLastSessionWrapper(true);
-      //Create one replica per shard and copy backed up data to it
-      for (Slice slice : restoreCollection.getSlices()) {
-        log.debug("Adding replica for shard={} collection={} ", slice.getName(), restoreCollection);
-        HashMap<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, CREATESHARD);
-        propMap.put(COLLECTION_PROP, restoreCollectionName);
-        propMap.put(SHARD_ID_PROP, slice.getName());
-
-        if (numNrtReplicas >= 1) {
-          propMap.put(REPLICA_TYPE, Replica.Type.NRT.name());
-        } else if (numTlogReplicas >= 1) {
-          propMap.put(REPLICA_TYPE, Replica.Type.TLOG.name());
-        } else {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "Unexpected number of replicas, replicationFactor, " +
-              Replica.Type.NRT + " or " + Replica.Type.TLOG + " must be greater than 0");
-        }
-
-        // Pin the new replica to the node of the first replica position assigned to this shard
-        String node;
-        for (ReplicaPosition replicaPosition : replicaPositions) {
-          if (Objects.equals(replicaPosition.shard, slice.getName())) {
-            node = replicaPosition.node;
-            propMap.put(CoreAdminParams.NODE, node);
-            replicaPositions.remove(replicaPosition);
-            break;
-          }
-        }
-
-        // add async param
-        if (asyncId != null) {
-          propMap.put(ASYNC, asyncId);
-        }
-        ocmh.addPropertyParams(message, propMap);
-        ocmh.addReplica(clusterState, new ZkNodeProps(propMap), new NamedList(), null);
-      }
-
-      //refresh the location copy of collection state
-      restoreCollection = zkStateReader.getClusterState().getCollection(restoreCollectionName);
-
-      //Copy data from backed up index to each replica
-      for (Slice slice : restoreCollection.getSlices()) {
-        ModifiableSolrParams params = new ModifiableSolrParams();
-        params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.RESTORECORE.toString());
-        params.set(NAME, "snapshot." + slice.getName());
-        params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.toASCIIString());
-        params.set(CoreAdminParams.BACKUP_REPOSITORY, repo);
-        ocmh.sliceCmd(clusterState, params, null, slice, shardHandler, asyncId, requestMap);
-      }
-      ocmh.processResponses(new NamedList(), shardHandler, true, "Could not restore core", asyncId, requestMap);
-
-
-      for (Slice s: restoreCollection.getSlices()) {
-        for (Replica r : s.getReplicas()) {
-          String nodeName = r.getNodeName();
-          String coreNodeName = r.getCoreName();
-          Replica.State stateRep  = r.getState();
-
-          log.debug("Calling REQUESTAPPLYUPDATES on: nodeName={}, coreNodeName={}, state={}"
-              , nodeName, coreNodeName, stateRep.name());
-
-          ModifiableSolrParams params = new ModifiableSolrParams();
-          params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTAPPLYUPDATES.toString());
-          params.set(CoreAdminParams.NAME, coreNodeName);
-
-          ocmh.sendShardRequest(nodeName, params, shardHandler, asyncId, requestMap);
-        }
-
-        ocmh.processResponses(new NamedList(), shardHandler, true, "REQUESTAPPLYUPDATES calls did not succeed", asyncId, requestMap);
-
-      }
-
-      //Mark all shards in ACTIVE STATE
-      {
-        HashMap<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-        propMap.put(ZkStateReader.COLLECTION_PROP, restoreCollectionName);
-        for (Slice shard : restoreCollection.getSlices()) {
-          propMap.put(shard.getName(), Slice.State.ACTIVE.toString());
-        }
-        inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
-      }
-
-      if (totalReplicasPerShard > 1) {
-        log.info("Adding replicas to restored collection={}", restoreCollection.getName());
-        for (Slice slice : restoreCollection.getSlices()) {
-
-          //Add the remaining replicas for each shard, considering its type
-          int createdNrtReplicas = 0, createdTlogReplicas = 0, createdPullReplicas = 0;
-
-          // We already created either an NRT or a TLOG replica as leader
-          if (numNrtReplicas > 0) {
-            createdNrtReplicas++;
-          } else if (numTlogReplicas > 0) {
-            createdTlogReplicas++;
-          }
-
-          for (int i = 1; i < totalReplicasPerShard; i++) {
-            Replica.Type typeToCreate;
-            if (createdNrtReplicas < numNrtReplicas) {
-              createdNrtReplicas++;
-              typeToCreate = Replica.Type.NRT;
-            } else if (createdTlogReplicas < numTlogReplicas) {
-              createdTlogReplicas++;
-              typeToCreate = Replica.Type.TLOG;
-            } else {
-              createdPullReplicas++;
-              typeToCreate = Replica.Type.PULL;
-              assert createdPullReplicas <= numPullReplicas: "Unexpected number of replicas";
-            }
-
-            log.debug("Adding replica for shard={} collection={} of type {} ", slice.getName(), restoreCollection, typeToCreate);
-            HashMap<String, Object> propMap = new HashMap<>();
-            propMap.put(COLLECTION_PROP, restoreCollectionName);
-            propMap.put(SHARD_ID_PROP, slice.getName());
-            propMap.put(REPLICA_TYPE, typeToCreate.name());
-
-            // Pin the new replica to the node of the first replica position assigned to this shard
-            String node;
-            for (ReplicaPosition replicaPosition : replicaPositions) {
-              if (Objects.equals(replicaPosition.shard, slice.getName())) {
-                node = replicaPosition.node;
-                propMap.put(CoreAdminParams.NODE, node);
-                replicaPositions.remove(replicaPosition);
-                break;
-              }
-            }
-
-            // add async param
-            if (asyncId != null) {
-              propMap.put(ASYNC, asyncId);
-            }
-            ocmh.addPropertyParams(message, propMap);
-
-            ocmh.addReplica(zkStateReader.getClusterState(), new ZkNodeProps(propMap), results, null);
-          }
-        }
-      }
-
-      log.info("Completed restoring collection={} backupName={}", restoreCollection, backupName);
-    } finally {
-      if (sessionWrapper != null) sessionWrapper.release();
-    }
-  }
-
-  private int getInt(ZkNodeProps message, String propertyName, Integer count, int defaultValue) {
-    Integer value = message.getInt(propertyName, count);
-    return value != null ? value : defaultValue;
-  }
-}
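
The command above implements the RESTORE collections API action, driven here by backup properties and per-slice RESTORECORE calls. A minimal SolrJ sketch of starting a restore, assuming the restoreCollection helper of this era of SolrJ (the URL, names, and location path are illustrative):

import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class RestoreExample {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      CollectionAdminRequest.restoreCollection("restoredCollection", "myBackup")
          .setLocation("/backups") // must be reachable from every node, or use a configured repository
          .process(client);
    }
  }
}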

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/SetAliasPropCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/SetAliasPropCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/SetAliasPropCmd.java
deleted file mode 100644
index fdee1d1..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/SetAliasPropCmd.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.api.collections;
-
-import java.lang.invoke.MethodHandles;
-import java.util.LinkedHashMap;
-import java.util.Locale;
-import java.util.Map;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.Cmd;
-import static org.apache.solr.common.SolrException.ErrorCode.BAD_REQUEST;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-public class SetAliasPropCmd implements Cmd {
-
-  public static final String PROPERTIES = "property";
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler messageHandler;
-
-  SetAliasPropCmd(OverseerCollectionMessageHandler messageHandler) {
-    this.messageHandler = messageHandler;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    String aliasName = message.getStr(NAME);
-
-    final ZkStateReader.AliasesManager aliasesManager = messageHandler.zkStateReader.aliasesManager;
-
-    // Ensure we see the alias.  This may be redundant but SetAliasPropCmd isn't expected to be called very frequently
-    aliasesManager.update();
-
-    if (aliasesManager.getAliases().getCollectionAliasMap().get(aliasName) == null) {
-      // nicer than letting aliases object throw later on...
-      throw new SolrException(BAD_REQUEST,
-          String.format(Locale.ROOT,  "Can't modify non-existent alias %s", aliasName));
-    }
-
-    @SuppressWarnings("unchecked")
-    Map<String, String> properties = new LinkedHashMap<>((Map<String, String>) message.get(PROPERTIES));
-
-    // check & cleanup properties.  It's a mutable copy.
-    for (Map.Entry<String, String> entry : properties.entrySet()) {
-      String key = entry.getKey();
-      if ("".equals(key.trim())) {
-        throw new SolrException(BAD_REQUEST, "property keys must not be pure whitespace");
-      }
-      if (!key.equals(key.trim())) {
-        throw new SolrException(BAD_REQUEST, "property keys should not begin or end with whitespace");
-      }
-      String value = entry.getValue();
-      if ("".equals(value)) {
-        entry.setValue(null);
-      }
-    }
-
-    aliasesManager.applyModificationAndExportToZk(aliases1 -> aliases1.cloneWithCollectionAliasProperties(aliasName, properties));
-  }
-}
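
The command above backs the ALIASPROP collections API action, which sets metadata on an existing alias. A minimal SolrJ sketch, assuming the setAliasProperty helper of this era of SolrJ (the URL, alias, and property names are illustrative):

import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class AliasPropExample {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      CollectionAdminRequest.setAliasProperty("myAlias")
          .addProperty("someKey", "someValue") // an empty value removes the property, per the cleanup loop above
          .process(client);
    }
  }
}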

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
deleted file mode 100644
index 2e68f91..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
+++ /dev/null
@@ -1,778 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.api.collections;
-
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.solr.client.solrj.cloud.DistributedQueue;
-import org.apache.solr.client.solrj.cloud.NodeStateProvider;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.autoscaling.ReplicaInfo;
-import org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type;
-import org.apache.solr.client.solrj.request.CoreAdminRequest;
-import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.CompositeIdRouter;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.PlainIdRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ReplicaPosition;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.cloud.rule.ImplicitSnitch;
-import org.apache.solr.common.params.CommonAdminParams;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.update.SolrIndexSplitter;
-import org.apache.solr.util.RTimerTree;
-import org.apache.solr.util.TestInjection;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_TYPE;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-
-public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public SplitShardCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    split(state, message, results);
-  }
-
-  public boolean split(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    boolean waitForFinalState = message.getBool(CommonAdminParams.WAIT_FOR_FINAL_STATE, false);
-    String methodStr = message.getStr(CommonAdminParams.SPLIT_METHOD, SolrIndexSplitter.SplitMethod.REWRITE.toLower());
-    SolrIndexSplitter.SplitMethod splitMethod = SolrIndexSplitter.SplitMethod.get(methodStr);
-    if (splitMethod == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown value '" + CommonAdminParams.SPLIT_METHOD +
-          ": " + methodStr);
-    }
-    boolean withTiming = message.getBool(CommonParams.TIMING, false);
-
-    String collectionName = message.getStr(CoreAdminParams.COLLECTION);
-
-    log.debug("Split shard invoked: {}", message);
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    zkStateReader.forceUpdateCollection(collectionName);
-    AtomicReference<String> slice = new AtomicReference<>();
-    slice.set(message.getStr(ZkStateReader.SHARD_ID_PROP));
-    Set<String> offlineSlices = new HashSet<>();
-    RTimerTree timings = new RTimerTree();
-
-    String splitKey = message.getStr("split.key");
-    DocCollection collection = clusterState.getCollection(collectionName);
-
-    PolicyHelper.SessionWrapper sessionWrapper = null;
-
-    Slice parentSlice = getParentSlice(clusterState, collectionName, slice, splitKey);
-    if (parentSlice.getState() != Slice.State.ACTIVE) {
-      throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Parent slice is not active: " +
-          collectionName + "/ " + parentSlice.getName() + ", state=" + parentSlice.getState());
-    }
-
-    // find the leader for the shard
-    Replica parentShardLeader = null;
-    try {
-      parentShardLeader = zkStateReader.getLeaderRetry(collectionName, slice.get(), 10000);
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Interrupted.");
-    }
-
-    RTimerTree t = timings.sub("checkDiskSpace");
-    checkDiskSpace(collectionName, slice.get(), parentShardLeader);
-    t.stop();
-
-    // let's record the ephemeralOwner of the parent leader node
-    Stat leaderZnodeStat = zkStateReader.getZkClient().exists(ZkStateReader.LIVE_NODES_ZKNODE + "/" + parentShardLeader.getNodeName(), null, true);
-    if (leaderZnodeStat == null)  {
-      // we just got to know the leader but its live node is gone already!
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "The shard leader node: " + parentShardLeader.getNodeName() + " is not live anymore!");
-    }
-
-    List<DocRouter.Range> subRanges = new ArrayList<>();
-    List<String> subSlices = new ArrayList<>();
-    List<String> subShardNames = new ArrayList<>();
-
-    // reproduce the currently existing number of replicas per type
-    AtomicInteger numNrt = new AtomicInteger();
-    AtomicInteger numTlog = new AtomicInteger();
-    AtomicInteger numPull = new AtomicInteger();
-    parentSlice.getReplicas().forEach(r -> {
-      switch (r.getType()) {
-        case NRT:
-          numNrt.incrementAndGet();
-          break;
-        case TLOG:
-          numTlog.incrementAndGet();
-          break;
-        case PULL:
-          numPull.incrementAndGet();
-      }
-    });
-    int repFactor = numNrt.get() + numTlog.get() + numPull.get();
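-    // the sub-shards will reproduce this replication factor; it also determines below whether they can be switched straight to ACTIVE (repFactor == 1)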
-
-    boolean success = false;
-    try {
-      // type of the first subreplica will be the same as leader
-      boolean firstNrtReplica = parentShardLeader.getType() == Replica.Type.NRT;
-      // verify that we indeed have the right number of correct replica types
-      if ((firstNrtReplica && numNrt.get() < 1) || (!firstNrtReplica && numTlog.get() < 1)) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "aborting split - inconsistent replica types in collection " + collectionName +
-            ": nrt=" + numNrt.get() + ", tlog=" + numTlog.get() + ", pull=" + numPull.get() + ", shard leader type is " +
-            parentShardLeader.getType());
-      }
-
-      // check for the lock
-      if (!lockForSplit(ocmh.cloudManager, collectionName, parentSlice.getName())) {
-        // mark as success to avoid clearing the lock in the "finally" block
-        success = true;
-        throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Can't lock parent slice for splitting (another split operation running?): " +
-            collectionName + "/" + parentSlice.getName());
-      }
-
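-      // property maps for the additional sub-shard replicas; they are collected here and their cores are created later, once the sub-slice states have been set (see the SOLR-7673 note below)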
-      List<Map<String, Object>> replicas = new ArrayList<>((repFactor - 1) * 2);
-
-      t = timings.sub("fillRanges");
-      String rangesStr = fillRanges(ocmh.cloudManager, message, collection, parentSlice, subRanges, subSlices, subShardNames, firstNrtReplica);
-      t.stop();
-
-      boolean oldShardsDeleted = false;
-      for (String subSlice : subSlices) {
-        Slice oSlice = collection.getSlice(subSlice);
-        if (oSlice != null) {
-          final Slice.State state = oSlice.getState();
-          if (state == Slice.State.ACTIVE) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-                "Sub-shard: " + subSlice + " exists in active state. Aborting split shard.");
-          } else {
-            // delete the shards
-            log.info("Sub-shard: {} already exists therefore requesting its deletion", subSlice);
-            Map<String, Object> propMap = new HashMap<>();
-            propMap.put(Overseer.QUEUE_OPERATION, "deleteshard");
-            propMap.put(COLLECTION_PROP, collectionName);
-            propMap.put(SHARD_ID_PROP, subSlice);
-            ZkNodeProps m = new ZkNodeProps(propMap);
-            try {
-              ocmh.commandMap.get(DELETESHARD).call(clusterState, m, new NamedList());
-            } catch (Exception e) {
-              throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to delete already existing sub shard: " + subSlice,
-                  e);
-            }
-
-            oldShardsDeleted = true;
-          }
-        }
-      }
-
-      if (oldShardsDeleted) {
-        // refresh the locally cached cluster state
-        // we know we have the latest because otherwise deleteshard would have failed
-        clusterState = zkStateReader.getClusterState();
-        collection = clusterState.getCollection(collectionName);
-      }
-
-      final String asyncId = message.getStr(ASYNC);
-      Map<String, String> requestMap = new HashMap<>();
-      String nodeName = parentShardLeader.getNodeName();
-
-      t = timings.sub("createSubSlicesAndLeadersInState");
-      for (int i = 0; i < subRanges.size(); i++) {
-        String subSlice = subSlices.get(i);
-        String subShardName = subShardNames.get(i);
-        DocRouter.Range subRange = subRanges.get(i);
-
-        log.debug("Creating slice " + subSlice + " of collection " + collectionName + " on " + nodeName);
-
-        Map<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, CREATESHARD.toLower());
-        propMap.put(ZkStateReader.SHARD_ID_PROP, subSlice);
-        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
-        propMap.put(ZkStateReader.SHARD_RANGE_PROP, subRange.toString());
-        propMap.put(ZkStateReader.SHARD_STATE_PROP, Slice.State.CONSTRUCTION.toString());
-        propMap.put(ZkStateReader.SHARD_PARENT_PROP, parentSlice.getName());
-        propMap.put("shard_parent_node", nodeName);
-        propMap.put("shard_parent_zk_session", leaderZnodeStat.getEphemeralOwner());
-        DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
-        inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
-
-        // wait until we are able to see the new shard in cluster state
-        ocmh.waitForNewShard(collectionName, subSlice);
-
-        // refresh cluster state
-        clusterState = zkStateReader.getClusterState();
-
-        log.debug("Adding first replica " + subShardName + " as part of slice " + subSlice + " of collection " + collectionName
-            + " on " + nodeName);
-        propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
-        propMap.put(COLLECTION_PROP, collectionName);
-        propMap.put(SHARD_ID_PROP, subSlice);
-        propMap.put(REPLICA_TYPE, firstNrtReplica ? Replica.Type.NRT.toString() : Replica.Type.TLOG.toString());
-        propMap.put("node", nodeName);
-        propMap.put(CoreAdminParams.NAME, subShardName);
-        propMap.put(CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
-        // copy over property params:
-        for (String key : message.keySet()) {
-          if (key.startsWith(OverseerCollectionMessageHandler.COLL_PROP_PREFIX)) {
-            propMap.put(key, message.getStr(key));
-          }
-        }
-        // add async param
-        if (asyncId != null) {
-          propMap.put(ASYNC, asyncId);
-        }
-        ocmh.addReplica(clusterState, new ZkNodeProps(propMap), results, null);
-      }
-
-      ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-
-      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to create subshard leaders", asyncId, requestMap);
-
-      t.stop();
-      t = timings.sub("waitForSubSliceLeadersAlive");
-      for (String subShardName : subShardNames) {
-        // wait for parent leader to acknowledge the sub-shard core
-        log.debug("Asking parent leader to wait for: " + subShardName + " to be alive on: " + nodeName);
-        String coreNodeName = ocmh.waitForCoreNodeName(collectionName, nodeName, subShardName);
-        CoreAdminRequest.WaitForState cmd = new CoreAdminRequest.WaitForState();
-        cmd.setCoreName(subShardName);
-        cmd.setNodeName(nodeName);
-        cmd.setCoreNodeName(coreNodeName);
-        cmd.setState(Replica.State.ACTIVE);
-        cmd.setCheckLive(true);
-        cmd.setOnlyIfLeader(true);
-
-        ModifiableSolrParams p = new ModifiableSolrParams(cmd.getParams());
-        ocmh.sendShardRequest(nodeName, p, shardHandler, asyncId, requestMap);
-      }
-
-      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD timed out waiting for subshard leaders to come up",
-          asyncId, requestMap);
-      t.stop();
-
-      log.debug("Successfully created all sub-shards for collection " + collectionName + " parent shard: " + slice
-          + " on: " + parentShardLeader);
-
-      log.info("Splitting shard " + parentShardLeader.getName() + " as part of slice " + slice + " of collection "
-          + collectionName + " on " + parentShardLeader);
-
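-      // ask the parent shard leader to split its index into the sub-shard cores along the computed hash ranges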
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.SPLIT.toString());
-      params.set(CommonAdminParams.SPLIT_METHOD, splitMethod.toLower());
-      params.set(CoreAdminParams.CORE, parentShardLeader.getStr("core"));
-      for (int i = 0; i < subShardNames.size(); i++) {
-        String subShardName = subShardNames.get(i);
-        params.add(CoreAdminParams.TARGET_CORE, subShardName);
-      }
-      params.set(CoreAdminParams.RANGES, rangesStr);
-
-      t = timings.sub("splitParentCore");
-
-      ocmh.sendShardRequest(parentShardLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
-
-      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to invoke SPLIT core admin command", asyncId,
-          requestMap);
-      t.stop();
-
-      log.debug("Index on shard: " + nodeName + " split into two successfully");
-
-      t = timings.sub("applyBufferedUpdates");
-      // apply buffered updates on sub-shards
-      for (int i = 0; i < subShardNames.size(); i++) {
-        String subShardName = subShardNames.get(i);
-
-        log.debug("Applying buffered updates on : " + subShardName);
-
-        params = new ModifiableSolrParams();
-        params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTAPPLYUPDATES.toString());
-        params.set(CoreAdminParams.NAME, subShardName);
-
-        ocmh.sendShardRequest(nodeName, params, shardHandler, asyncId, requestMap);
-      }
-
-      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed while asking sub shard leaders" +
-          " to apply buffered updates", asyncId, requestMap);
-      t.stop();
-
-      log.debug("Successfully applied buffered updates on : " + subShardNames);
-
-      // Replica creation for the new Slices
-      // replica placement is controlled by the autoscaling policy framework
-
-      Set<String> nodes = clusterState.getLiveNodes();
-      List<String> nodeList = new ArrayList<>(nodes.size());
-      nodeList.addAll(nodes);
-
-      // TODO: Have maxShardsPerNode param for this operation?
-
-      // Remove the node that hosts the parent shard for replica creation.
-      nodeList.remove(nodeName);
-
-      // TODO: change this to handle sharding a slice into > 2 sub-shards.
-
-      // we have already created one subReplica for each subShard on the parent node.
-      // identify locations for the remaining replicas
-      if (firstNrtReplica) {
-        numNrt.decrementAndGet();
-      } else {
-        numTlog.decrementAndGet();
-      }
-
-      t = timings.sub("identifyNodesForReplicas");
-      Assign.AssignRequest assignRequest = new Assign.AssignRequestBuilder()
-          .forCollection(collectionName)
-          .forShard(subSlices)
-          .assignNrtReplicas(numNrt.get())
-          .assignTlogReplicas(numTlog.get())
-          .assignPullReplicas(numPull.get())
-          .onNodes(new ArrayList<>(clusterState.getLiveNodes()))
-          .build();
-      Assign.AssignStrategyFactory assignStrategyFactory = new Assign.AssignStrategyFactory(ocmh.cloudManager);
-      Assign.AssignStrategy assignStrategy = assignStrategyFactory.create(clusterState, collection);
-      List<ReplicaPosition> replicaPositions = assignStrategy.assign(ocmh.cloudManager, assignRequest);
-      sessionWrapper = PolicyHelper.getLastSessionWrapper(true);
-      t.stop();
-
-      t = timings.sub("createReplicaPlaceholders");
-      for (ReplicaPosition replicaPosition : replicaPositions) {
-        String sliceName = replicaPosition.shard;
-        String subShardNodeName = replicaPosition.node;
-        String solrCoreName = Assign.buildSolrCoreName(collectionName, sliceName, replicaPosition.type, replicaPosition.index);
-
-        log.debug("Creating replica shard " + solrCoreName + " as part of slice " + sliceName + " of collection "
-            + collectionName + " on " + subShardNodeName);
-
-        // we first create all replicas in DOWN state without actually creating their cores in order to
-        // avoid a race condition where Overseer may prematurely activate the new sub-slices (and deactivate
-        // the parent slice) before all new replicas are added. This situation may lead to a loss of performance
-        // because the new shards will be activated with possibly many fewer replicas.
-        ZkNodeProps props = new ZkNodeProps(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower(),
-            ZkStateReader.COLLECTION_PROP, collectionName,
-            ZkStateReader.SHARD_ID_PROP, sliceName,
-            ZkStateReader.CORE_NAME_PROP, solrCoreName,
-            ZkStateReader.REPLICA_TYPE, replicaPosition.type.name(),
-            ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
-            ZkStateReader.BASE_URL_PROP, zkStateReader.getBaseUrlForNodeName(subShardNodeName),
-            ZkStateReader.NODE_NAME_PROP, subShardNodeName,
-            CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
-        Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
-
-        HashMap<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
-        propMap.put(COLLECTION_PROP, collectionName);
-        propMap.put(SHARD_ID_PROP, sliceName);
-        propMap.put(REPLICA_TYPE, replicaPosition.type.name());
-        propMap.put("node", subShardNodeName);
-        propMap.put(CoreAdminParams.NAME, solrCoreName);
-        // copy over property params:
-        for (String key : message.keySet()) {
-          if (key.startsWith(OverseerCollectionMessageHandler.COLL_PROP_PREFIX)) {
-            propMap.put(key, message.getStr(key));
-          }
-        }
-        // add async param
-        if (asyncId != null) {
-          propMap.put(ASYNC, asyncId);
-        }
-        // special flag param to instruct addReplica not to create the replica in cluster state again
-        propMap.put(OverseerCollectionMessageHandler.SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, "true");
-
-        propMap.put(CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
-
-        replicas.add(propMap);
-      }
-      t.stop();
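-      // test-only failure injection hook; the assert is a no-op unless assertions are enabled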
-      assert TestInjection.injectSplitFailureBeforeReplicaCreation();
-
-      long ephemeralOwner = leaderZnodeStat.getEphemeralOwner();
-      // compare against the ephemeralOwner of the parent leader node
-      leaderZnodeStat = zkStateReader.getZkClient().exists(ZkStateReader.LIVE_NODES_ZKNODE + "/" + parentShardLeader.getNodeName(), null, true);
-      if (leaderZnodeStat == null || ephemeralOwner != leaderZnodeStat.getEphemeralOwner()) {
-        // put sub-shards in recovery_failed state
-        DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
-        Map<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-        for (String subSlice : subSlices) {
-          propMap.put(subSlice, Slice.State.RECOVERY_FAILED.toString());
-        }
-        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
-        ZkNodeProps m = new ZkNodeProps(propMap);
-        inQueue.offer(Utils.toJSON(m));
-
-        if (leaderZnodeStat == null)  {
-          // the leader is not live anymore, fail the split!
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "The shard leader node: " + parentShardLeader.getNodeName() + " is not live anymore!");
-        } else if (ephemeralOwner != leaderZnodeStat.getEphemeralOwner()) {
-          // there's a new leader, fail the split!
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-              "The zk session id for the shard leader node: " + parentShardLeader.getNodeName() + " has changed from "
-                  + ephemeralOwner + " to " + leaderZnodeStat.getEphemeralOwner() + ". This can cause data loss so we must abort the split");
-        }
-      }
-
-      // we must set the slice state into recovery before actually creating the replica cores
-      // this ensures that the logic inside ReplicaMutator to update sub-shard state to 'active'
-      // always gets a chance to execute. See SOLR-7673
-
-      if (repFactor == 1) {
-        // switch sub shard states to 'active'
-        log.debug("Replication factor is 1 so switching shard states");
-        DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
-        Map<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-        propMap.put(slice.get(), Slice.State.INACTIVE.toString());
-        for (String subSlice : subSlices) {
-          propMap.put(subSlice, Slice.State.ACTIVE.toString());
-        }
-        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
-        ZkNodeProps m = new ZkNodeProps(propMap);
-        inQueue.offer(Utils.toJSON(m));
-      } else {
-        log.debug("Requesting shard state be set to 'recovery'");
-        DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
-        Map<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-        for (String subSlice : subSlices) {
-          propMap.put(subSlice, Slice.State.RECOVERY.toString());
-        }
-        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
-        ZkNodeProps m = new ZkNodeProps(propMap);
-        inQueue.offer(Utils.toJSON(m));
-      }
-
-      t = timings.sub("createCoresForReplicas");
-      // now actually create replica cores on sub shard nodes
-      for (Map<String, Object> replica : replicas) {
-        ocmh.addReplica(clusterState, new ZkNodeProps(replica), results, null);
-      }
-
-      assert TestInjection.injectSplitFailureAfterReplicaCreation();
-
-      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to create subshard replicas", asyncId, requestMap);
-      t.stop();
-
-      log.info("Successfully created all replica shards for all sub-slices " + subSlices);
-
-      t = timings.sub("finalCommit");
-      ocmh.commit(results, slice.get(), parentShardLeader);
-      t.stop();
-      if (withTiming) {
-        results.add(CommonParams.TIMING, timings.asNamedList());
-      }
-      success = true;
-      // don't unlock the shard yet - the lock is only cleared after the final
-      // switch-over in ReplicaMutator completes (whether it succeeds or fails)
-      return true;
-    } catch (SolrException e) {
-      throw e;
-    } catch (Exception e) {
-      log.error("Error executing split operation for collection: " + collectionName + " parent shard: " + slice, e);
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, e);
-    } finally {
-      if (sessionWrapper != null) sessionWrapper.release();
-      if (!success) {
-        cleanupAfterFailure(zkStateReader, collectionName, parentSlice.getName(), subSlices, offlineSlices);
-        unlockForSplit(ocmh.cloudManager, collectionName, parentSlice.getName());
-      }
-    }
-  }
-
-  private void checkDiskSpace(String collection, String shard, Replica parentShardLeader) throws SolrException {
-    // check that enough disk space is available on the parent leader node
-    // otherwise the actual index splitting will always fail
-    NodeStateProvider nodeStateProvider = ocmh.cloudManager.getNodeStateProvider();
-    Map<String, Object> nodeValues = nodeStateProvider.getNodeValues(parentShardLeader.getNodeName(),
-        Collections.singletonList(ImplicitSnitch.DISK));
-    Map<String, Map<String, List<ReplicaInfo>>> infos = nodeStateProvider.getReplicaInfo(parentShardLeader.getNodeName(),
-        Collections.singletonList(Type.CORE_IDX.metricsAttribute));
-    if (infos.get(collection) == null || infos.get(collection).get(shard) == null) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "missing replica information for parent shard leader");
-    }
-    // find the leader
-    List<ReplicaInfo> lst = infos.get(collection).get(shard);
-    Double indexSize = null;
-    for (ReplicaInfo info : lst) {
-      if (info.getCore().equals(parentShardLeader.getCoreName())) {
-        Number size = (Number)info.getVariable(Type.CORE_IDX.metricsAttribute);
-        if (size == null) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "missing index size information for parent shard leader");
-        }
-        indexSize = (Double) Type.CORE_IDX.convertVal(size);
-        break;
-      }
-    }
-    if (indexSize == null) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "missing replica information for parent shard leader");
-    }
-    Number freeSize = (Number)nodeValues.get(ImplicitSnitch.DISK);
-    if (freeSize == null) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "missing node disk space information for parent shard leader");
-    }
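-    // splitting can transiently use up to roughly twice the parent index size on disk, hence the 2x free-space requirement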
-    if (freeSize.doubleValue() < 2.0 * indexSize) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "not enough free disk space to perform index split on node " +
-          parentShardLeader.getNodeName() + ", required: " + (2 * indexSize) + ", available: " + freeSize);
-    }
-  }
-
-  private void cleanupAfterFailure(ZkStateReader zkStateReader, String collectionName, String parentShard,
-                                   List<String> subSlices, Set<String> offlineSlices) {
-    log.info("Cleaning up after a failed split of " + collectionName + "/" + parentShard);
-    // get the latest state
-    try {
-      zkStateReader.forceUpdateCollection(collectionName);
-    } catch (KeeperException | InterruptedException e) {
-      log.warn("Cleanup failed after failed split of " + collectionName + "/" + parentShard + ": (force update collection)", e);
-      return;
-    }
-    ClusterState clusterState = zkStateReader.getClusterState();
-    DocCollection coll = clusterState.getCollectionOrNull(collectionName);
-
-    if (coll == null) { // may have been deleted
-      return;
-    }
-
-    // set already created sub shards states to CONSTRUCTION - this prevents them
-    // from entering into RECOVERY or ACTIVE (SOLR-9455)
-    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
-    final Map<String, Object> propMap = new HashMap<>();
-    boolean sendUpdateState = false;
-    propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-    propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
-    for (Slice s : coll.getSlices()) {
-      if (!subSlices.contains(s.getName())) {
-        continue;
-      }
-      propMap.put(s.getName(), Slice.State.CONSTRUCTION.toString());
-      sendUpdateState = true;
-    }
-
-    // if parent is inactive activate it again
-    Slice parentSlice = coll.getSlice(parentShard);
-    if (parentSlice.getState() == Slice.State.INACTIVE) {
-      sendUpdateState = true;
-      propMap.put(parentShard, Slice.State.ACTIVE.toString());
-    }
-    // plus any other previously deactivated slices
-    for (String sliceName : offlineSlices) {
-      propMap.put(sliceName, Slice.State.ACTIVE.toString());
-      sendUpdateState = true;
-    }
-
-    if (sendUpdateState) {
-      try {
-        ZkNodeProps m = new ZkNodeProps(propMap);
-        inQueue.offer(Utils.toJSON(m));
-      } catch (Exception e) {
-        // don't give up yet - just log the error, we may still be able to clean up
-        log.warn("Cleanup failed after failed split of " + collectionName + "/" + parentShard + ": (slice state changes)", e);
-      }
-    }
-
-    // delete existing subShards
-    for (String subSlice : subSlices) {
-      Slice s = coll.getSlice(subSlice);
-      if (s == null) {
-        continue;
-      }
-      log.debug("- sub-shard: {} exists therefore requesting its deletion", subSlice);
-      HashMap<String, Object> props = new HashMap<>();
-      props.put(Overseer.QUEUE_OPERATION, "deleteshard");
-      props.put(COLLECTION_PROP, collectionName);
-      props.put(SHARD_ID_PROP, subSlice);
-      ZkNodeProps m = new ZkNodeProps(props);
-      try {
-        ocmh.commandMap.get(DELETESHARD).call(clusterState, m, new NamedList());
-      } catch (Exception e) {
-        log.warn("Cleanup failed after failed split of " + collectionName + "/" + parentShard + ": (deleting existing sub shard " + subSlice + ")", e);
-      }
-    }
-  }
-
-  public static Slice getParentSlice(ClusterState clusterState, String collectionName, AtomicReference<String> slice, String splitKey) {
-    DocCollection collection = clusterState.getCollection(collectionName);
-    DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
-
-    Slice parentSlice;
-
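-    // no shard name was given, so resolve the parent slice from the split.key (only supported by CompositeIdRouter)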
-    if (slice.get() == null) {
-      if (router instanceof CompositeIdRouter) {
-        Collection<Slice> searchSlices = router.getSearchSlicesSingle(splitKey, new ModifiableSolrParams(), collection);
-        if (searchSlices.isEmpty()) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unable to find an active shard for split.key: " + splitKey);
-        }
-        if (searchSlices.size() > 1) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "Splitting a split.key: " + splitKey + " which spans multiple shards is not supported");
-        }
-        parentSlice = searchSlices.iterator().next();
-        slice.set(parentSlice.getName());
-        log.info("Split by route.key: {}, parent shard is: {} ", splitKey, slice);
-      } else {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "Split by route key can only be used with CompositeIdRouter or subclass. Found router: "
-                + router.getClass().getName());
-      }
-    } else {
-      parentSlice = collection.getSlice(slice.get());
-    }
-
-    if (parentSlice == null) {
-      // no chance of the collection being null because ClusterState#getCollection(String) would have thrown
-      // an exception already
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No shard with the specified name exists: " + slice);
-    }
-    return parentSlice;
-  }
-
-  public static String fillRanges(SolrCloudManager cloudManager, ZkNodeProps message, DocCollection collection, Slice parentSlice,
-                                List<DocRouter.Range> subRanges, List<String> subSlices, List<String> subShardNames,
-                                  boolean firstReplicaNrt) {
-    String splitKey = message.getStr("split.key");
-    DocRouter.Range range = parentSlice.getRange();
-    if (range == null) {
-      range = new PlainIdRouter().fullRange();
-    }
-    DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
-
-    String rangesStr = message.getStr(CoreAdminParams.RANGES);
-    if (rangesStr != null) {
-      String[] ranges = rangesStr.split(",");
-      if (ranges.length < 2) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There must be at least two ranges specified to split a shard");
-      } else {
-        for (int i = 0; i < ranges.length; i++) {
-          String r = ranges[i];
-          try {
-            subRanges.add(DocRouter.DEFAULT.fromString(r));
-          } catch (Exception e) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Exception in parsing hexadecimal hash range: " + r, e);
-          }
-          if (!subRanges.get(i).isSubsetOf(range)) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-                "Specified hash range: " + r + " is not a subset of parent shard's range: " + range.toString());
-          }
-        }
-        List<DocRouter.Range> temp = new ArrayList<>(subRanges); // copy to preserve original order
-        Collections.sort(temp);
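-        // the sorted ranges must exactly cover the parent shard's range and be contiguous; both are verified below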
-        if (!range.equals(new DocRouter.Range(temp.get(0).min, temp.get(temp.size() - 1).max))) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "Specified hash ranges: " + rangesStr + " do not cover the entire range of parent shard: " + range);
-        }
-        for (int i = 1; i < temp.size(); i++) {
-          if (temp.get(i - 1).max + 1 != temp.get(i).min) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Specified hash ranges: " + rangesStr
-                + " either overlap with each other or " + "do not cover the entire range of parent shard: " + range);
-          }
-        }
-      }
-    } else if (splitKey != null) {
-      if (router instanceof CompositeIdRouter) {
-        CompositeIdRouter compositeIdRouter = (CompositeIdRouter) router;
-        List<DocRouter.Range> tmpSubRanges = compositeIdRouter.partitionRangeByKey(splitKey, range);
-        if (tmpSubRanges.size() == 1) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The split.key: " + splitKey
-              + " has a hash range that is exactly equal to hash range of shard: " + parentSlice.getName());
-        }
-        for (DocRouter.Range subRange : tmpSubRanges) {
-          if (subRange.min == subRange.max) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The split.key: " + splitKey + " must be a compositeId");
-          }
-        }
-        subRanges.addAll(tmpSubRanges);
-        log.info("Partitioning parent shard " + parentSlice.getName() + " range: " + parentSlice.getRange() + " yields: " + subRanges);
-        rangesStr = "";
-        for (int i = 0; i < subRanges.size(); i++) {
-          DocRouter.Range subRange = subRanges.get(i);
-          rangesStr += subRange.toString();
-          if (i < subRanges.size() - 1) rangesStr += ',';
-        }
-      }
-    } else {
-      // todo: fixed to two partitions?
-      subRanges.addAll(router.partitionRange(2, range));
-    }
-
-    for (int i = 0; i < subRanges.size(); i++) {
-      String subSlice = parentSlice.getName() + "_" + i;
-      subSlices.add(subSlice);
-      String subShardName = Assign.buildSolrCoreName(cloudManager.getDistribStateManager(), collection, subSlice,
-          firstReplicaNrt ? Replica.Type.NRT : Replica.Type.TLOG);
-      subShardNames.add(subShardName);
-    }
-    return rangesStr;
-  }
-
-  public static boolean lockForSplit(SolrCloudManager cloudManager, String collection, String shard) throws Exception {
-    String path = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection + "/" + shard + "-splitting";
-    if (cloudManager.getDistribStateManager().hasData(path)) {
-      return false;
-    }
-    Map<String, Object> map = new HashMap<>();
-    map.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(cloudManager.getTimeSource().getEpochTimeNs()));
-    byte[] data = Utils.toJSON(map);
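-    // the lock node is created EPHEMERAL below, so it disappears automatically if the creating ZooKeeper session expires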
-    try {
-      cloudManager.getDistribStateManager().makePath(path, data, CreateMode.EPHEMERAL, true);
-    } catch (Exception e) {
-      throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Can't lock parent slice for splitting (another split operation running?): " +
-          collection + "/" + shard, e);
-    }
-    return true;
-  }
-
-  public static void unlockForSplit(SolrCloudManager cloudManager, String collection, String shard) throws Exception {
-    String path = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection + "/" + shard + "-splitting";
-    cloudManager.getDistribStateManager().removeRecursively(path, true, true);
-  }
-}


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java b/solr/core/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java
deleted file mode 100644
index 54fdf99..0000000
--- a/solr/core/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java
+++ /dev/null
@@ -1,537 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.IOException;
-import java.io.Reader;
-import java.io.StringReader;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.lang.ArrayUtils;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.tokenattributes.BytesTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.analysis.util.CharFilterFactory;
-import org.apache.lucene.analysis.util.TokenFilterFactory;
-import org.apache.lucene.analysis.util.TokenizerFactory;
-import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.Attribute;
-import org.apache.lucene.util.AttributeImpl;
-import org.apache.lucene.util.AttributeReflector;
-import org.apache.lucene.util.AttributeSource;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.CharsRefBuilder;
-import org.apache.lucene.util.IOUtils;
-import org.apache.solr.analysis.TokenizerChain;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.schema.FieldType;
-
-/**
- * A base class for all analysis request handlers.
- *
- * @since solr 1.4
- */
-public abstract class AnalysisRequestHandlerBase extends RequestHandlerBase {
-
-  public static final Set<BytesRef> EMPTY_BYTES_SET = Collections.emptySet();
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    rsp.add("analysis", doAnalysis(req));
-  }
-
-  /**
-   * Performs the analysis based on the given solr request and returns the analysis result as a named list.
-   *
-   * @param req The solr request.
-   *
-   * @return The analysis result as a named list.
-   *
-   * @throws Exception When analysis fails.
-   */
-  protected abstract NamedList doAnalysis(SolrQueryRequest req) throws Exception;
-
-  /**
-   * Analyzes the given value using the given Analyzer.
-   *
-   * @param value   Value to analyze
-   * @param context The {@link AnalysisContext analysis context}.
-   *
-   * @return NamedList containing the tokens produced by analyzing the given value
-   */
-  protected NamedList<? extends Object> analyzeValue(String value, AnalysisContext context) {
-
-    Analyzer analyzer = context.getAnalyzer();
-
-    if (!TokenizerChain.class.isInstance(analyzer)) {
-
-      try (TokenStream tokenStream = analyzer.tokenStream(context.getFieldName(), value)) {
-        NamedList<List<NamedList>> namedList = new NamedList<>();
-        namedList.add(tokenStream.getClass().getName(), convertTokensToNamedLists(analyzeTokenStream(tokenStream), context));
-        return namedList;
-      } catch (IOException e) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
-      }
-    }
-
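-    // this is a Solr TokenizerChain: run the char filters, the tokenizer and each token filter separately so that every stage's output can be reported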
-    TokenizerChain tokenizerChain = (TokenizerChain) analyzer;
-    CharFilterFactory[] cfiltfacs = tokenizerChain.getCharFilterFactories();
-    TokenizerFactory tfac = tokenizerChain.getTokenizerFactory();
-    TokenFilterFactory[] filtfacs = tokenizerChain.getTokenFilterFactories();
-
-    NamedList<Object> namedList = new NamedList<>();
-
-    if (0 < cfiltfacs.length) {
-      String source = value;
-      for(CharFilterFactory cfiltfac : cfiltfacs ){
-        try (Reader sreader = new StringReader(source);
-             Reader reader = cfiltfac.create(sreader)) {
-          source = writeCharStream(namedList, reader);
-        } catch (IOException e) {
-          // do nothing.
-        }
-      }
-    }
-
-    TokenStream tokenStream = tfac.create();
-    ((Tokenizer)tokenStream).setReader(tokenizerChain.initReader(null, new StringReader(value)));
-    List<AttributeSource> tokens = analyzeTokenStream(tokenStream);
-
-    namedList.add(tokenStream.getClass().getName(), convertTokensToNamedLists(tokens, context));
-
-    ListBasedTokenStream listBasedTokenStream = new ListBasedTokenStream(tokenStream, tokens);
-
-    for (TokenFilterFactory tokenFilterFactory : filtfacs) {
-      for (final AttributeSource tok : tokens) {
-        tok.getAttribute(TokenTrackingAttribute.class).freezeStage();
-      }
-      // overwrite the vars "tokenStream", "tokens", and "listBasedTokenStream"
-      tokenStream = tokenFilterFactory.create(listBasedTokenStream);
-      tokens = analyzeTokenStream(tokenStream);
-      namedList.add(tokenStream.getClass().getName(), convertTokensToNamedLists(tokens, context));
-      try {
-        listBasedTokenStream.close();
-      } catch (IOException e) {
-        // do nothing;
-      }
-      listBasedTokenStream = new ListBasedTokenStream(listBasedTokenStream, tokens);
-    }
-
-    try {
-      listBasedTokenStream.close();
-    } catch (IOException e) {
-      // do nothing.
-    }
-    return namedList;
-  }
-
-  /**
-   * Analyzes the given text using the given analyzer and returns the produced tokens.
-   *
-   * @param query    The query to analyze.
-   * @param analyzer The analyzer to use.
-   */
-  protected Set<BytesRef> getQueryTokenSet(String query, Analyzer analyzer) {
-    try (TokenStream tokenStream = analyzer.tokenStream("", query)){
-      final Set<BytesRef> tokens = new HashSet<>();
-      final TermToBytesRefAttribute bytesAtt = tokenStream.getAttribute(TermToBytesRefAttribute.class);
-
-      tokenStream.reset();
-
-      while (tokenStream.incrementToken()) {
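-        // deep-copy the term bytes: the attribute's BytesRef is reused for every token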
-        tokens.add(BytesRef.deepCopyOf(bytesAtt.getBytesRef()));
-      }
-
-      tokenStream.end();
-      return tokens;
-    } catch (IOException ioe) {
-      throw new RuntimeException("Error occured while iterating over tokenstream", ioe);
-    }
-  }
-
-  /**
-   * Analyzes the given TokenStream, collecting the Tokens it produces.
-   *
-   * @param tokenStream TokenStream to analyze
-   *
-   * @return List of tokens produced from the TokenStream
-   */
-  private List<AttributeSource> analyzeTokenStream(TokenStream tokenStream) {
-    final List<AttributeSource> tokens = new ArrayList<>();
-    final PositionIncrementAttribute posIncrAtt = tokenStream.addAttribute(PositionIncrementAttribute.class);
-    final TokenTrackingAttribute trackerAtt = tokenStream.addAttribute(TokenTrackingAttribute.class);
-    // for backwards compatibility, add all "common" attributes
-    tokenStream.addAttribute(OffsetAttribute.class);
-    tokenStream.addAttribute(TypeAttribute.class);
-    try {
-      tokenStream.reset();
-      int position = 0;
-      while (tokenStream.incrementToken()) {
-        position += posIncrAtt.getPositionIncrement();
-        trackerAtt.setActPosition(position);
-        tokens.add(tokenStream.cloneAttributes());
-      }
-      tokenStream.end(); // TODO should we capture?
-    } catch (IOException ioe) {
-      throw new RuntimeException("Error occured while iterating over tokenstream", ioe);
-    } finally {
-      IOUtils.closeWhileHandlingException(tokenStream);
-    }
-
-    return tokens;
-  }
-
-  // a static mapping of the reflected attribute keys to the names used in Solr 1.4
-  static Map<String,String> ATTRIBUTE_MAPPING = Collections.unmodifiableMap(new HashMap<String,String>() {{
-    put(OffsetAttribute.class.getName() + "#startOffset", "start");
-    put(OffsetAttribute.class.getName() + "#endOffset", "end");
-    put(TypeAttribute.class.getName() + "#type", "type");
-    put(TokenTrackingAttribute.class.getName() + "#position", "position");
-    put(TokenTrackingAttribute.class.getName() + "#positionHistory", "positionHistory");
-  }});
-
-  /**
-   * Converts the list of Tokens to a list of NamedLists representing the tokens.
-   *
-   * @param tokenList  Tokens to convert
-   * @param context The analysis context
-   *
-   * @return List of NamedLists containing the relevant information taken from the tokens
-   */
-  private List<NamedList> convertTokensToNamedLists(final List<AttributeSource> tokenList, AnalysisContext context) {
-    final List<NamedList> tokensNamedLists = new ArrayList<>();
-    final FieldType fieldType = context.getFieldType();
-    final AttributeSource[] tokens = tokenList.toArray(new AttributeSource[tokenList.size()]);
-    
-    // sort the tokens by absolute position
-    ArrayUtil.timSort(tokens, new Comparator<AttributeSource>() {
-      @Override
-      public int compare(AttributeSource a, AttributeSource b) {
-        return arrayCompare(
-          a.getAttribute(TokenTrackingAttribute.class).getPositions(),
-          b.getAttribute(TokenTrackingAttribute.class).getPositions()
-        );
-      }
-      
-      private int arrayCompare(int[] a, int[] b) {
-        int p = 0;
-        final int stop = Math.min(a.length, b.length);
-        while(p < stop) {
-          int diff = a[p] - b[p];
-          if (diff != 0) return diff;
-          p++;
-        }
-        // One is a prefix of the other, or, they are equal:
-        return a.length - b.length;
-      }
-    });
-
-    for (int i = 0; i < tokens.length; i++) {
-      AttributeSource token = tokens[i];
-      final NamedList<Object> tokenNamedList = new SimpleOrderedMap<>();
-      final BytesRef rawBytes;
-      if (token.hasAttribute(BytesTermAttribute.class)) {
-        final BytesTermAttribute bytesAtt = token.getAttribute(BytesTermAttribute.class);
-        rawBytes = bytesAtt.getBytesRef(); 
-      } else {
-        final TermToBytesRefAttribute termAtt = token.getAttribute(TermToBytesRefAttribute.class);
-        rawBytes = termAtt.getBytesRef();
-      }
-      final String text = fieldType.indexedToReadable(rawBytes, new CharsRefBuilder()).toString();
-      tokenNamedList.add("text", text);
-      
-      if (token.hasAttribute(CharTermAttribute.class)) {
-        final String rawText = token.getAttribute(CharTermAttribute.class).toString();
-        if (!rawText.equals(text)) {
-          tokenNamedList.add("raw_text", rawText);
-        }
-      }
-
-      tokenNamedList.add("raw_bytes", rawBytes.toString());
-
-      if (context.getTermsToMatch().contains(rawBytes)) {
-        tokenNamedList.add("match", true);
-      }
-
-      token.reflectWith(new AttributeReflector() {
-        @Override
-        public void reflect(Class<? extends Attribute> attClass, String key, Object value) {
-          // leave out position and bytes term
-          if (TermToBytesRefAttribute.class.isAssignableFrom(attClass))
-            return;
-          if (CharTermAttribute.class.isAssignableFrom(attClass))
-            return;
-          if (PositionIncrementAttribute.class.isAssignableFrom(attClass))
-            return;
-          
-          String k = attClass.getName() + '#' + key;
-          
-          // map keys for "standard attributes":
-          if (ATTRIBUTE_MAPPING.containsKey(k)) {
-            k = ATTRIBUTE_MAPPING.get(k);
-          }
-          
-          if (value instanceof BytesRef) {
-            final BytesRef p = (BytesRef) value;
-            value = p.toString();
-          }
-
-          tokenNamedList.add(k, value);
-        }
-      });
-
-      tokensNamedLists.add(tokenNamedList);
-    }
-
-    return tokensNamedLists;
-  }
-  
-  private String writeCharStream(NamedList<Object> out, Reader input ){
-    final int BUFFER_SIZE = 1024;
-    char[] buf = new char[BUFFER_SIZE];
-    int len = 0;
-    StringBuilder sb = new StringBuilder();
-    do {
-      try {
-        len = input.read( buf, 0, BUFFER_SIZE );
-      } catch (IOException e) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
-      }
-      if( len > 0 )
-        sb.append(buf, 0, len);
-    } while( len == BUFFER_SIZE );
-    out.add( input.getClass().getName(), sb.toString());
-    return sb.toString();
-  }
-
-  // ================================================= Inner classes =================================================
-  /**
-   * TokenStream that iterates over a list of pre-existing Tokens
-   * @lucene.internal
-   */
-  protected final static class ListBasedTokenStream extends TokenStream {
-    private final List<AttributeSource> tokens;
-    private Iterator<AttributeSource> tokenIterator;
-
-    /**
-     * Creates a new ListBasedTokenStream which uses the given tokens as its token source.
-     *
-     * @param attributeSource source of the attribute factory and attribute impls
-     * @param tokens Source of tokens to be used
-     */
-    ListBasedTokenStream(AttributeSource attributeSource, List<AttributeSource> tokens) {
-      super(attributeSource.getAttributeFactory());
-      this.tokens = tokens;
-      // Make sure all the attributes of the source are here too
-      addAttributes(attributeSource);
-    }
-
-    @Override
-    public void reset() throws IOException {
-      super.reset();
-      tokenIterator = tokens.iterator();
-    }
-
-    @Override
-    public boolean incrementToken() {
-      if (tokenIterator.hasNext()) {
-        clearAttributes();
-        AttributeSource next = tokenIterator.next();
-
-        addAttributes(next); // just in case there were delayed attribute additions
-
-        next.copyTo(this);
-        return true;
-      } else {
-        return false;
-      }
-    }
-
-
-    protected void addAttributes(AttributeSource attributeSource) {
-      // note: ideally we wouldn't call addAttributeImpl which is marked internal. But nonetheless it's possible
-      //  this method is used by some custom attributes, especially since Solr doesn't provide a way to customize the
-      //  AttributeFactory which is the recommended way to choose which classes implement which attributes.
-      Iterator<AttributeImpl> atts = attributeSource.getAttributeImplsIterator();
-      while (atts.hasNext()) {
-        addAttributeImpl(atts.next()); // adds both impl & interfaces
-      }
-    }
-  }
-
-  /** This is an {@link Attribute} used to track the positions of tokens
-   * in the analysis chain.
-   * @lucene.internal This class is only public for usage by the {@link AttributeSource} API.
-   */
-  public interface TokenTrackingAttribute extends Attribute {
-    void freezeStage();
-    void setActPosition(int pos);
-    int[] getPositions();
-    void reset(int[] basePositions, int position);
-  }
-
-  /** Implementation of {@link TokenTrackingAttribute}.
-   * @lucene.internal This class is only public for usage by the {@link AttributeSource} API.
-   */
-  public static final class TokenTrackingAttributeImpl extends AttributeImpl implements TokenTrackingAttribute {
-    private int[] basePositions = new int[0];
-    private int position = 0;
-    private transient int[] cachedPositions = null;
-
-    @Override
-    public void freezeStage() {
-      this.basePositions = getPositions();
-      this.position = 0;
-      this.cachedPositions = null;
-    }
-    
-    @Override
-    public void setActPosition(int pos) {
-      this.position = pos;
-      this.cachedPositions = null;
-    }
-    
-    @Override
-    public int[] getPositions() {
-      if (cachedPositions == null) {
-        cachedPositions = ArrayUtils.add(basePositions, position);
-      }
-      return cachedPositions;
-    }
-    
-    @Override
-    public void reset(int[] basePositions, int position) {
-      this.basePositions = basePositions;
-      this.position = position;
-      this.cachedPositions = null;
-    }
-    
-    @Override
-    public void clear() {
-      // we do nothing here, as all attribute values are controlled externally by consumer
-    }
-    
-    @Override
-    public void reflectWith(AttributeReflector reflector) {
-      reflector.reflect(TokenTrackingAttribute.class, "position", position);
-      // convert to Integer[] array, as only such one can be serialized by ResponseWriters
-      reflector.reflect(TokenTrackingAttribute.class, "positionHistory", ArrayUtils.toObject(getPositions()));
-    }
-
-    @Override
-    public void copyTo(AttributeImpl target) {
-      final TokenTrackingAttribute t = (TokenTrackingAttribute) target;
-      t.reset(basePositions, position);
-    }
-  }
-
-  /**
-   * Serves as the context of an analysis process. This context contains the following constructs
-   */
-  protected static class AnalysisContext {
-
-    private final String fieldName;
-    private final FieldType fieldType;
-    private final Analyzer analyzer;
-    private final Set<BytesRef> termsToMatch;
-
-    /**
-     * Constructs a new AnalysisContext with a given field type, analyzer and
-     * termsToMatch. By default the field name in this context will be
-     * {@code null}. During the analysis process, the produced tokens will
-     * be compared to the terms in the {@code termsToMatch} set. When found,
-     * these tokens will be marked as a match.
-     *
-     * @param fieldType    The type of the field the analysis is performed on.
-     * @param analyzer     The analyzer to be used.
-     * @param termsToMatch Holds all the terms that should match during the 
-     *                     analysis process.
-     */
-    public AnalysisContext(FieldType fieldType, Analyzer analyzer, Set<BytesRef> termsToMatch) {
-      this(null, fieldType, analyzer, termsToMatch);
-    }
-
-    /**
-     * Constructs an AnalysisContext with a given field name, field type 
-     * and analyzer. By default this context will hold no terms to match
-     *
-     * @param fieldName The name of the field the analysis is performed on 
-     *                  (may be {@code null}).
-     * @param fieldType The type of the field the analysis is performed on.
-     * @param analyzer  The analyzer to be used during the analysis process.
-     *
-     */
-    public AnalysisContext(String fieldName, FieldType fieldType, Analyzer analyzer) {
-      this(fieldName, fieldType, analyzer, EMPTY_BYTES_SET);
-    }
-
-    /**
-     * Constructs a new AnalysisContext with a given field type, analyzer and
-     * termsToMatch. During the analysis process, the produced tokens will be 
-     * compared to the terms in the {@code termsToMatch} set. When found, 
-     * these tokens will be marked as a match.
-     *
-     * @param fieldName    The name of the field the analysis is performed on 
-     *                     (may be {@code null}).
-     * @param fieldType    The type of the field the analysis is performed on.
-     * @param analyzer     The analyzer to be used.
-     * @param termsToMatch Holds all the terms that should match during the 
-     *                     analysis process.
-     */
-    public AnalysisContext(String fieldName, FieldType fieldType, Analyzer analyzer, Set<BytesRef> termsToMatch) {
-      this.fieldName = fieldName;
-      this.fieldType = fieldType;
-      this.analyzer = analyzer;
-      this.termsToMatch = termsToMatch;
-    }
-
-    public String getFieldName() {
-      return fieldName;
-    }
-
-    public FieldType getFieldType() {
-      return fieldType;
-    }
-
-    public Analyzer getAnalyzer() {
-      return analyzer;
-    }
-
-    public Set<BytesRef> getTermsToMatch() {
-      return termsToMatch;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/AnalyzeEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/AnalyzeEvaluator.java b/solr/core/src/java/org/apache/solr/handler/AnalyzeEvaluator.java
deleted file mode 100644
index 4112e04..0000000
--- a/solr/core/src/java/org/apache/solr/handler/AnalyzeEvaluator.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.solr.client.solrj.io.Tuple;
-import org.apache.solr.client.solrj.io.eval.SourceEvaluator;
-import org.apache.solr.client.solrj.io.stream.StreamContext;
-import org.apache.solr.client.solrj.io.stream.expr.Explanation;
-import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType;
-import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
-import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionParameter;
-import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionValue;
-import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
-import org.apache.solr.common.SolrException;
-import org.apache.lucene.analysis.*;
-import org.apache.solr.core.SolrCore;
-
-public class AnalyzeEvaluator extends SourceEvaluator {
-  private static final long serialVersionUID = 1L;
-
-  private String fieldName;
-  private String analyzerField;
-  private Analyzer analyzer;
-
-  public AnalyzeEvaluator(String _fieldName, String _analyzerField) {
-    init(_fieldName, _analyzerField);
-  }
-
-  public AnalyzeEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
-    String _fieldName = factory.getValueOperand(expression, 0);
-    String _analyzerField = factory.getValueOperand(expression, 1);
-    init(_fieldName, _analyzerField);
-  }
-
-  public void setStreamContext(StreamContext context) {
-    this.streamContext = context;
-    Object solrCoreObj = context.get("solr-core");
-    if (!(solrCoreObj instanceof SolrCore)) {
-      throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "StreamContext must have SolrCore in solr-core key");
-    }
-
-    analyzer = ((SolrCore) solrCoreObj).getLatestSchema().getFieldType(analyzerField).getIndexAnalyzer();
-  }
-
-  private void init(String fieldName, String analyzerField) {
-    this.fieldName = fieldName;
-    if(analyzerField == null) {
-      this.analyzerField = fieldName;
-    } else {
-      this.analyzerField = analyzerField;
-    }
-  }
-
-  @Override
-  public Object evaluate(Tuple tuple) throws IOException {
-    String value = null;
-    Object obj = tuple.get(fieldName);
-
-    if(obj == null) {
-      value = fieldName;
-    } else {
-      value = obj.toString();
-    }
-
-    List<String> tokens = new ArrayList<>();
-
-    try(TokenStream tokenStream = analyzer.tokenStream(analyzerField, value)) {
-      CharTermAttribute termAtt = tokenStream.getAttribute(CharTermAttribute.class);
-      tokenStream.reset();
-      while (tokenStream.incrementToken()) {
-        tokens.add(termAtt.toString());
-      }
-      tokenStream.end();
-    }
-    return tokens;
-  }
-
-  @Override
-  public StreamExpressionParameter toExpression(StreamFactory factory) throws IOException {
-    return new StreamExpressionValue(fieldName);
-  }
-
-  @Override
-  public Explanation toExplanation(StreamFactory factory) throws IOException {
-    return new Explanation(nodeId.toString())
-        .withExpressionType(ExpressionType.EVALUATOR)
-        .withImplementingClass(getClass().getName())
-        .withExpression(toExpression(factory).toString());
-  }
-
-}
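
The evaluate() loop above is the standard Lucene token-consumption idiom (reset, incrementToken, end, close). A self-contained sketch of the same pattern, using WhitespaceAnalyzer purely for illustration:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class TokenizeDemo {
      public static void main(String[] args) throws IOException {
        Analyzer analyzer = new WhitespaceAnalyzer();
        List<String> tokens = new ArrayList<>();
        // try-with-resources closes the stream, as in AnalyzeEvaluator.evaluate()
        try (TokenStream ts = analyzer.tokenStream("body", "hello solr world")) {
          CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
          ts.reset();
          while (ts.incrementToken()) {
            tokens.add(termAtt.toString());
          }
          ts.end();
        }
        System.out.println(tokens); // [hello, solr, world]
      }
    }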

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/BlobHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/BlobHandler.java b/solr/core/src/java/org/apache/solr/handler/BlobHandler.java
deleted file mode 100644
index ee3adae..0000000
--- a/solr/core/src/java/org/apache/solr/handler/BlobHandler.java
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.lang.invoke.MethodHandles;
-import java.math.BigInteger;
-import java.nio.ByteBuffer;
-import java.security.MessageDigest;
-import java.util.Collection;
-import java.util.Date;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.search.TopFieldDocs;
-import org.apache.solr.api.Api;
-import org.apache.solr.api.ApiBag;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.MapSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.ContentStream;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.core.PluginInfo;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.request.LocalSolrQueryRequest;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.request.SolrRequestInfo;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.schema.FieldType;
-import org.apache.solr.search.QParser;
-import org.apache.solr.security.AuthorizationContext;
-import org.apache.solr.security.PermissionNameProvider;
-import org.apache.solr.update.AddUpdateCommand;
-import org.apache.solr.update.CommitUpdateCommand;
-import org.apache.solr.update.processor.UpdateRequestProcessor;
-import org.apache.solr.update.processor.UpdateRequestProcessorChain;
-import org.apache.solr.util.SimplePostTool;
-import org.apache.solr.util.plugin.PluginInfoInitialized;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static java.util.Collections.singletonMap;
-import static org.apache.solr.common.params.CommonParams.ID;
-import static org.apache.solr.common.params.CommonParams.JSON;
-import static org.apache.solr.common.params.CommonParams.SORT;
-import static org.apache.solr.common.params.CommonParams.VERSION;
-import static org.apache.solr.common.util.Utils.makeMap;
-
-public class BlobHandler extends RequestHandlerBase implements PluginInfoInitialized , PermissionNameProvider {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private static final long DEFAULT_MAX_SIZE = 5 * 1024 * 1024; // 5MB
-  private long maxSize = DEFAULT_MAX_SIZE;
-
-  @Override
-  public void handleRequestBody(final SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    String httpMethod = req.getHttpMethod();
-    String path = (String) req.getContext().get("path");
-    RequestHandlerUtils.setWt(req, JSON);
-
-    List<String> pieces = StrUtils.splitSmart(path, '/');
-    String blobName = null;
-    if (pieces.size() >= 3) blobName = pieces.get(2);
-
-    if ("POST".equals(httpMethod)) {
-      if (blobName == null || blobName.isEmpty()) {
-        rsp.add("error", "Name not found");
-        return;
-      }
-      String err = SolrConfigHandler.validateName(blobName);
-      if (err != null) {
-        log.warn("no blob name");
-        rsp.add("error", err);
-        return;
-      }
-      if (req.getContentStreams() == null) {
-        log.warn("no content stream");
-        rsp.add("error", "No stream");
-        return;
-      }
-
-
-      for (ContentStream stream : req.getContentStreams()) {
-        ByteBuffer payload;
-        try (InputStream is = stream.getStream()) {
-          payload = SimplePostTool.inputStreamToByteArray(is, maxSize);
-        }
-        MessageDigest m = MessageDigest.getInstance("MD5");
-        m.update(payload.array(), payload.position(), payload.limit());
-        String md5 = new BigInteger(1, m.digest()).toString(16);
-
-        int duplicateCount = req.getSearcher().count(new TermQuery(new Term("md5", md5)));
-        if (duplicateCount > 0) {
-          rsp.add("error", "duplicate entry");
-          forward(req, null,
-              new MapSolrParams((Map) makeMap(
-                  "q", "md5:" + md5,
-                  "fl", "id,size,version,timestamp,blobName")),
-              rsp);
-          log.warn("duplicate entry for blob :" + blobName);
-          return;
-        }
-
-        TopFieldDocs docs = req.getSearcher().search(new TermQuery(new Term("blobName", blobName)),
-            1, new Sort(new SortField("version", SortField.Type.LONG, true)));
-
-        long version = 0;
-        if (docs.totalHits.value > 0) {
-          Document doc = req.getSearcher().doc(docs.scoreDocs[0].doc);
-          Number n = doc.getField("version").numericValue();
-          version = n.longValue();
-        }
-        version++;
-        String id = blobName + "/" + version;
-        Map<String, Object> doc = makeMap(
-            ID, id,
-            CommonParams.TYPE, "blob",
-            "md5", md5,
-            "blobName", blobName,
-            VERSION, version,
-            "timestamp", new Date(),
-            "size", payload.limit(),
-            "blob", payload);
-        verifyWithRealtimeGet(blobName, version, req, doc);
-        log.info(StrUtils.formatString("inserting new blob {0} ,size {1}, md5 {2}", doc.get(ID), String.valueOf(payload.limit()), md5));
-        indexMap(req, rsp, doc);
-        log.info(" Successfully Added and committed a blob with id {} and size {} ", id, payload.limit());
-
-        break;
-      }
-
-    } else {
-      int version = -1;
-      if (pieces.size() > 3) {
-        try {
-          version = Integer.parseInt(pieces.get(3));
-        } catch (NumberFormatException e) {
-          rsp.add("error", "Invalid version" + pieces.get(3));
-          return;
-        }
-
-      }
-      if (ReplicationHandler.FILE_STREAM.equals(req.getParams().get(CommonParams.WT))) {
-        if (blobName == null) {
-          throw new SolrException(SolrException.ErrorCode.NOT_FOUND, "Please send the request in the format /blob/<blobName>/<version>");
-        } else {
-          String q = "blobName:{0}";
-          if (version != -1) q = "id:{0}/{1}";
-          QParser qparser = QParser.getParser(StrUtils.formatString(q, blobName, version), req);
-          final TopDocs docs = req.getSearcher().search(qparser.parse(), 1, new Sort(new SortField("version", SortField.Type.LONG, true)));
-          if (docs.totalHits.value > 0) {
-            rsp.add(ReplicationHandler.FILE_STREAM, new SolrCore.RawWriter() {
-
-              @Override
-              public void write(OutputStream os) throws IOException {
-                Document doc = req.getSearcher().doc(docs.scoreDocs[0].doc);
-                IndexableField sf = doc.getField("blob");
-                FieldType fieldType = req.getSchema().getField("blob").getType();
-                ByteBuffer buf = (ByteBuffer) fieldType.toObject(sf);
-                if (buf == null) {
-                  // should never happen unless a user wrote this document directly
-                  throw new SolrException(SolrException.ErrorCode.NOT_FOUND, "Invalid document. No field called blob");
-                } else {
-                  os.write(buf.array(), 0, buf.limit());
-                }
-              }
-            });
-
-          } else {
-            throw new SolrException(SolrException.ErrorCode.NOT_FOUND,
-                StrUtils.formatString("Invalid combination of blobName {0} and version {1}", blobName, version));
-          }
-
-        }
-      } else {
-        String q = "*:*";
-        if (blobName != null) {
-          q = "blobName:{0}";
-          if (version != -1) {
-            q = "id:{0}/{1}";
-          }
-        }
-
-        forward(req, null,
-            new MapSolrParams((Map) makeMap(
-                "q", StrUtils.formatString(q, blobName, version),
-                "fl", "id,size,version,timestamp,blobName,md5",
-                SORT, "version desc"))
-            , rsp);
-      }
-    }
-  }
-
-  private void verifyWithRealtimeGet(String blobName, long version, SolrQueryRequest req, Map<String, Object> doc) {
-    for (; ; ) {
-      SolrQueryResponse response = new SolrQueryResponse();
-      String id = blobName + "/" + version;
-      forward(req, "/get", new MapSolrParams(singletonMap(ID, id)), response);
-      if (response.getValues().get("doc") == null) {
-        //ensure that the version does not exist
-        return;
-      } else {
-        log.info("id {} already exists trying next ", id);
-        version++;
-        doc.put("version", version);
-        id = blobName + "/" + version;
-        doc.put(ID, id);
-      }
-    }
-
-  }
-
-  public static void indexMap(SolrQueryRequest req, SolrQueryResponse rsp, Map<String, Object> doc) throws IOException {
-    SolrInputDocument solrDoc = new SolrInputDocument();
-    for (Map.Entry<String, Object> e : doc.entrySet()) solrDoc.addField(e.getKey(), e.getValue());
-    UpdateRequestProcessorChain processorChain = req.getCore().getUpdateProcessorChain(req.getParams());
-    try (UpdateRequestProcessor processor = processorChain.createProcessor(req, rsp)) {
-      AddUpdateCommand cmd = new AddUpdateCommand(req);
-      cmd.solrDoc = solrDoc;
-      log.info("Adding doc: " + doc);
-      processor.processAdd(cmd);
-      log.info("committing doc: " + doc);
-      processor.processCommit(new CommitUpdateCommand(req, false));
-      processor.finish();
-    }
-  }
-
-  @Override
-  public SolrRequestHandler getSubHandler(String subPath) {
-    if (StrUtils.splitSmart(subPath, '/').size() > 4) return null;
-    return this;
-  }
-
-
-//////////////////////// SolrInfoMBeans methods //////////////////////
-
-  @Override
-  public String getDescription() {
-    return "Load Jars into a system index";
-  }
-
-
-  @Override
-  public void init(PluginInfo info) {
-    super.init(info.initArgs);
-    if (info.initArgs != null) {
-      NamedList invariants = (NamedList) info.initArgs.get(PluginInfo.INVARIANTS);
-      if (invariants != null) {
-        Object o = invariants.get("maxSize");
-        if (o != null) {
-          maxSize = Long.parseLong(String.valueOf(o));
-          maxSize = maxSize * 1024 * 1024;
-        }
-      }
-
-    }
-  }
-
-  // This does not work for the general case of forwarding requests.  It probably currently
-  // works OK for real-time get (which is all that BlobHandler uses it for).
-  private static void forward(SolrQueryRequest req, String handler ,SolrParams params, SolrQueryResponse rsp){
-    LocalSolrQueryRequest r = new LocalSolrQueryRequest(req.getCore(), params);
-    SolrRequestInfo.getRequestInfo().addCloseHook( r );  // Close as late as possible...
-    req.getCore().getRequestHandler(handler).handleRequest(r, rsp);
-  }
-
-  @Override
-  public Boolean registerV2() {
-    return Boolean.TRUE;
-  }
-
-  @Override
-  public Collection<Api> getApis() {
-    return ApiBag.wrapRequestHandlers(this, "core.system.blob", "core.system.blob.upload");
-  }
-
-  @Override
-  public Name getPermissionName(AuthorizationContext ctx) {
-    switch (ctx.getHttpMethod()) {
-      case "GET":
-        return Name.READ_PERM;
-      case "POST":
-        return Name.UPDATE_PERM;
-      default:
-        return null;
-    }
-
-  }
-}
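
The duplicate check in handleRequestBody keys on an MD5 hex digest of the payload; a self-contained sketch of the same computation:

    import java.math.BigInteger;
    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;

    public class Md5Demo {
      public static void main(String[] args) throws Exception {
        ByteBuffer payload = ByteBuffer.wrap("blob bytes".getBytes(StandardCharsets.UTF_8));
        MessageDigest m = MessageDigest.getInstance("MD5");
        // Hash the buffer's readable window, as BlobHandler does.
        m.update(payload.array(), payload.position(), payload.limit());
        String md5 = new BigInteger(1, m.digest()).toString(16);
        System.out.println(md5);
      }
    }

Note that BigInteger.toString(16) drops leading zero digits, so the digest string can be shorter than 32 characters; the scheme stays consistent because the stored and the queried digests are produced the same way.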

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/CalciteJDBCStream.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/CalciteJDBCStream.java b/solr/core/src/java/org/apache/solr/handler/CalciteJDBCStream.java
deleted file mode 100644
index fd76cbf..0000000
--- a/solr/core/src/java/org/apache/solr/handler/CalciteJDBCStream.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.IOException;
-import java.sql.Array;
-import java.sql.ResultSet;
-import java.sql.ResultSetMetaData;
-import java.sql.SQLException;
-import java.util.Properties;
-
-import org.apache.solr.client.solrj.io.comp.StreamComparator;
-import org.apache.solr.client.solrj.io.stream.JDBCStream;
-
-/**
- * Used with o.a.s.Handler.SQLHandler.
- * 
- * @lucene.internal
- * @since 7.0.0
- */
-public class CalciteJDBCStream extends JDBCStream {
-  private static final long serialVersionUID = 1L;
-
-  public CalciteJDBCStream(String connectionUrl, String sqlQuery, StreamComparator definedSort,
-      Properties connectionProperties, String driverClassName) throws IOException {
-    super(connectionUrl, sqlQuery, definedSort, connectionProperties, driverClassName);
-  }
-
-  @Override
-  protected ResultSetValueSelector determineValueSelector(int columnIdx, ResultSetMetaData metadata)
-      throws SQLException {
-    ResultSetValueSelector valueSelector = super.determineValueSelector(columnIdx, metadata);
-    if (valueSelector == null) {
-      final int columnNumber = columnIdx + 1;
-      final String columnName = metadata.getColumnLabel(columnNumber);
-      final String className = metadata.getColumnClassName(columnNumber);
-      if (Array.class.getName().equals(className)) {
-        valueSelector = new ResultSetValueSelector() {
-          @Override
-          public Object selectValue(ResultSet resultSet) throws SQLException {
-            Object o = resultSet.getObject(columnNumber);
-            if (resultSet.wasNull()) {
-              return null;
-            }
-            if (o instanceof Array) {
-              Array array = (Array) o;
-              return array.getArray();
-            } else {
-              return o;
-            }
-          }
-
-          @Override
-          public String getColumnName() {
-            return columnName;
-          }
-        };
-      }
-    }
-    return valueSelector;
-  }
-}
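
Outside the stream machinery, the selector's contract is just null-safe column reading with java.sql.Array unwrapped to a plain Java array. A sketch of that logic against raw JDBC (the method name is illustrative):

    import java.sql.Array;
    import java.sql.ResultSet;
    import java.sql.SQLException;

    // columnNumber is 1-based, as everywhere in JDBC.
    static Object readColumn(ResultSet rs, int columnNumber) throws SQLException {
      Object o = rs.getObject(columnNumber);
      if (rs.wasNull()) {
        return null;                      // distinguish SQL NULL from a value
      }
      if (o instanceof Array) {
        return ((Array) o).getArray();    // materialize the SQL array
      }
      return o;
    }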

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/CdcrBufferManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrBufferManager.java b/solr/core/src/java/org/apache/solr/handler/CdcrBufferManager.java
deleted file mode 100644
index 8696379..0000000
--- a/solr/core/src/java/org/apache/solr/handler/CdcrBufferManager.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.update.CdcrUpdateLog;
-
-/**
- * This manager is responsible for enabling or disabling the buffering of the update logs. Currently, buffering
- * is always active on non-leader nodes. On leader nodes, it is enabled only if the user explicitly
- * enabled it with the action {@link org.apache.solr.handler.CdcrParams.CdcrAction#ENABLEBUFFER}.
- */
-class CdcrBufferManager implements CdcrStateManager.CdcrStateObserver {
-
-  private CdcrLeaderStateManager leaderStateManager;
-  private CdcrBufferStateManager bufferStateManager;
-
-  private final SolrCore core;
-
-  CdcrBufferManager(SolrCore core) {
-    this.core = core;
-  }
-
-  void setLeaderStateManager(final CdcrLeaderStateManager leaderStateManager) {
-    this.leaderStateManager = leaderStateManager;
-    this.leaderStateManager.register(this);
-  }
-
-  void setBufferStateManager(final CdcrBufferStateManager bufferStateManager) {
-    this.bufferStateManager = bufferStateManager;
-    this.bufferStateManager.register(this);
-  }
-
-  /**
-   * This method is synchronised as it can be called by both the leaderStateManager and the bufferStateManager.
-   */
-  @Override
-  public synchronized void stateUpdate() {
-    CdcrUpdateLog ulog = (CdcrUpdateLog) core.getUpdateHandler().getUpdateLog();
-
-    // If I am not the leader, I should always buffer my updates
-    if (!leaderStateManager.amILeader()) {
-      ulog.enableBuffer();
-      return;
-    }
-    // If I am the leader, I should buffer my updates only if buffer is enabled
-    else if (bufferStateManager.getState().equals(CdcrParams.BufferState.ENABLED)) {
-      ulog.enableBuffer();
-      return;
-    }
-
-    // otherwise, disable the buffer
-    ulog.disableBuffer();
-  }
-
-}
-
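
The stateUpdate() logic above reduces to a single predicate; a sketch (method and parameter names are illustrative):

    // Buffer updates unless this node is the leader AND buffering is disabled.
    static boolean shouldBuffer(boolean amILeader, CdcrParams.BufferState bufferState) {
      return !amILeader || bufferState == CdcrParams.BufferState.ENABLED;
    }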

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/CdcrBufferStateManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrBufferStateManager.java b/solr/core/src/java/org/apache/solr/handler/CdcrBufferStateManager.java
deleted file mode 100644
index fd8d4bb..0000000
--- a/solr/core/src/java/org/apache/solr/handler/CdcrBufferStateManager.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.core.SolrCore;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.lang.invoke.MethodHandles;
-import java.nio.charset.Charset;
-
-/**
- * Manage the state of the update log buffer. It is responsible for synchronising the state
- * through ZooKeeper. The state of the buffer is stored in the zk node defined by {@link #getZnodePath()}.
- */
-class CdcrBufferStateManager extends CdcrStateManager {
-
-  private CdcrParams.BufferState state = DEFAULT_STATE;
-
-  private BufferStateWatcher wrappedWatcher;
-  private Watcher watcher;
-
-  private SolrCore core;
-
-  static CdcrParams.BufferState DEFAULT_STATE = CdcrParams.BufferState.ENABLED;
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  CdcrBufferStateManager(final SolrCore core, SolrParams bufferConfiguration) {
-    this.core = core;
-
-    // Ensure that the state znode exists
-    this.createStateNode();
-
-    // set default state
-    if (bufferConfiguration != null) {
-      byte[] defaultState = bufferConfiguration.get(
-          CdcrParams.DEFAULT_STATE_PARAM, DEFAULT_STATE.toLower()).getBytes(Charset.forName("UTF-8"));
-      state = CdcrParams.BufferState.get(defaultState);
-    }
-    this.setState(state); // notify observers
-
-    // Startup and register the watcher at startup
-    try {
-      SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient();
-      watcher = this.initWatcher(zkClient);
-      this.setState(CdcrParams.BufferState.get(zkClient.getData(this.getZnodePath(), watcher, null, true)));
-    } catch (KeeperException | InterruptedException e) {
-      log.warn("Failed fetching initial state", e);
-    }
-  }
-
-  /**
-   * SolrZkClient does not guarantee that a watch object will only be triggered once for a given notification
-   * if we do not wrap the watcher - see SOLR-6621.
-   */
-  private Watcher initWatcher(SolrZkClient zkClient) {
-    wrappedWatcher = new BufferStateWatcher();
-    return zkClient.wrapWatcher(wrappedWatcher);
-  }
-
-  private String getZnodeBase() {
-    return "/collections/" + core.getCoreDescriptor().getCloudDescriptor().getCollectionName() + "/cdcr/state";
-  }
-
-  private String getZnodePath() {
-    return getZnodeBase() + "/buffer";
-  }
-
-  void setState(CdcrParams.BufferState state) {
-    if (this.state != state) {
-      this.state = state;
-      this.callback(); // notify the observers of a state change
-    }
-  }
-
-  CdcrParams.BufferState getState() {
-    return state;
-  }
-
-  /**
-   * Synchronise the state to ZooKeeper. This method must be called only by the handler receiving the
-   * action.
-   */
-  void synchronize() {
-    SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient();
-    try {
-      zkClient.setData(this.getZnodePath(), this.getState().getBytes(), true);
-      // check if nobody changed it in the meantime, and set a new watcher
-      this.setState(CdcrParams.BufferState.get(zkClient.getData(this.getZnodePath(), watcher, null, true)));
-    } catch (KeeperException | InterruptedException e) {
-      log.warn("Failed synchronising new state", e);
-    }
-  }
-
-  private void createStateNode() {
-    SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient();
-    try {
-      if (!zkClient.exists(this.getZnodePath(), true)) {
-        if (!zkClient.exists(this.getZnodeBase(), true)) {
-          zkClient.makePath(this.getZnodeBase(), null, CreateMode.PERSISTENT, null, false, true); // Should be a no-op if node exists
-        }
-        zkClient.create(this.getZnodePath(), DEFAULT_STATE.getBytes(), CreateMode.PERSISTENT, true);
-        log.info("Created znode {}", this.getZnodePath());
-      }
-    } catch (KeeperException.NodeExistsException ne) {
-      // Someone got in first and created the node.
-    }  catch (KeeperException | InterruptedException e) {
-      log.warn("Failed to create CDCR buffer state node", e);
-    }
-  }
-
-  void shutdown() {
-    if (wrappedWatcher != null) {
-      wrappedWatcher.cancel(); // cancel the watcher to avoid spurious warn messages during shutdown
-    }
-  }
-
-  private class BufferStateWatcher implements Watcher {
-
-    private boolean isCancelled = false;
-
-    /**
-     * Cancel the watcher to avoid spurious warn messages during shutdown.
-     */
-    void cancel() {
-      isCancelled = true;
-    }
-
-    @Override
-    public void process(WatchedEvent event) {
-      if (isCancelled) return; // if the watcher is cancelled, do nothing.
-      String collectionName = core.getCoreDescriptor().getCloudDescriptor().getCollectionName();
-      String shard = core.getCoreDescriptor().getCloudDescriptor().getShardId();
-
-      log.info("The CDCR buffer state has changed: {} @ {}:{}", event, collectionName, shard);
-      // session events are not change events, and do not remove the watcher
-      if (Event.EventType.None.equals(event.getType())) {
-        return;
-      }
-      SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient();
-      try {
-        CdcrParams.BufferState state = CdcrParams.BufferState.get(zkClient.getData(CdcrBufferStateManager.this.getZnodePath(), watcher, null, true));
-        log.info("Received new CDCR buffer state from watcher: {} @ {}:{}", state, collectionName, shard);
-        CdcrBufferStateManager.this.setState(state);
-      } catch (KeeperException | InterruptedException e) {
-        log.warn("Failed synchronising new state @ " + collectionName + ":" + shard, e);
-      }
-    }
-
-  }
-
-}
-
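
The buffer state is persisted as lower-case UTF-8 bytes under a per-collection znode; a sketch of the round trip (the collection name is illustrative):

    String collection = "mycollection";
    String znodePath = "/collections/" + collection + "/cdcr/state/buffer";
    byte[] payload = CdcrParams.BufferState.ENABLED.getBytes();          // "enabled" in UTF-8
    CdcrParams.BufferState state = CdcrParams.BufferState.get(payload);  // lenient, case-insensitive
    assert state == CdcrParams.BufferState.ENABLED;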

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/CdcrLeaderStateManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrLeaderStateManager.java b/solr/core/src/java/org/apache/solr/handler/CdcrLeaderStateManager.java
deleted file mode 100644
index 1b4d8af..0000000
--- a/solr/core/src/java/org/apache/solr/handler/CdcrLeaderStateManager.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.lang.invoke.MethodHandles;
-
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.core.SolrCore;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * <p>
- * Manage the leader state of the CDCR nodes.
- * </p>
- * <p>
- * It takes care of notifying the {@link CdcrReplicatorManager} in case
- * of a leader state change.
- * </p>
- */
-class CdcrLeaderStateManager extends CdcrStateManager {
-
-  private boolean amILeader = false;
-
-  private LeaderStateWatcher wrappedWatcher;
-  private Watcher watcher;
-
-  private SolrCore core;
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  CdcrLeaderStateManager(final SolrCore core) {
-    this.core = core;
-
-    // Fetch leader state and register the watcher at startup
-    try {
-      SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient();
-      ClusterState clusterState = core.getCoreContainer().getZkController().getClusterState();
-
-      watcher = this.initWatcher(zkClient);
-      // if the node does not exist, it means that the leader was not yet registered. This can happen
-      // when the cluster is starting up. The core is not yet fully loaded, and the leader election process
-      // is waiting for it.
-      if (this.isLeaderRegistered(zkClient, clusterState)) {
-        this.checkIfIAmLeader();
-      }
-    } catch (KeeperException | InterruptedException e) {
-      log.warn("Failed fetching initial leader state and setting watch", e);
-    }
-  }
-
-  /**
-   * Checks if the leader is registered. If it is not registered, we are probably in the
-   * initialisation phase of the cluster. In this case, we must attach a watcher to
-   * be notified when the leader is registered.
-   */
-  private boolean isLeaderRegistered(SolrZkClient zkClient, ClusterState clusterState)
-      throws KeeperException, InterruptedException {
-    // First check if the znode exists, and register the watcher at the same time
-    return zkClient.exists(this.getZnodePath(), watcher, true) != null;
-  }
-
-  /**
-   * SolrZkClient does not guarantee that a watch object will only be triggered once for a given notification
-   * if we do not wrap the watcher - see SOLR-6621.
-   */
-  private Watcher initWatcher(SolrZkClient zkClient) {
-    wrappedWatcher = new LeaderStateWatcher();
-    return zkClient.wrapWatcher(wrappedWatcher);
-  }
-
-  private void checkIfIAmLeader() throws KeeperException, InterruptedException {
-    SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient();
-    ZkNodeProps props = ZkNodeProps.load(zkClient.getData(CdcrLeaderStateManager.this.getZnodePath(), null, null, true));
-    if (props != null) {
-      CdcrLeaderStateManager.this.setAmILeader(props.get("core").equals(core.getName()));
-    }
-  }
-
-  private String getZnodePath() {
-    String myShardId = core.getCoreDescriptor().getCloudDescriptor().getShardId();
-    String myCollection = core.getCoreDescriptor().getCloudDescriptor().getCollectionName();
-    return "/collections/" + myCollection + "/leaders/" + myShardId + "/leader";
-  }
-
-  void setAmILeader(boolean amILeader) {
-    if (this.amILeader != amILeader) {
-      this.amILeader = amILeader;
-      this.callback(); // notify the observers of a state change
-    }
-  }
-
-  boolean amILeader() {
-    return amILeader;
-  }
-
-  void shutdown() {
-    if (wrappedWatcher != null) {
-      wrappedWatcher.cancel(); // cancel the watcher to avoid spurious warn messages during shutdown
-    }
-  }
-
-  private class LeaderStateWatcher implements Watcher {
-
-    private boolean isCancelled = false;
-
-    /**
-     * Cancel the watcher to avoid spurious warn messages during shutdown.
-     */
-    void cancel() {
-      isCancelled = true;
-    }
-
-    @Override
-    public void process(WatchedEvent event) {
-      if (isCancelled) return; // if the watcher is cancelled, do nothing.
-      String collectionName = core.getCoreDescriptor().getCloudDescriptor().getCollectionName();
-      String shard = core.getCoreDescriptor().getCloudDescriptor().getShardId();
-
-      log.debug("The leader state has changed: {} @ {}:{}", event, collectionName, shard);
-      // session events are not change events, and do not remove the watcher
-      if (Event.EventType.None.equals(event.getType())) {
-        return;
-      }
-
-      try {
-        log.info("Received new leader state @ {}:{}", collectionName, shard);
-        SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient();
-        ClusterState clusterState = core.getCoreContainer().getZkController().getClusterState();
-        if (CdcrLeaderStateManager.this.isLeaderRegistered(zkClient, clusterState)) {
-          CdcrLeaderStateManager.this.checkIfIAmLeader();
-        }
-      } catch (KeeperException | InterruptedException e) {
-        log.warn("Failed updating leader state and setting watch @ " + collectionName + ":" + shard, e);
-      }
-    }
-
-  }
-
-}
-
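
The leader test in checkIfIAmLeader() is a name comparison against the "core" property stored in the leader znode. A sketch with the znode payload shown as a plain map, which is essentially what ZkNodeProps wraps (a null guard is added here for clarity):

    import java.util.Map;

    // leaderProps: decoded payload of /collections/<coll>/leaders/<shard>/leader
    static boolean amILeader(Map<String, Object> leaderProps, String myCoreName) {
      Object leaderCore = leaderProps.get("core");
      return leaderCore != null && leaderCore.equals(myCoreName);
    }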

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/CdcrParams.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrParams.java b/solr/core/src/java/org/apache/solr/handler/CdcrParams.java
deleted file mode 100644
index 3f65b90..0000000
--- a/solr/core/src/java/org/apache/solr/handler/CdcrParams.java
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.nio.charset.Charset;
-import java.util.Locale;
-
-public class CdcrParams {
-
-  /**
-   * The definition of a replica configuration *
-   */
-  public static final String REPLICA_PARAM = "replica";
-
-  /**
-   * The source collection of a replica *
-   */
-  public static final String SOURCE_COLLECTION_PARAM = "source";
-
-  /**
-   * The target collection of a replica *
-   */
-  public static final String TARGET_COLLECTION_PARAM = "target";
-
-  /**
-   * The Zookeeper host of the target cluster hosting the replica *
-   */
-  public static final String ZK_HOST_PARAM = "zkHost";
-
-  /**
-   * The definition of the {@link org.apache.solr.handler.CdcrReplicatorScheduler} configuration *
-   */
-  public static final String REPLICATOR_PARAM = "replicator";
-
-  /**
-   * The thread pool size of the replicator *
-   */
-  public static final String THREAD_POOL_SIZE_PARAM = "threadPoolSize";
-
-  /**
-   * The time schedule (in ms) of the replicator *
-   */
-  public static final String SCHEDULE_PARAM = "schedule";
-
-  /**
-   * The batch size of the replicator *
-   */
-  public static final String BATCH_SIZE_PARAM = "batchSize";
-
-  /**
-   * The definition of the {@link org.apache.solr.handler.CdcrUpdateLogSynchronizer} configuration *
-   */
-  public static final String UPDATE_LOG_SYNCHRONIZER_PARAM = "updateLogSynchronizer";
-
-  /**
-   * The definition of the {@link org.apache.solr.handler.CdcrBufferManager} configuration *
-   */
-  public static final String BUFFER_PARAM = "buffer";
-
-  /**
-   * The default state at startup of the buffer *
-   */
-  public static final String DEFAULT_STATE_PARAM = "defaultState";
-
-  /**
-   * The latest update checkpoint on a target cluster *
-   */
-  public final static String CHECKPOINT = "checkpoint";
-
-  /**
-   * The last processed version on a source cluster *
-   */
-  public final static String LAST_PROCESSED_VERSION = "lastProcessedVersion";
-
-  /**
-   * A list of replica queues on a source cluster *
-   */
-  public final static String QUEUES = "queues";
-
-  /**
-   * The size of a replica queue on a source cluster *
-   */
-  public final static String QUEUE_SIZE = "queueSize";
-
-  /**
-   * The timestamp of the last processed operation in a replica queue *
-   */
-  public final static String LAST_TIMESTAMP = "lastTimestamp";
-
-  /**
-   * A list of qps statistics per collection *
-   */
-  public final static String OPERATIONS_PER_SECOND = "operationsPerSecond";
-
-  /**
-   * Overall counter *
-   */
-  public final static String COUNTER_ALL = "all";
-
-  /**
-   * Counter for Adds *
-   */
-  public final static String COUNTER_ADDS = "adds";
-
-  /**
-   * Counter for Deletes *
-   */
-  public final static String COUNTER_DELETES = "deletes";
-
-  /**
-   * Counter for Bootstrap operations *
-   */
-  public final static String COUNTER_BOOTSTRAP = "bootstraps";
-
-  /**
-   * A list of errors per target collection *
-   */
-  public final static String ERRORS = "errors";
-
-  /**
-   * Counter for consecutive errors encountered by a replicator thread *
-   */
-  public final static String CONSECUTIVE_ERRORS = "consecutiveErrors";
-
-  /**
-   * A list of the last errors encountered by a replicator thread *
-   */
-  public final static String LAST = "last";
-
-  /**
-   * Total size of transaction logs *
-   */
-  public final static String TLOG_TOTAL_SIZE = "tlogTotalSize";
-
-  /**
-   * Total count of transaction logs *
-   */
-  public final static String TLOG_TOTAL_COUNT = "tlogTotalCount";
-
-  /**
-   * The state of the update log synchronizer *
-   */
-  public final static String UPDATE_LOG_SYNCHRONIZER = "updateLogSynchronizer";
-
-  /**
-   * The actions supported by the CDCR API
-   */
-  public enum CdcrAction {
-    START,
-    STOP,
-    STATUS,
-    COLLECTIONCHECKPOINT,
-    SHARDCHECKPOINT,
-    ENABLEBUFFER,
-    DISABLEBUFFER,
-    LASTPROCESSEDVERSION,
-    QUEUES,
-    OPS,
-    ERRORS,
-    BOOTSTRAP,
-    BOOTSTRAP_STATUS,
-    CANCEL_BOOTSTRAP;
-
-    public static CdcrAction get(String p) {
-      if (p != null) {
-        try {
-          return CdcrAction.valueOf(p.toUpperCase(Locale.ROOT));
-        } catch (Exception e) {
-        }
-      }
-      return null;
-    }
-
-    public String toLower() {
-      return toString().toLowerCase(Locale.ROOT);
-    }
-
-  }
-
-  /**
-   * The possible states of the CDCR process
-   */
-  public enum ProcessState {
-    STARTED,
-    STOPPED;
-
-    public static ProcessState get(byte[] state) {
-      if (state != null) {
-        try {
-          return ProcessState.valueOf(new String(state, Charset.forName("UTF-8")).toUpperCase(Locale.ROOT));
-        } catch (Exception e) {
-        }
-      }
-      return null;
-    }
-
-    public String toLower() {
-      return toString().toLowerCase(Locale.ROOT);
-    }
-
-    public byte[] getBytes() {
-      return toLower().getBytes(Charset.forName("UTF-8"));
-    }
-
-    public static String getParam() {
-      return "process";
-    }
-
-  }
-
-  /**
-   * The possible states of the CDCR buffer
-   */
-  public enum BufferState {
-    ENABLED,
-    DISABLED;
-
-    public static BufferState get(byte[] state) {
-      if (state != null) {
-        try {
-          return BufferState.valueOf(new String(state, Charset.forName("UTF-8")).toUpperCase(Locale.ROOT));
-        } catch (Exception e) {
-        }
-      }
-      return null;
-    }
-
-    public String toLower() {
-      return toString().toLowerCase(Locale.ROOT);
-    }
-
-    public byte[] getBytes() {
-      return toLower().getBytes(Charset.forName("UTF-8"));
-    }
-
-    public static String getParam() {
-      return "buffer";
-    }
-
-  }
-}
-
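
The get() helpers in these enums parse leniently: case-insensitive, with any unknown or null input yielding null instead of an exception. For example:

    CdcrParams.CdcrAction a = CdcrParams.CdcrAction.get("start"); // START
    CdcrParams.CdcrAction b = CdcrParams.CdcrAction.get("STOP");  // STOP
    CdcrParams.CdcrAction c = CdcrParams.CdcrAction.get("bogus"); // null
    CdcrParams.CdcrAction d = CdcrParams.CdcrAction.get(null);    // null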

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/CdcrProcessStateManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrProcessStateManager.java b/solr/core/src/java/org/apache/solr/handler/CdcrProcessStateManager.java
deleted file mode 100644
index 05be077..0000000
--- a/solr/core/src/java/org/apache/solr/handler/CdcrProcessStateManager.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.lang.invoke.MethodHandles;
-
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.core.SolrCore;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * <p>
- * Manage the life-cycle state of the CDCR process. It is responsible for synchronising the state
- * through ZooKeeper. The state of the CDCR process is stored in the zk node defined by {@link #getZnodePath()}.
- * </p>
- * <p>
- * It takes care of notifying the {@link CdcrReplicatorManager} in case
- * of a process state change.
- * </p>
- */
-class CdcrProcessStateManager extends CdcrStateManager {
-
-  private CdcrParams.ProcessState state = DEFAULT_STATE;
-
-  private ProcessStateWatcher wrappedWatcher;
-  private Watcher watcher;
-
-  private SolrCore core;
-
-  /**
-   * The default state must be STOPPED. See comments in
-   * {@link #setState(org.apache.solr.handler.CdcrParams.ProcessState)}.
-   */
-  static CdcrParams.ProcessState DEFAULT_STATE = CdcrParams.ProcessState.STOPPED;
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  CdcrProcessStateManager(final SolrCore core) {
-    this.core = core;
-
-    // Ensure that the status znode exists
-    this.createStateNode();
-
-    // Register the watcher at startup
-    try {
-      SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient();
-      watcher = this.initWatcher(zkClient);
-      this.setState(CdcrParams.ProcessState.get(zkClient.getData(this.getZnodePath(), watcher, null, true)));
-    } catch (KeeperException | InterruptedException e) {
-      log.warn("Failed fetching initial state", e);
-    }
-  }
-
-  /**
-   * SolrZkClient does not guarantee that a watch object will only be triggered once for a given notification
-   * if we do not wrap the watcher - see SOLR-6621.
-   */
-  private Watcher initWatcher(SolrZkClient zkClient) {
-    wrappedWatcher = new ProcessStateWatcher();
-    return zkClient.wrapWatcher(wrappedWatcher);
-  }
-
-  private String getZnodeBase() {
-    return "/collections/" + core.getCoreDescriptor().getCloudDescriptor().getCollectionName() + "/cdcr/state";
-  }
-
-  private String getZnodePath() {
-    return getZnodeBase() + "/process";
-  }
-
-  void setState(CdcrParams.ProcessState state) {
-    if (this.state != state) {
-      this.state = state;
-      this.callback(); // notify the observers of a state change
-    }
-  }
-
-  CdcrParams.ProcessState getState() {
-    return state;
-  }
-
-  /**
-   * Synchronise the state to ZooKeeper. This method must be called only by the handler receiving the
-   * action.
-   */
-  void synchronize() {
-    SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient();
-    try {
-      zkClient.setData(this.getZnodePath(), this.getState().getBytes(), true);
-      // check if nobody changed it in the meantime, and set a new watcher
-      this.setState(CdcrParams.ProcessState.get(zkClient.getData(this.getZnodePath(), watcher, null, true)));
-    } catch (KeeperException | InterruptedException e) {
-      log.warn("Failed synchronising new state", e);
-    }
-  }
-
-  private void createStateNode() {
-    SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient();
-    try {
-      if (!zkClient.exists(this.getZnodePath(), true)) {
-        if (!zkClient.exists(this.getZnodeBase(), true)) { // Should be a no-op if the node exists
-          zkClient.makePath(this.getZnodeBase(), null, CreateMode.PERSISTENT, null, false, true);
-        }
-        zkClient.create(this.getZnodePath(), DEFAULT_STATE.getBytes(), CreateMode.PERSISTENT, true);
-        log.info("Created znode {}", this.getZnodePath());
-      }
-    } catch (KeeperException.NodeExistsException ne) {
-      // Someone got in first and created the node.
-    } catch (KeeperException | InterruptedException e) {
-      log.warn("Failed to create CDCR process state node", e);
-    }
-  }
-
-  void shutdown() {
-    if (wrappedWatcher != null) {
-      wrappedWatcher.cancel(); // cancel the watcher to avoid spurious warn messages during shutdown
-    }
-  }
-
-  private class ProcessStateWatcher implements Watcher {
-
-    private boolean isCancelled = false;
-
-    /**
-     * Cancel the watcher to avoid spurious warn messages during shutdown.
-     */
-    void cancel() {
-      isCancelled = true;
-    }
-
-    @Override
-    public void process(WatchedEvent event) {
-      if (isCancelled) return; // if the watcher is cancelled, do nothing.
-      String collectionName = core.getCoreDescriptor().getCloudDescriptor().getCollectionName();
-      String shard = core.getCoreDescriptor().getCloudDescriptor().getShardId();
-
-      log.info("The CDCR process state has changed: {} @ {}:{}", event, collectionName, shard);
-      // session events are not change events, and do not remove the watcher
-      if (Event.EventType.None.equals(event.getType())) {
-        return;
-      }
-      SolrZkClient zkClient = core.getCoreContainer().getZkController().getZkClient();
-      try {
-        CdcrParams.ProcessState state = CdcrParams.ProcessState.get(zkClient.getData(CdcrProcessStateManager.this.getZnodePath(), watcher, null, true));
-        log.info("Received new CDCR process state from watcher: {} @ {}:{}", state, collectionName, shard);
-        CdcrProcessStateManager.this.setState(state);
-      } catch (KeeperException | InterruptedException e) {
-        log.warn("Failed synchronising new state @ " + collectionName + ":" + shard, e);
-      }
-    }
-
-  }
-
-}
-
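
synchronize() follows a write-then-read-back pattern: after writing the state it immediately re-reads the znode with the watcher attached, so a concurrent change by another node is observed and the watch is re-armed. Condensed from the method above (znodePath stands for getZnodePath()):

    // Write our view of the state, then trust only what ZooKeeper returns.
    zkClient.setData(znodePath, state.getBytes(), true);
    byte[] current = zkClient.getData(znodePath, watcher, null, true);
    setState(CdcrParams.ProcessState.get(current)); // may differ if another node raced us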

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/CdcrReplicator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrReplicator.java b/solr/core/src/java/org/apache/solr/handler/CdcrReplicator.java
deleted file mode 100644
index 5dca0d8..0000000
--- a/solr/core/src/java/org/apache/solr/handler/CdcrReplicator.java
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.nio.charset.Charset;
-import java.util.List;
-
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.client.solrj.response.UpdateResponse;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.update.CdcrUpdateLog;
-import org.apache.solr.update.UpdateLog;
-import org.apache.solr.update.processor.CdcrUpdateProcessor;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.VERSION_FIELD;
-
-/**
- * The replication logic. Given a {@link org.apache.solr.handler.CdcrReplicatorState}, it reads all the new entries
- * in the update log and forwards them to the target cluster. If an error occurs, the replication is stopped and
- * will be retried later.
- */
-public class CdcrReplicator implements Runnable {
-
-  private final CdcrReplicatorState state;
-  private final int batchSize;
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public CdcrReplicator(CdcrReplicatorState state, int batchSize) {
-    this.state = state;
-    this.batchSize = batchSize;
-  }
-
-  @Override
-  public void run() {
-    CdcrUpdateLog.CdcrLogReader logReader = state.getLogReader();
-    CdcrUpdateLog.CdcrLogReader subReader = null;
-    if (logReader == null) {
-      log.warn("Log reader for target {} is not initialised, it will be ignored.", state.getTargetCollection());
-      return;
-    }
-
-    try {
-      // create update request
-      UpdateRequest req = new UpdateRequest();
-      // Add the param to indicate the {@link CdcrUpdateProcessor} to keep the provided version number
-      req.setParam(CdcrUpdateProcessor.CDCR_UPDATE, "");
-
-      // Start the benchmark timer
-      state.getBenchmarkTimer().start();
-
-      long counter = 0;
-      subReader = logReader.getSubReader();
-
-      for (int i = 0; i < batchSize; i++) {
-        Object o = subReader.next();
-        if (o == null) break; // we have reached the end of the update logs, we should close the batch
-
-        if (isTargetCluster(o)) {
-          continue;
-        }
-
-        if (isDelete(o)) {
-
-          /*
-           * Deletes are sent one at a time.
-           */
-
-          // First send out current batch of SolrInputDocument, the non-deletes.
-          List<SolrInputDocument> docs = req.getDocuments();
-
-          if (docs != null && docs.size() > 0) {
-            subReader.resetToLastPosition(); // Push back the delete for now.
-            this.sendRequest(req); // Send the batch update request
-            logReader.forwardSeek(subReader); // Advance the main reader to just before the delete.
-            o = subReader.next(); // Read the delete again
-            counter += docs.size();
-            req.clear();
-          }
-
-          // Process Delete
-          this.processUpdate(o, req);
-          this.sendRequest(req);
-          logReader.forwardSeek(subReader);
-          counter++;
-          req.clear();
-
-        } else {
-
-          this.processUpdate(o, req);
-
-        }
-      }
-
-      //Send the final batch out.
-      List<SolrInputDocument> docs = req.getDocuments();
-
-      if ((docs != null && docs.size() > 0)) {
-        this.sendRequest(req);
-        counter += docs.size();
-      }
-
-      // we might have read a single commit operation and reached the end of the update logs
-      logReader.forwardSeek(subReader);
-
-      log.info("Forwarded {} updates to target {}", counter, state.getTargetCollection());
-    } catch (Exception e) {
-      // report error and update error stats
-      this.handleException(e);
-    } finally {
-      // stop the benchmark timer
-      state.getBenchmarkTimer().stop();
-      // ensure that the subreader is closed and the associated pointer is removed
-      if (subReader != null) subReader.close();
-    }
-  }
-
-  private void sendRequest(UpdateRequest req) throws IOException, SolrServerException, CdcrReplicatorException {
-    UpdateResponse rsp = req.process(state.getClient());
-    if (rsp.getStatus() != 0) {
-      throw new CdcrReplicatorException(req, rsp);
-    }
-    state.resetConsecutiveErrors();
-  }
-
-  /** Check whether the update read from the TLog was received from the source cluster
-   *  or via a Solr client.
-   */
-  private boolean isTargetCluster(Object o) {
-    List entry = (List) o;
-    int operationAndFlags = (Integer) entry.get(0);
-    int oper = operationAndFlags & UpdateLog.OPERATION_MASK;
-    boolean isTarget = false;
-    if (oper == UpdateLog.DELETE_BY_QUERY || oper == UpdateLog.DELETE) {
-      if (entry.size() == 4) { //back-compat - skip for previous versions
-        isTarget = (Boolean) entry.get(entry.size() - 1);
-      }
-    } else if (oper == UpdateLog.UPDATE_INPLACE) {
-      if (entry.size() == 6) { //back-compat - skip for previous versions
-        isTarget = (Boolean) entry.get(entry.size() - 2);
-      }
-    } else if (oper == UpdateLog.ADD) {
-      if (entry.size() == 4) { //back-compat - skip for previous versions
-        isTarget = (Boolean) entry.get(entry.size() - 2);
-      }
-    }
-    return isTarget;
-  }
-
-  private boolean isDelete(Object o) {
-    List entry = (List) o;
-    int operationAndFlags = (Integer) entry.get(0);
-    int oper = operationAndFlags & UpdateLog.OPERATION_MASK;
-    return oper == UpdateLog.DELETE_BY_QUERY || oper == UpdateLog.DELETE;
-  }
-
-  private void handleException(Exception e) {
-    if (e instanceof CdcrReplicatorException) {
-      UpdateRequest req = ((CdcrReplicatorException) e).req;
-      UpdateResponse rsp = ((CdcrReplicatorException) e).rsp;
-      log.warn("Failed to forward update request {} to target: {}. Got response {}", req, state.getTargetCollection(), rsp);
-      state.reportError(CdcrReplicatorState.ErrorType.BAD_REQUEST);
-    } else if (e instanceof CloudSolrClient.RouteException) {
-      log.warn("Failed to forward update request to target: " + state.getTargetCollection(), e);
-      state.reportError(CdcrReplicatorState.ErrorType.BAD_REQUEST);
-    } else {
-      log.warn("Failed to forward update request to target: " + state.getTargetCollection(), e);
-      state.reportError(CdcrReplicatorState.ErrorType.INTERNAL);
-    }
-  }
-
-  private UpdateRequest processUpdate(Object o, UpdateRequest req) {
-
-    // should currently be a List<Oper,Ver,Doc/Id>
-    List entry = (List) o;
-
-    int operationAndFlags = (Integer) entry.get(0);
-    int oper = operationAndFlags & UpdateLog.OPERATION_MASK;
-    long version = (Long) entry.get(1);
-
-    // record the operation in the benchmark timer
-    state.getBenchmarkTimer().incrementCounter(oper);
-
-    switch (oper) {
-      case UpdateLog.ADD: {
-        // the version is already attached to the document
-        SolrInputDocument sdoc = (SolrInputDocument) entry.get(entry.size() - 1);
-        req.add(sdoc);
-        return req;
-      }
-      case UpdateLog.DELETE: {
-        byte[] idBytes = (byte[]) entry.get(2);
-        req.deleteById(new String(idBytes, Charset.forName("UTF-8")));
-        req.setParam(VERSION_FIELD, Long.toString(version));
-        return req;
-      }
-
-      case UpdateLog.DELETE_BY_QUERY: {
-        String query = (String) entry.get(2);
-        req.deleteByQuery(query);
-        req.setParam(VERSION_FIELD, Long.toString(version));
-        return req;
-      }
-
-      case UpdateLog.COMMIT: {
-        return null;
-      }
-
-      default:
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown Operation! " + oper);
-    }
-  }
-
-  /**
-   * Exception to catch update request issues with the target cluster.
-   */
-  public static class CdcrReplicatorException extends Exception {
-
-    private final UpdateRequest req;
-    private final UpdateResponse rsp;
-
-    public CdcrReplicatorException(UpdateRequest req, UpdateResponse rsp) {
-      this.req = req;
-      this.rsp = rsp;
-    }
-
-  }
-
-}
-
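
For reference, each transaction-log entry consumed by run() above is a plain List whose
first element packs the operation and its flag bits into a single int. A minimal decoding
sketch, using only the UpdateLog constants and the entry layout already visible in
processUpdate() above:

    List<?> entry = (List<?>) o;                         // [operAndFlags, version, payload...]
    int operAndFlags = (Integer) entry.get(0);
    int oper = operAndFlags & UpdateLog.OPERATION_MASK;  // strip the flag bits
    long version = (Long) entry.get(1);                  // version is always the second element
    boolean isDelete = oper == UpdateLog.DELETE || oper == UpdateLog.DELETE_BY_QUERY;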


[13/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
deleted file mode 100644
index 4938f82..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
+++ /dev/null
@@ -1,802 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import java.io.IOException;
-import java.nio.file.NoSuchFileException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.util.CharFilterFactory;
-import org.apache.lucene.analysis.util.TokenFilterFactory;
-import org.apache.lucene.analysis.util.TokenizerFactory;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.IndexCommit;
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.MultiTerms;
-import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.SegmentReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.similarities.Similarity;
-import org.apache.lucene.store.AlreadyClosedException;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.CharsRefBuilder;
-import org.apache.lucene.util.PriorityQueue;
-import org.apache.solr.analysis.TokenizerChain;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.luke.FieldFlag;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.Base64;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.schema.CopyField;
-import org.apache.solr.schema.FieldType;
-import org.apache.solr.schema.IndexSchema;
-import org.apache.solr.schema.SchemaField;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.update.SolrIndexWriter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.lucene.index.IndexOptions.DOCS;
-import static org.apache.lucene.index.IndexOptions.DOCS_AND_FREQS;
-import static org.apache.lucene.index.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
-
-/**
- * This handler exposes the internal lucene index.  It is inspired by and 
- * modeled on Luke, the Lucene Index Browser by Andrzej Bialecki.
- *   http://www.getopt.org/luke/
- *
- * For more documentation see:
- *  http://wiki.apache.org/solr/LukeRequestHandler
- *
- * @since solr 1.2
- */
-public class LukeRequestHandler extends RequestHandlerBase
-{
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String NUMTERMS = "numTerms";
-  public static final String INCLUDE_INDEX_FIELD_FLAGS = "includeIndexFieldFlags";
-  public static final String DOC_ID = "docId";
-  public static final String ID = CommonParams.ID;
-  public static final int DEFAULT_COUNT = 10;
-
-  static final int HIST_ARRAY_SIZE = 33;
-
-  private static enum ShowStyle {
-    ALL,
-    DOC,
-    SCHEMA,
-    INDEX;
-
-    public static ShowStyle get(String v) {
-      if(v==null) return null;
-      if("schema".equalsIgnoreCase(v)) return SCHEMA;
-      if("index".equalsIgnoreCase(v))  return INDEX;
-      if("doc".equalsIgnoreCase(v))    return DOC;
-      if("all".equalsIgnoreCase(v))    return ALL;
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown Show Style: "+v);
-    }
-  };
-
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception
-  {
-    IndexSchema schema = req.getSchema();
-    SolrIndexSearcher searcher = req.getSearcher();
-    DirectoryReader reader = searcher.getIndexReader();
-    SolrParams params = req.getParams();
-    ShowStyle style = ShowStyle.get(params.get("show"));
-
-    // If no doc is given, show all fields and top terms
-
-    rsp.add("index", getIndexInfo(reader));
-
-    if(ShowStyle.INDEX==style) {
-      return; // that's all we need
-    }
-
-
-    Integer docId = params.getInt( DOC_ID );
-    if( docId == null && params.get( ID ) != null ) {
-      // Look for something with a given solr ID
-      SchemaField uniqueKey = schema.getUniqueKeyField();
-      String v = uniqueKey.getType().toInternal( params.get(ID) );
-      Term t = new Term( uniqueKey.getName(), v );
-      docId = searcher.getFirstMatch( t );
-      if( docId < 0 ) {
-        throw new SolrException( SolrException.ErrorCode.NOT_FOUND, "Can't find document: "+params.get( ID ) );
-      }
-    }
-
-    // Read the document from the index
-    if( docId != null ) {
-      if( style != null && style != ShowStyle.DOC ) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "docId and id params are only valid with show=doc");
-      }
-      Document doc = null;
-      try {
-        doc = reader.document( docId );
-      }
-      catch( Exception ex ) {
-        // ignore: an invalid or out-of-range docId is reported as NOT_FOUND below
-      }
-      if( doc == null ) {
-        throw new SolrException( SolrException.ErrorCode.NOT_FOUND, "Can't find document: "+docId );
-      }
-
-      SimpleOrderedMap<Object> info = getDocumentFieldsInfo( doc, docId, reader, schema );
-
-      SimpleOrderedMap<Object> docinfo = new SimpleOrderedMap<>();
-      docinfo.add( "docId", docId );
-      docinfo.add( "lucene", info );
-      docinfo.add( "solr", doc );
-      rsp.add( "doc", docinfo );
-    }
-    else if ( ShowStyle.SCHEMA == style ) {
-      rsp.add( "schema", getSchemaInfo( req.getSchema() ) );
-    }
-    else {
-      rsp.add( "fields", getIndexedFieldsInfo(req) ) ;
-    }
-
-    // Add some generally helpful information
-    NamedList<Object> info = new SimpleOrderedMap<>();
-    info.add( "key", getFieldFlagsKey() );
-    info.add( "NOTE", "Document Frequency (df) is not updated when a document is marked for deletion.  df values include deleted documents." );
-    rsp.add( "info", info );
-    rsp.setHttpCaching(false);
-  }
-
-
-
-  /**
-   * @return a string representing an IndexableField's flags.
-   */
-  private static String getFieldFlags( IndexableField f )
-  {
-    IndexOptions opts = (f == null) ? null : f.fieldType().indexOptions();
-
-    StringBuilder flags = new StringBuilder();
-
-    flags.append( (f != null && f.fieldType().indexOptions() != IndexOptions.NONE)                     ? FieldFlag.INDEXED.getAbbreviation() : '-' );
-    flags.append( (f != null && f.fieldType().tokenized())                   ? FieldFlag.TOKENIZED.getAbbreviation() : '-' );
-    flags.append( (f != null && f.fieldType().stored())                      ? FieldFlag.STORED.getAbbreviation() : '-' );
-    flags.append( (f != null && f.fieldType().docValuesType() != DocValuesType.NONE)        ? FieldFlag.DOC_VALUES.getAbbreviation() : "-" );
-    flags.append( (false)                                          ? FieldFlag.MULTI_VALUED.getAbbreviation() : '-' ); // SchemaField Specific
-    flags.append( (f != null && f.fieldType().storeTermVectors())            ? FieldFlag.TERM_VECTOR_STORED.getAbbreviation() : '-' );
-    flags.append( (f != null && f.fieldType().storeTermVectorOffsets())   ? FieldFlag.TERM_VECTOR_OFFSET.getAbbreviation() : '-' );
-    flags.append( (f != null && f.fieldType().storeTermVectorPositions()) ? FieldFlag.TERM_VECTOR_POSITION.getAbbreviation() : '-' );
-    flags.append( (f != null && f.fieldType().storeTermVectorPayloads())   ? FieldFlag.TERM_VECTOR_PAYLOADS.getAbbreviation() : '-' );
-    flags.append( (f != null && f.fieldType().omitNorms())                  ? FieldFlag.OMIT_NORMS.getAbbreviation() : '-' );
-
-    flags.append( (f != null && DOCS == opts ) ?
-        FieldFlag.OMIT_TF.getAbbreviation() : '-' );
-
-    flags.append((f != null && DOCS_AND_FREQS == opts) ?
-        FieldFlag.OMIT_POSITIONS.getAbbreviation() : '-');
-    
-    flags.append((f != null && DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS == opts) ?
-        FieldFlag.STORE_OFFSETS_WITH_POSITIONS.getAbbreviation() : '-');
-
-    flags.append( (f != null && f.getClass().getSimpleName().equals("LazyField")) ? FieldFlag.LAZY.getAbbreviation() : '-' );
-    flags.append( (f != null && f.binaryValue()!=null)                      ? FieldFlag.BINARY.getAbbreviation() : '-' );
-    flags.append( (false)                                          ? FieldFlag.SORT_MISSING_FIRST.getAbbreviation() : '-' ); // SchemaField Specific
-    flags.append( (false)                                          ? FieldFlag.SORT_MISSING_LAST.getAbbreviation() : '-' ); // SchemaField Specific
-    return flags.toString();
-  }
-
-  /**
-   * @return a string representing a SchemaField's flags.  
-   */
-  private static String getFieldFlags( SchemaField f )
-  {
-    FieldType t = (f==null) ? null : f.getType();
-
-    // see: http://www.nabble.com/schema-field-properties-tf3437753.html#a9585549
-    boolean lazy = false; // "lazy" is purely a property of reading fields
-    boolean binary = false; // Currently not possible
-
-    StringBuilder flags = new StringBuilder();
-    flags.append( (f != null && f.indexed())             ? FieldFlag.INDEXED.getAbbreviation() : '-' );
-    flags.append( (t != null && t.isTokenized())         ? FieldFlag.TOKENIZED.getAbbreviation() : '-' );
-    flags.append( (f != null && f.stored())              ? FieldFlag.STORED.getAbbreviation() : '-' );
-    flags.append( (f != null && f.hasDocValues())        ? FieldFlag.DOC_VALUES.getAbbreviation() : "-" );
-    flags.append( (f != null && f.multiValued())         ? FieldFlag.MULTI_VALUED.getAbbreviation() : '-' );
-    flags.append( (f != null && f.storeTermVector() )    ? FieldFlag.TERM_VECTOR_STORED.getAbbreviation() : '-' );
-    flags.append( (f != null && f.storeTermOffsets() )   ? FieldFlag.TERM_VECTOR_OFFSET.getAbbreviation() : '-' );
-    flags.append( (f != null && f.storeTermPositions() ) ? FieldFlag.TERM_VECTOR_POSITION.getAbbreviation() : '-' );
-    flags.append( (f != null && f.storeTermPayloads() )  ? FieldFlag.TERM_VECTOR_PAYLOADS.getAbbreviation() : '-' );
-    flags.append( (f != null && f.omitNorms())           ? FieldFlag.OMIT_NORMS.getAbbreviation() : '-' );
-    flags.append( (f != null &&
-        f.omitTermFreqAndPositions() )        ? FieldFlag.OMIT_TF.getAbbreviation() : '-' );
-    flags.append( (f != null && f.omitPositions() )      ? FieldFlag.OMIT_POSITIONS.getAbbreviation() : '-' );
-    flags.append( (f != null && f.storeOffsetsWithPositions() )      ? FieldFlag.STORE_OFFSETS_WITH_POSITIONS.getAbbreviation() : '-' );
-    flags.append( (lazy)                                 ? FieldFlag.LAZY.getAbbreviation() : '-' );
-    flags.append( (binary)                               ? FieldFlag.BINARY.getAbbreviation() : '-' );
-    flags.append( (f != null && f.sortMissingFirst() )   ? FieldFlag.SORT_MISSING_FIRST.getAbbreviation() : '-' );
-    flags.append( (f != null && f.sortMissingLast() )    ? FieldFlag.SORT_MISSING_LAST.getAbbreviation() : '-' );
-    return flags.toString();
-  }
-
-  /**
-   * @return a key to what each character means
-   */
-  public static SimpleOrderedMap<String> getFieldFlagsKey() {
-    SimpleOrderedMap<String> key = new SimpleOrderedMap<>();
-    for (FieldFlag f : FieldFlag.values()) {
-      key.add(String.valueOf(f.getAbbreviation()), f.getDisplay() );
-    }
-    return key;
-  }
-
-  private static SimpleOrderedMap<Object> getDocumentFieldsInfo( Document doc, int docId, IndexReader reader,
-                                                                 IndexSchema schema ) throws IOException
-  {
-    final CharsRefBuilder spare = new CharsRefBuilder();
-    SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<>();
-    for( Object o : doc.getFields() ) {
-      Field field = (Field)o;
-      SimpleOrderedMap<Object> f = new SimpleOrderedMap<>();
-
-      SchemaField sfield = schema.getFieldOrNull( field.name() );
-      FieldType ftype = (sfield==null)?null:sfield.getType();
-
-      f.add( "type", (ftype==null)?null:ftype.getTypeName() );
-      f.add( "schema", getFieldFlags( sfield ) );
-      f.add( "flags", getFieldFlags( field ) );
-
-      f.add( "value", (ftype==null)?null:ftype.toExternal( field ) );
-
-      // TODO: this really should be "stored"
-      f.add( "internal", field.stringValue() );  // may be a binary number
-
-      BytesRef bytes = field.binaryValue();
-      if (bytes != null) {
-        f.add( "binary", Base64.byteArrayToBase64(bytes.bytes, bytes.offset, bytes.length));
-      }
-      if (ftype == null || !ftype.isPointField()) { // guard against fields missing from the schema
-        Term t = new Term(field.name(), ftype!=null ? ftype.storedToIndexed(field) : field.stringValue());
-        f.add( "docFreq", t.text()==null ? 0 : reader.docFreq( t ) ); // this can be 0 for non-indexed fields
-      } // TODO: Calculate docFreq for point fields
-
-      // If we have a term vector, return that
-      if( field.fieldType().storeTermVectors() ) {
-        try {
-          Terms v = reader.getTermVector( docId, field.name() );
-          if( v != null ) {
-            SimpleOrderedMap<Integer> tfv = new SimpleOrderedMap<>();
-            final TermsEnum termsEnum = v.iterator();
-            BytesRef text;
-            while((text = termsEnum.next()) != null) {
-              final int freq = (int) termsEnum.totalTermFreq();
-              spare.copyUTF8Bytes(text);
-              tfv.add(spare.toString(), freq);
-            }
-            f.add( "termVector", tfv );
-          }
-        }
-        catch( Exception ex ) {
-          log.warn( "error writing term vector", ex );
-        }
-      }
-
-      finfo.add( field.name(), f );
-    }
-    return finfo;
-  }
-
-  private static SimpleOrderedMap<Object> getIndexedFieldsInfo(SolrQueryRequest req)
-      throws Exception {
-
-    SolrIndexSearcher searcher = req.getSearcher();
-    SolrParams params = req.getParams();
-
-    Set<String> fields = null;
-    String fl = params.get(CommonParams.FL);
-    if (fl != null) {
-      fields = new TreeSet<>(Arrays.asList(fl.split( "[,\\s]+" )));
-    }
-
-    LeafReader reader = searcher.getSlowAtomicReader();
-    IndexSchema schema = searcher.getSchema();
-
-    // Don't be tempted to put this in the loop below, the whole point here is to alphabetize the fields!
-    Set<String> fieldNames = new TreeSet<>();
-    for(FieldInfo fieldInfo : reader.getFieldInfos()) {
-      fieldNames.add(fieldInfo.name);
-    }
-
-    // Walk the term enum and keep a priority queue for each map in our set
-    SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<>();
-
-    for (String fieldName : fieldNames) {
-      if (fields != null && ! fields.contains(fieldName) && ! fields.contains("*")) {
-        continue; // field was not requested via the fl param, skip it
-      }
-
-      SimpleOrderedMap<Object> fieldMap = new SimpleOrderedMap<>();
-
-      SchemaField sfield = schema.getFieldOrNull( fieldName );
-      FieldType ftype = (sfield==null)?null:sfield.getType();
-
-      fieldMap.add( "type", (ftype==null)?null:ftype.getTypeName() );
-      fieldMap.add("schema", getFieldFlags(sfield));
-      if (sfield != null && schema.isDynamicField(sfield.getName()) && schema.getDynamicPattern(sfield.getName()) != null) {
-        fieldMap.add("dynamicBase", schema.getDynamicPattern(sfield.getName()));
-      }
-      Terms terms = reader.terms(fieldName);
-      if (terms == null) { // Not indexed, so we need to report what we can (it made it through the fl param if specified)
-        finfo.add( fieldName, fieldMap );
-        continue;
-      }
-
-      if(sfield != null && sfield.indexed() ) {
-        if (params.getBool(INCLUDE_INDEX_FIELD_FLAGS,true)) {
-          Document doc = getFirstLiveDoc(terms, reader);
-
-          if (doc != null) {
-            // Found a document with this field
-            try {
-              IndexableField fld = doc.getField(fieldName);
-              if (fld != null) {
-                fieldMap.add("index", getFieldFlags(fld));
-              } else {
-                // it is a non-stored field...
-                fieldMap.add("index", "(unstored field)");
-              }
-            } catch (Exception ex) {
-              log.warn("error reading field: " + fieldName);
-            }
-          }
-        }
-        fieldMap.add("docs", terms.getDocCount());
-      }
-      if (fields != null && (fields.contains(fieldName) || fields.contains("*"))) {
-        getDetailedFieldInfo(req, fieldName, fieldMap);
-      }
-      // Add the field
-      finfo.add( fieldName, fieldMap );
-    }
-    return finfo;
-  }
-
-  // Just get a document with the term in it, the first one will do!
-  // Is there a better way to do this? Shouldn't actually be very costly
-  // to do it this way.
-  private static Document getFirstLiveDoc(Terms terms, LeafReader reader) throws IOException {
-    PostingsEnum postingsEnum = null;
-    TermsEnum termsEnum = terms.iterator();
-    BytesRef text;
-    // Deal with the chance that the first bunch of terms are in deleted documents. Is there a better way?
-    for (int idx = 0; idx < 1000; ++idx) {
-      text = termsEnum.next();
-      if (text == null) { // Ran off the end of the terms enum without finding any live docs with that field in them.
-        return null;
-      }
-      postingsEnum = termsEnum.postings(postingsEnum, PostingsEnum.NONE);
-      final Bits liveDocs = reader.getLiveDocs();
-      if (postingsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
-        if (liveDocs != null && !liveDocs.get(postingsEnum.docID())) {
-          continue; // first posting is deleted, try the next term
-        }
-        return reader.document(postingsEnum.docID());
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Return info from the index
-   */
-  private static SimpleOrderedMap<Object> getSchemaInfo( IndexSchema schema ) {
-    Map<String, List<String>> typeusemap = new TreeMap<>();
-    Map<String, Object> fields = new TreeMap<>();
-    SchemaField uniqueField = schema.getUniqueKeyField();
-    for( SchemaField f : schema.getFields().values() ) {
-      populateFieldInfo(schema, typeusemap, fields, uniqueField, f);
-    }
-
-    Map<String, Object> dynamicFields = new TreeMap<>();
-    for (SchemaField f : schema.getDynamicFieldPrototypes()) {
-      populateFieldInfo(schema, typeusemap, dynamicFields, uniqueField, f);
-    }
-    SimpleOrderedMap<Object> types = new SimpleOrderedMap<>();
-    Map<String, FieldType> sortedTypes = new TreeMap<>(schema.getFieldTypes());
-    for( FieldType ft : sortedTypes.values() ) {
-      SimpleOrderedMap<Object> field = new SimpleOrderedMap<>();
-      field.add("fields", typeusemap.get( ft.getTypeName() ) );
-      field.add("tokenized", ft.isTokenized() );
-      field.add("className", ft.getClass().getName());
-      field.add("indexAnalyzer", getAnalyzerInfo(ft.getIndexAnalyzer()));
-      field.add("queryAnalyzer", getAnalyzerInfo(ft.getQueryAnalyzer()));
-      field.add("similarity", getSimilarityInfo(ft.getSimilarity()));
-      types.add( ft.getTypeName(), field );
-    }
-
-    // Must go through this to maintain binary compatibility. Putting a TreeMap into a resp leads to casting errors
-    SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<>();
-
-    SimpleOrderedMap<Object> fieldsSimple = new SimpleOrderedMap<>();
-    for (Map.Entry<String, Object> ent : fields.entrySet()) {
-      fieldsSimple.add(ent.getKey(), ent.getValue());
-    }
-    finfo.add("fields", fieldsSimple);
-
-    SimpleOrderedMap<Object> dynamicSimple = new SimpleOrderedMap<>();
-    for (Map.Entry<String, Object> ent : dynamicFields.entrySet()) {
-      dynamicSimple.add(ent.getKey(), ent.getValue());
-    }
-    finfo.add("dynamicFields", dynamicSimple);
-
-    finfo.add("uniqueKeyField",
-        null == uniqueField ? null : uniqueField.getName());
-    finfo.add("similarity", getSimilarityInfo(schema.getSimilarity()));
-    finfo.add("types", types);
-    return finfo;
-  }
-
-  private static SimpleOrderedMap<Object> getSimilarityInfo(Similarity similarity) {
-    SimpleOrderedMap<Object> toReturn = new SimpleOrderedMap<>();
-    if (similarity != null) {
-      toReturn.add("className", similarity.getClass().getName());
-      toReturn.add("details", similarity.toString());
-    }
-    return toReturn;
-  }
-
-  private static SimpleOrderedMap<Object> getAnalyzerInfo(Analyzer analyzer) {
-    SimpleOrderedMap<Object> aninfo = new SimpleOrderedMap<>();
-    aninfo.add("className", analyzer.getClass().getName());
-    if (analyzer instanceof TokenizerChain) {
-
-      TokenizerChain tchain = (TokenizerChain)analyzer;
-
-      CharFilterFactory[] cfiltfacs = tchain.getCharFilterFactories();
-      if (0 < cfiltfacs.length) {
-        SimpleOrderedMap<Map<String, Object>> cfilters = new SimpleOrderedMap<>();
-        for (CharFilterFactory cfiltfac : cfiltfacs) {
-          Map<String, Object> tok = new HashMap<>();
-          String className = cfiltfac.getClass().getName();
-          tok.put("className", className);
-          tok.put("args", cfiltfac.getOriginalArgs());
-          cfilters.add(className.substring(className.lastIndexOf('.')+1), tok);
-        }
-        aninfo.add("charFilters", cfilters);
-      }
-
-      SimpleOrderedMap<Object> tokenizer = new SimpleOrderedMap<>();
-      TokenizerFactory tfac = tchain.getTokenizerFactory();
-      tokenizer.add("className", tfac.getClass().getName());
-      tokenizer.add("args", tfac.getOriginalArgs());
-      aninfo.add("tokenizer", tokenizer);
-
-      TokenFilterFactory[] filtfacs = tchain.getTokenFilterFactories();
-      if (0 < filtfacs.length) {
-        SimpleOrderedMap<Map<String, Object>> filters = new SimpleOrderedMap<>();
-        for (TokenFilterFactory filtfac : filtfacs) {
-          Map<String, Object> tok = new HashMap<>();
-          String className = filtfac.getClass().getName();
-          tok.put("className", className);
-          tok.put("args", filtfac.getOriginalArgs());
-          filters.add(className.substring(className.lastIndexOf('.')+1), tok);
-        }
-        aninfo.add("filters", filters);
-      }
-    }
-    return aninfo;
-  }
-
-  private static void populateFieldInfo(IndexSchema schema,
-                                        Map<String, List<String>> typeusemap, Map<String, Object> fields,
-                                        SchemaField uniqueField, SchemaField f) {
-    FieldType ft = f.getType();
-    SimpleOrderedMap<Object> field = new SimpleOrderedMap<>();
-    field.add( "type", ft.getTypeName() );
-    field.add( "flags", getFieldFlags(f) );
-    if( f.isRequired() ) {
-      field.add( "required", f.isRequired() );
-    }
-    if( f.getDefaultValue() != null ) {
-      field.add( "default", f.getDefaultValue() );
-    }
-    if (f == uniqueField){
-      field.add("uniqueKey", true);
-    }
-    if (ft.getIndexAnalyzer().getPositionIncrementGap(f.getName()) != 0) {
-      field.add("positionIncrementGap", ft.getIndexAnalyzer().getPositionIncrementGap(f.getName()));
-    }
-    field.add("copyDests", toListOfStringDests(schema.getCopyFieldsList(f.getName())));
-    field.add("copySources", schema.getCopySources(f.getName()));
-
-
-    fields.put( f.getName(), field );
-
-    List<String> v = typeusemap.get( ft.getTypeName() );
-    if( v == null ) {
-      v = new ArrayList<>();
-    }
-    v.add( f.getName() );
-    typeusemap.put( ft.getTypeName(), v );
-  }
-
-  // This method just gets the top-most level of information. This was conflated with getting detailed info
-  // for *all* the fields, called from CoreAdminHandler etc.
-
-  public static SimpleOrderedMap<Object> getIndexInfo(DirectoryReader reader) throws IOException {
-    Directory dir = reader.directory();
-    SimpleOrderedMap<Object> indexInfo = new SimpleOrderedMap<>();
-
-    indexInfo.add("numDocs", reader.numDocs());
-    indexInfo.add("maxDoc", reader.maxDoc());
-    indexInfo.add("deletedDocs", reader.maxDoc() - reader.numDocs());
-    indexInfo.add("indexHeapUsageBytes", getIndexHeapUsed(reader));
-
-    indexInfo.add("version", reader.getVersion());  // TODO? Is this different then: IndexReader.getCurrentVersion( dir )?
-    indexInfo.add("segmentCount", reader.leaves().size());
-    indexInfo.add("current", closeSafe( reader::isCurrent));
-    indexInfo.add("hasDeletions", reader.hasDeletions() );
-    indexInfo.add("directory", dir );
-    IndexCommit indexCommit = reader.getIndexCommit();
-    String segmentsFileName = indexCommit.getSegmentsFileName();
-    indexInfo.add("segmentsFile", segmentsFileName);
-    indexInfo.add("segmentsFileSizeInBytes", getSegmentsFileLength(indexCommit));
-    Map<String,String> userData = indexCommit.getUserData();
-    indexInfo.add("userData", userData);
-    String s = userData.get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
-    if (s != null) {
-      indexInfo.add("lastModified", new Date(Long.parseLong(s)));
-    }
-    return indexInfo;
-  }
-
-  @FunctionalInterface
-  interface IOSupplier {
-    boolean get() throws IOException;
-  }
-  
-  private static Object closeSafe(IOSupplier isCurrent) {
-    try {
-      return isCurrent.get();
-    } catch (AlreadyClosedException | IOException exception) {
-      // fall through: the reader was closed (or the check failed), so report not-current
-    }
-    return false;
-  }
-
-
-  /**
-   * <p>A helper method that attempts to determine the file length of the segments file for the
-   * specified IndexCommit from its Directory.
-   * </p>
-   * <p>
-   * If any sort of {@link IOException} occurs, this method will return "-1" and swallow the exception since
-   * this may be normal if the IndexCommit is no longer "on disk".  The specific type of the Exception will
-   * affect how severely it is logged: {@link NoSuchFileException} is considered more "acceptable" than other
-   * types of IOException which may indicate an actual problem with the Directory.
-   */
-  private static long getSegmentsFileLength(IndexCommit commit) {
-    try {
-      return commit.getDirectory().fileLength(commit.getSegmentsFileName());
-    } catch (NoSuchFileException okException) {
-      log.debug("Unable to determine the (optional) fileSize for the current IndexReader's segments file because it is "
-                + "no longer in the Directory, this can happen if there are new commits since the Reader was opened",
-                okException);
-    } catch (IOException strangeException) {
-      log.warn("Ignoring IOException wile attempting to determine the (optional) fileSize stat for the current IndexReader's segments file",
-               strangeException);
-    }
-    return -1;
-  }
-
-  /** Returns the sum of RAM bytes used by each segment */
-  private static long getIndexHeapUsed(DirectoryReader reader) {
-    long indexHeapRamBytesUsed = 0;
-    for(LeafReaderContext leafReaderContext : reader.leaves()) {
-      LeafReader leafReader = leafReaderContext.reader();
-      if (leafReader instanceof SegmentReader) {
-        indexHeapRamBytesUsed += ((SegmentReader) leafReader).ramBytesUsed();
-      } else {
-        // Not supported for any reader that is not a SegmentReader
-        return -1;
-      }
-    }
-    return indexHeapRamBytesUsed;
-  }
-
-  // Get terribly detailed information about a particular field. This is a very expensive call, use it with caution
-  // especially on large indexes!
-  @SuppressWarnings("unchecked")
-  private static void getDetailedFieldInfo(SolrQueryRequest req, String field, SimpleOrderedMap<Object> fieldMap)
-      throws IOException {
-
-    SolrParams params = req.getParams();
-    final int numTerms = params.getInt( NUMTERMS, DEFAULT_COUNT );
-
-    TopTermQueue tiq = new TopTermQueue(numTerms + 1);  // Something to collect the top N terms in.
-
-    final CharsRefBuilder spare = new CharsRefBuilder();
-
-    Terms terms = MultiTerms.getTerms(req.getSearcher().getIndexReader(), field);
-    if (terms == null) {  // field does not exist
-      return;
-    }
-    TermsEnum termsEnum = terms.iterator();
-    BytesRef text;
-    int[] buckets = new int[HIST_ARRAY_SIZE];
-    while ((text = termsEnum.next()) != null) {
-      ++tiq.distinctTerms;
-      int freq = termsEnum.docFreq();  // This calculation seems odd, but it gives the same results as it used to.
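-      // Power-of-two histogram: slot 0 counts terms with docFreq 1; slot k counts docFreq in (2^(k-1), 2^k].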
-      int slot = 32 - Integer.numberOfLeadingZeros(Math.max(0, freq - 1));
-      buckets[slot] = buckets[slot] + 1;
-      if (numTerms > 0 && freq > tiq.minFreq) {
-        spare.copyUTF8Bytes(text);
-        String t = spare.toString();
-
-        tiq.add(new TopTermQueue.TermInfo(new Term(field, t), termsEnum.docFreq()));
-        if (tiq.size() > numTerms) { // if tiq full
-          tiq.pop(); // remove lowest in tiq
-          tiq.minFreq = tiq.getTopTermInfo().docFreq;
-        }
-      }
-    }
-    tiq.histogram.add(buckets);
-    fieldMap.add("distinct", tiq.distinctTerms);
-
-    // Include top terms
-    fieldMap.add("topTerms", tiq.toNamedList(req.getSearcher().getSchema()));
-
-    // Add a histogram
-    fieldMap.add("histogram", tiq.histogram.toNamedList());
-  }
-
-  private static List<String> toListOfStrings(SchemaField[] raw) {
-    List<String> result = new ArrayList<>(raw.length);
-    for (SchemaField f : raw) {
-      result.add(f.getName());
-    }
-    return result;
-  }
-  private static List<String> toListOfStringDests(List<CopyField> raw) {
-    List<String> result = new ArrayList<>(raw.size());
-    for (CopyField f : raw) {
-      result.add(f.getDestination().getName());
-    }
-    return result;
-  }
-
-  //////////////////////// SolrInfoMBeans methods //////////////////////
-
-  @Override
-  public String getDescription() {
-    return "Lucene Index Browser.  Inspired and modeled after Luke: http://www.getopt.org/luke/";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-
-  ///////////////////////////////////////////////////////////////////////////////////////
-
-  static class TermHistogram
-  {
-    int _maxBucket = -1;
-    int[] _buckets = new int[HIST_ARRAY_SIZE];
-    public void add(int[] buckets) {
-      for (int idx = 0; idx < buckets.length; ++idx) {
-        if (buckets[idx] != 0) _maxBucket = idx;
-      }
-      for (int idx = 0; idx <= _maxBucket; ++idx) {
-        _buckets[idx] = buckets[idx];
-      }
-    }
-    // TODO? should this be a list or a map?
-    public NamedList<Integer> toNamedList()
-    {
-      NamedList<Integer> nl = new NamedList<>();
-      for( int bucket = 0; bucket <= _maxBucket; bucket++ ) {
-        nl.add( ""+ (1 << bucket), _buckets[bucket] );
-      }
-      return nl;
-    }
-  }
-  /**
-   * Private internal class that counts up frequent terms
-   */
-  private static class TopTermQueue extends PriorityQueue
-  {
-    static class TermInfo {
-      TermInfo(Term t, int df) {
-        term = t;
-        docFreq = df;
-      }
-      int docFreq;
-      Term term;
-    }
-
-    public int minFreq = 0;
-    public int distinctTerms = 0;
-    public TermHistogram histogram;
-
-
-    TopTermQueue(int size) {
-      super(size);
-      histogram = new TermHistogram();
-    }
-
-    @Override
-    protected final boolean lessThan(Object a, Object b) {
-      TermInfo termInfoA = (TermInfo)a;
-      TermInfo termInfoB = (TermInfo)b;
-      return termInfoA.docFreq < termInfoB.docFreq;
-    }
-
-    /**
-     * This is a destructive call... the queue is empty at the end
-     */
-    public NamedList<Integer> toNamedList( IndexSchema schema )
-    {
-      // reverse the list..
-      List<TermInfo> aslist = new LinkedList<>();
-      while( size() > 0 ) {
-        aslist.add( 0, (TermInfo)pop() );
-      }
-
-      NamedList<Integer> list = new NamedList<>();
-      for (TermInfo i : aslist) {
-        String txt = i.term.text();
-        SchemaField ft = schema.getFieldOrNull( i.term.field() );
-        if( ft != null ) {
-          txt = ft.getType().indexedToReadable( txt );
-        }
-        list.add( txt, i.docFreq );
-      }
-      return list;
-    }
-    public TermInfo getTopTermInfo() {
-      return (TermInfo)top();
-    }
-  }
-}
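
As a usage note: this handler backs the /admin/luke endpoint, and SolrJ ships a client-side
wrapper for it. The following is a hedged sketch, assuming SolrJ's LukeRequest/LukeResponse
classes and a core reachable at the URL shown (neither is part of this diff):

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.LukeRequest;
    import org.apache.solr.client.solrj.response.LukeResponse;

    public class LukeExample {
      public static void main(String[] args) throws Exception {
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/collection1").build()) {
          LukeRequest luke = new LukeRequest();      // targets /admin/luke on the core
          luke.setNumTerms(10);                      // the numTerms param handled above
          luke.setShowSchema(false);                 // field/index report rather than show=schema
          LukeResponse rsp = luke.process(client);
          System.out.println(rsp.getIndexInfo());    // numDocs, maxDoc, segmentsFile, ...
        }
      }
    }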

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/MergeIndexesOp.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/MergeIndexesOp.java b/solr/core/src/java/org/apache/solr/handler/admin/MergeIndexesOp.java
deleted file mode 100644
index 90690ff..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/MergeIndexesOp.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.admin;
-
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import com.google.common.collect.Lists;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.IOUtils;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.params.UpdateParams;
-import org.apache.solr.core.CachingDirectoryFactory;
-import org.apache.solr.core.DirectoryFactory;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.request.LocalSolrQueryRequest;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.update.MergeIndexesCommand;
-import org.apache.solr.update.processor.UpdateRequestProcessor;
-import org.apache.solr.update.processor.UpdateRequestProcessorChain;
-import org.apache.solr.util.RefCounted;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-class MergeIndexesOp implements CoreAdminHandler.CoreAdminOp {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  @Override
-  public void execute(CoreAdminHandler.CallInfo it) throws Exception {
-    SolrParams params = it.req.getParams();
-    String cname = params.required().get(CoreAdminParams.CORE);
-    SolrCore core = it.handler.coreContainer.getCore(cname);
-    SolrQueryRequest wrappedReq = null;
-    if (core == null) return;
-
-    List<SolrCore> sourceCores = Lists.newArrayList();
-    List<RefCounted<SolrIndexSearcher>> searchers = Lists.newArrayList();
-    // stores readers created from indexDir param values
-    List<DirectoryReader> readersToBeClosed = Lists.newArrayList();
-    Map<Directory, Boolean> dirsToBeReleased = new HashMap<>();
-
-    try {
-      String[] dirNames = params.getParams(CoreAdminParams.INDEX_DIR);
-      if (dirNames == null || dirNames.length == 0) {
-        String[] sources = params.getParams("srcCore");
-        if (sources == null || sources.length == 0)
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "At least one indexDir or srcCore must be specified");
-
-        for (int i = 0; i < sources.length; i++) {
-          String source = sources[i];
-          SolrCore srcCore = it.handler.coreContainer.getCore(source);
-          if (srcCore == null)
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-                "Core: " + source + " does not exist");
-          sourceCores.add(srcCore);
-        }
-      } else {
-        DirectoryFactory dirFactory = core.getDirectoryFactory();
-        for (int i = 0; i < dirNames.length; i++) {
-          boolean markAsDone = false;
-          if (dirFactory instanceof CachingDirectoryFactory) {
-            if (!((CachingDirectoryFactory) dirFactory).getLivePaths().contains(dirNames[i])) {
-              markAsDone = true;
-            }
-          }
-          Directory dir = dirFactory.get(dirNames[i], DirectoryFactory.DirContext.DEFAULT, core.getSolrConfig().indexConfig.lockType);
-          dirsToBeReleased.put(dir, markAsDone);
-          // TODO: why doesn't this use the IR factory? what is going on here?
-          readersToBeClosed.add(DirectoryReader.open(dir));
-        }
-      }
-
-      List<DirectoryReader> readers = null;
-      if (readersToBeClosed.size() > 0) {
-        readers = readersToBeClosed;
-      } else {
-        readers = Lists.newArrayList();
-        for (SolrCore solrCore : sourceCores) {
-          // record the searchers so that we can decref
-          RefCounted<SolrIndexSearcher> searcher = solrCore.getSearcher();
-          searchers.add(searcher);
-          readers.add(searcher.get().getIndexReader());
-        }
-      }
-
-      UpdateRequestProcessorChain processorChain =
-          core.getUpdateProcessingChain(params.get(UpdateParams.UPDATE_CHAIN));
-      wrappedReq = new LocalSolrQueryRequest(core, it.req.getParams());
-      UpdateRequestProcessor processor =
-          processorChain.createProcessor(wrappedReq, it.rsp);
-      processor.processMergeIndexes(new MergeIndexesCommand(readers, it.req));
-    } catch (Exception e) {
-      // log and rethrow so that if the finally fails we don't lose the original problem
-      log.error("ERROR executing merge:", e);
-      throw e;
-    } finally {
-      for (RefCounted<SolrIndexSearcher> searcher : searchers) {
-        if (searcher != null) searcher.decref();
-      }
-      for (SolrCore solrCore : sourceCores) {
-        if (solrCore != null) solrCore.close();
-      }
-      IOUtils.closeWhileHandlingException(readersToBeClosed);
-      Set<Map.Entry<Directory, Boolean>> entries = dirsToBeReleased.entrySet();
-      for (Map.Entry<Directory, Boolean> entry : entries) {
-        DirectoryFactory dirFactory = core.getDirectoryFactory();
-        Directory dir = entry.getKey();
-        boolean markAsDone = entry.getValue();
-        if (markAsDone) {
-          dirFactory.doneWithDirectory(dir);
-        }
-        dirFactory.release(dir);
-      }
-      if (wrappedReq != null) wrappedReq.close();
-      core.close();
-    }
-  }
-}
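
For context, this op implements the MERGEINDEXES core admin action. A hedged SolrJ sketch of
invoking it, assuming CoreAdminRequest's static mergeIndexes helper and the core names shown
here (both are assumptions, not part of this diff):

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.CoreAdminRequest;

    public class MergeExample {
      public static void main(String[] args) throws Exception {
        try (SolrClient admin = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
          // Merge the indexes of cores src1 and src2 into core "target".
          CoreAdminRequest.mergeIndexes("target",
              new String[0],                    // no indexDir params
              new String[] {"src1", "src2"},    // srcCore params, as read above
              admin);
        }
      }
    }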

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/MetricsCollectorHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/MetricsCollectorHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/MetricsCollectorHandler.java
deleted file mode 100644
index 7de3ac2..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/MetricsCollectorHandler.java
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.Map;
-
-import com.codahale.metrics.MetricRegistry;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.ContentStream;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.handler.loader.ContentStreamLoader;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.handler.loader.CSVLoader;
-import org.apache.solr.handler.loader.JavabinLoader;
-import org.apache.solr.handler.loader.JsonLoader;
-import org.apache.solr.handler.loader.XMLLoader;
-import org.apache.solr.metrics.AggregateMetric;
-import org.apache.solr.metrics.SolrMetricManager;
-import org.apache.solr.metrics.reporters.solr.SolrReporter;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.update.AddUpdateCommand;
-import org.apache.solr.update.CommitUpdateCommand;
-import org.apache.solr.update.DeleteUpdateCommand;
-import org.apache.solr.update.MergeIndexesCommand;
-import org.apache.solr.update.RollbackUpdateCommand;
-import org.apache.solr.update.processor.UpdateRequestProcessor;
-import org.apache.solr.util.stats.MetricUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Handler to collect and aggregate metric reports.  Each report indicates the target registry where
- * metrics values should be collected and aggregated. Metrics with the same names are
- * aggregated using {@link AggregateMetric} instances, which track the source of updates and
- * their count, as well as providing simple statistics over collected values.
- *
- * Each report consists of {@link SolrInputDocument}-s that are expected to contain
- * the following fields:
- * <ul>
- *   <li>{@link SolrReporter#GROUP_ID} - (required) specifies target registry name where metrics will be grouped.</li>
- *   <li>{@link SolrReporter#REPORTER_ID} - (required) id of the reporter that sent this update. This can be, e.g.,
- *   a node name, replica name, or other id that uniquely identifies the source of metrics values.</li>
- *   <li>{@link MetricUtils#METRIC_NAME} - (required) metric name (in the source registry)</li>
- *   <li>{@link SolrReporter#LABEL_ID} - (optional) label to prepend to metric names in the target registry.</li>
- *   <li>{@link SolrReporter#REGISTRY_ID} - (optional) name of the source registry.</li>
- * </ul>
- * Remaining fields are assumed to be single-valued, and to contain metric attributes and their values. Example:
- * <pre>
- *   &lt;doc&gt;
- *     &lt;field name="_group_"&gt;solr.core.collection1.shard1.leader&lt;/field&gt;
- *     &lt;field name="_reporter_"&gt;core_node3&lt;/field&gt;
- *     &lt;field name="metric"&gt;INDEX.merge.errors&lt;/field&gt;
- *     &lt;field name="value"&gt;0&lt;/field&gt;
- *   &lt;/doc&gt;
- * </pre>
- */
-public class MetricsCollectorHandler extends RequestHandlerBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String HANDLER_PATH = "/admin/metrics/collector";
-
-  private final CoreContainer coreContainer;
-  private final SolrMetricManager metricManager;
-  private final Map<String, ContentStreamLoader> loaders = new HashMap<>();
-  private SolrParams params;
-
-  public MetricsCollectorHandler(final CoreContainer coreContainer) {
-    this.coreContainer = coreContainer;
-    this.metricManager = coreContainer.getMetricManager();
-
-  }
-
-  @Override
-  public void init(NamedList initArgs) {
-    super.init(initArgs);
-    if (initArgs != null) {
-      params = initArgs.toSolrParams();
-    } else {
-      params = new ModifiableSolrParams();
-    }
-    loaders.put("application/xml", new XMLLoader().init(params) );
-    loaders.put("application/json", new JsonLoader().init(params) );
-    loaders.put("application/csv", new CSVLoader().init(params) );
-    loaders.put("application/javabin", new JavabinLoader().init(params) );
-    loaders.put("text/csv", loaders.get("application/csv") );
-    loaders.put("text/xml", loaders.get("application/xml") );
-    loaders.put("text/json", loaders.get("application/json"));
-  }
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    if (coreContainer == null || coreContainer.isShutDown()) {
-      // silently drop request
-      return;
-    }
-    //log.info("#### " + req.toString());
-    if (req.getContentStreams() == null) { // no content
-      return;
-    }
-    for (ContentStream cs : req.getContentStreams()) {
-      if (cs.getContentType() == null) {
-        log.warn("Missing content type - ignoring");
-        continue;
-      }
-      ContentStreamLoader loader = loaders.get(cs.getContentType());
-      if (loader == null) {
-        throw new SolrException(SolrException.ErrorCode.UNSUPPORTED_MEDIA_TYPE, "Unsupported content type for stream: " + cs.getSourceInfo() + ", contentType=" + cs.getContentType());
-      }
-      loader.load(req, rsp, cs, new MetricUpdateProcessor(metricManager));
-    }
-  }
-
-  @Override
-  public String getDescription() {
-    return "Handler for collecting and aggregating SolrCloud metric reports.";
-  }
-
-  private static class MetricUpdateProcessor extends UpdateRequestProcessor {
-    private final SolrMetricManager metricManager;
-
-    public MetricUpdateProcessor(SolrMetricManager metricManager) {
-      super(null);
-      this.metricManager = metricManager;
-    }
-
-    @Override
-    public void processAdd(AddUpdateCommand cmd) throws IOException {
-      SolrInputDocument doc = cmd.solrDoc;
-      if (doc == null) {
-        return;
-      }
-      String metricName = (String)doc.getFieldValue(MetricUtils.METRIC_NAME);
-      if (metricName == null) {
-        log.warn("Missing " + MetricUtils.METRIC_NAME + " field in document, skipping: " + doc);
-        return;
-      }
-      doc.remove(MetricUtils.METRIC_NAME);
-      // XXX we could modify keys by using this original registry name
-      doc.remove(SolrReporter.REGISTRY_ID);
-      String groupId = (String)doc.getFieldValue(SolrReporter.GROUP_ID);
-      if (groupId == null) {
-        log.warn("Missing " + SolrReporter.GROUP_ID + " field in document, skipping: " + doc);
-        return;
-      }
-      doc.remove(SolrReporter.GROUP_ID);
-      String reporterId = (String)doc.getFieldValue(SolrReporter.REPORTER_ID);
-      if (reporterId == null) {
-        log.warn("Missing " + SolrReporter.REPORTER_ID + " field in document, skipping: " + doc);
-        return;
-      }
-      doc.remove(SolrReporter.REPORTER_ID);
-      String labelId = (String)doc.getFieldValue(SolrReporter.LABEL_ID);
-      doc.remove(SolrReporter.LABEL_ID);
-      doc.forEach(f -> {
-        String key;
-        if (doc.size() == 1 && f.getName().equals(MetricUtils.VALUE)) {
-          // only one "value" field - skip the unnecessary field name
-          key = MetricRegistry.name(labelId, metricName);
-        } else {
-          key = MetricRegistry.name(labelId, metricName, f.getName());
-        }
-        MetricRegistry registry = metricManager.registry(groupId);
-        AggregateMetric metric = getOrCreate(registry, key);
-        Object o = f.getFirstValue();
-        if (o != null) {
-          metric.set(reporterId, o);
-        } else {
-          // remove missing values
-          metric.clear(reporterId);
-        }
-      });
-    }
-
-    private AggregateMetric getOrCreate(MetricRegistry registry, String name) {
-      AggregateMetric existing = (AggregateMetric)registry.getMetrics().get(name);
-      if (existing != null) {
-        return existing;
-      }
-      AggregateMetric add = new AggregateMetric();
-      try {
-        registry.register(name, add);
-        return add;
-      } catch (IllegalArgumentException e) {
-        // someone added before us
-        existing = (AggregateMetric)registry.getMetrics().get(name);
-        if (existing == null) { // now, that is weird...
-          throw new IllegalArgumentException("Inconsistent metric status, " + name);
-        }
-        return existing;
-      }
-    }
-
-    @Override
-    public void processDelete(DeleteUpdateCommand cmd) throws IOException {
-      throw new UnsupportedOperationException("processDelete");
-    }
-
-    @Override
-    public void processMergeIndexes(MergeIndexesCommand cmd) throws IOException {
-      throw new UnsupportedOperationException("processMergeIndexes");
-    }
-
-    @Override
-    public void processCommit(CommitUpdateCommand cmd) throws IOException {
-      throw new UnsupportedOperationException("processCommit");
-    }
-
-    @Override
-    public void processRollback(RollbackUpdateCommand cmd) throws IOException {
-      throw new UnsupportedOperationException("processRollback");
-    }
-  }
-}
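
To make the expected input concrete, here is a sketch of one report document shaped the way
MetricUpdateProcessor.processAdd() above consumes it. The literal field names are taken from
the javadoc example in this class; the values and reporter plumbing are assumed:

    import org.apache.solr.common.SolrInputDocument;

    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("_group_", "solr.core.collection1.shard1.leader"); // SolrReporter.GROUP_ID
    doc.addField("_reporter_", "core_node3");                       // SolrReporter.REPORTER_ID
    doc.addField("metric", "INDEX.merge.errors");                   // MetricUtils.METRIC_NAME
    doc.addField("value", 0);  // remaining fields are aggregated per reporter id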

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java
deleted file mode 100644
index 752e021..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.admin;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.function.BiConsumer;
-import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-
-import com.codahale.metrics.Counter;
-import com.codahale.metrics.Gauge;
-import com.codahale.metrics.Histogram;
-import com.codahale.metrics.Meter;
-import com.codahale.metrics.Metric;
-import com.codahale.metrics.MetricFilter;
-import com.codahale.metrics.MetricRegistry;
-import com.codahale.metrics.Timer;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.metrics.SolrMetricManager;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.security.AuthorizationContext;
-import org.apache.solr.security.PermissionNameProvider;
-import org.apache.solr.util.stats.MetricUtils;
-
-/**
- * Request handler to return metrics
- */
-public class MetricsHandler extends RequestHandlerBase implements PermissionNameProvider {
-  final SolrMetricManager metricManager;
-
-  public static final String COMPACT_PARAM = "compact";
-  public static final String PREFIX_PARAM = "prefix";
-  public static final String REGEX_PARAM = "regex";
-  public static final String PROPERTY_PARAM = "property";
-  public static final String REGISTRY_PARAM = "registry";
-  public static final String GROUP_PARAM = "group";
-  public static final String KEY_PARAM = "key";
-  public static final String TYPE_PARAM = "type";
-
-  public static final String ALL = "all";
-
-  private static final Pattern KEY_REGEX = Pattern.compile("(?<!" + Pattern.quote("\\") + ")" + Pattern.quote(":"));
-  private CoreContainer cc;
-
-  public MetricsHandler() {
-    this.metricManager = null;
-  }
-
-  public MetricsHandler(CoreContainer coreContainer) {
-    this.metricManager = coreContainer.getMetricManager();
-    this.cc = coreContainer;
-  }
-
-  public MetricsHandler(SolrMetricManager metricManager) {
-    this.metricManager = metricManager;
-  }
-
-  @Override
-  public Name getPermissionName(AuthorizationContext request) {
-    return Name.METRICS_READ_PERM;
-  }
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    if (metricManager == null) {
-      throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "SolrMetricManager instance not initialized");
-    }
-
-    if (cc != null && AdminHandlersProxy.maybeProxyToNodes(req, rsp, cc)) {
-      return; // Request was proxied to other node
-    }
-
-    handleRequest(req.getParams(), (k, v) -> rsp.add(k, v));
-  }
-  
-  public void handleRequest(SolrParams params, BiConsumer<String, Object> consumer) throws Exception {
-    boolean compact = params.getBool(COMPACT_PARAM, true);
-    String[] keys = params.getParams(KEY_PARAM);
-    if (keys != null && keys.length > 0) {
-      handleKeyRequest(keys, consumer);
-      return;
-    }
-    MetricFilter mustMatchFilter = parseMustMatchFilter(params);
-    MetricUtils.PropertyFilter propertyFilter = parsePropertyFilter(params);
-    List<MetricType> metricTypes = parseMetricTypes(params);
-    List<MetricFilter> metricFilters = metricTypes.stream().map(MetricType::asMetricFilter).collect(Collectors.toList());
-    Set<String> requestedRegistries = parseRegistries(params);
-
-    NamedList response = new SimpleOrderedMap();
-    for (String registryName : requestedRegistries) {
-      MetricRegistry registry = metricManager.registry(registryName);
-      SimpleOrderedMap result = new SimpleOrderedMap();
-      MetricUtils.toMaps(registry, metricFilters, mustMatchFilter, propertyFilter, false,
-          false, compact, false, (k, v) -> result.add(k, v));
-      if (result.size() > 0) {
-        response.add(registryName, result);
-      }
-    }
-    consumer.accept("metrics", response);
-  }
-
-  public void handleKeyRequest(String[] keys, BiConsumer<String, Object> consumer) throws Exception {
-    SimpleOrderedMap result = new SimpleOrderedMap();
-    SimpleOrderedMap errors = new SimpleOrderedMap();
-    for (String key : keys) {
-      if (key == null || key.isEmpty()) {
-        continue;
-      }
-      String[] parts = KEY_REGEX.split(key);
-      if (parts.length < 2 || parts.length > 3) {
-        errors.add(key, "at least two and at most three colon-separated parts must be provided");
-        continue;
-      }
-      final String registryName = unescape(parts[0]);
-      final String metricName = unescape(parts[1]);
-      final String propertyName = parts.length > 2 ? unescape(parts[2]) : null;
-      if (!metricManager.hasRegistry(registryName)) {
-        errors.add(key, "registry '" + registryName + "' not found");
-        continue;
-      }
-      MetricRegistry registry = metricManager.registry(registryName);
-      Metric m = registry.getMetrics().get(metricName);
-      if (m == null) {
-        errors.add(key, "metric '" + metricName + "' not found");
-        continue;
-      }
-      MetricUtils.PropertyFilter propertyFilter = MetricUtils.PropertyFilter.ALL;
-      if (propertyName != null) {
-        propertyFilter = (name) -> name.equals(propertyName);
-        // use escaped versions
-        key = parts[0] + ":" + parts[1];
-      }
-      MetricUtils.convertMetric(key, m, propertyFilter, false, true, true, false, ":", (k, v) -> {
-        if ((v instanceof Map) && propertyName != null) {
-          ((Map)v).forEach((k1, v1) -> result.add(k + ":" + k1, v1));
-        } else {
-          result.add(k, v);
-        }
-      });
-    }
-    consumer.accept("metrics", result);
-    if (errors.size() > 0) {
-      consumer.accept("errors", errors);
-    }
-  }
-
-  private static String unescape(String s) {
-    if (s.indexOf('\\') == -1) {
-      return s;
-    }
-    StringBuilder sb = new StringBuilder(s.length());
-    for (int i = 0; i < s.length(); i++) {
-      char c = s.charAt(i);
-      if (c == '\\') {
-        continue;
-      }
-      sb.append(c);
-    }
-    return sb.toString();
-  }
-
-  private MetricFilter parseMustMatchFilter(SolrParams params) {
-    String[] prefixes = params.getParams(PREFIX_PARAM);
-    MetricFilter prefixFilter = null;
-    if (prefixes != null && prefixes.length > 0) {
-      Set<String> prefixSet = new HashSet<>();
-      for (String prefix : prefixes) {
-        prefixSet.addAll(StrUtils.splitSmart(prefix, ','));
-      }
-      prefixFilter = new SolrMetricManager.PrefixFilter(prefixSet);
-    }
-    String[] regexes = params.getParams(REGEX_PARAM);
-    MetricFilter regexFilter = null;
-    if (regexes != null && regexes.length > 0) {
-      regexFilter = new SolrMetricManager.RegexFilter(regexes);
-    }
-    MetricFilter mustMatchFilter;
-    if (prefixFilter == null && regexFilter == null) {
-      mustMatchFilter = MetricFilter.ALL;
-    } else {
-      if (prefixFilter == null) {
-        mustMatchFilter = regexFilter;
-      } else if (regexFilter == null) {
-        mustMatchFilter = prefixFilter;
-      } else {
-        mustMatchFilter = new SolrMetricManager.OrFilter(prefixFilter, regexFilter);
-      }
-    }
-    return mustMatchFilter;
-  }
-
-  private MetricUtils.PropertyFilter parsePropertyFilter(SolrParams params) {
-    String[] props = params.getParams(PROPERTY_PARAM);
-    if (props == null || props.length == 0) {
-      return MetricUtils.PropertyFilter.ALL;
-    }
-    final Set<String> filter = new HashSet<>();
-    for (String prop : props) {
-      if (prop != null && !prop.trim().isEmpty()) {
-        filter.add(prop.trim());
-      }
-    }
-    if (filter.isEmpty()) {
-      return MetricUtils.PropertyFilter.ALL;
-    } else {
-      return (name) -> filter.contains(name);
-    }
-  }
-
-  private Set<String> parseRegistries(SolrParams params) {
-    String[] groupStr = params.getParams(GROUP_PARAM);
-    String[] registryStr = params.getParams(REGISTRY_PARAM);
-    return parseRegistries(groupStr, registryStr);
-  }
-
-  public Set<String> parseRegistries(String[] groupStr, String[] registryStr) {
-    if ((groupStr == null || groupStr.length == 0) && (registryStr == null || registryStr.length == 0)) {
-      // return all registries
-      return metricManager.registryNames();
-    }
-    boolean allRegistries = false;
-    Set<String> initialPrefixes = Collections.emptySet();
-    if (groupStr != null && groupStr.length > 0) {
-      initialPrefixes = new HashSet<>();
-      for (String g : groupStr) {
-        List<String> split = StrUtils.splitSmart(g, ',');
-        for (String s : split) {
-          if (s.trim().equals(ALL)) {
-            allRegistries = true;
-            break;
-          }
-          initialPrefixes.add(SolrMetricManager.enforcePrefix(s.trim()));
-        }
-        if (allRegistries) {
-          return metricManager.registryNames();
-        }
-      }
-    }
-
-    if (registryStr != null && registryStr.length > 0) {
-      if (initialPrefixes.isEmpty()) {
-        initialPrefixes = new HashSet<>();
-      }
-      for (String r : registryStr) {
-        List<String> split = StrUtils.splitSmart(r, ',');
-        for (String s : split) {
-          if (s.trim().equals(ALL)) {
-            allRegistries = true;
-            break;
-          }
-          initialPrefixes.add(SolrMetricManager.enforcePrefix(s.trim()));
-        }
-        if (allRegistries) {
-          return metricManager.registryNames();
-        }
-      }
-    }
-    Set<String> validRegistries = new HashSet<>();
-    for (String r : metricManager.registryNames()) {
-      for (String prefix : initialPrefixes) {
-        if (r.startsWith(prefix)) {
-          validRegistries.add(r);
-          break;
-        }
-      }
-    }
-    return validRegistries;
-  }
-
-  private List<MetricType> parseMetricTypes(SolrParams params) {
-    String[] typeStr = params.getParams(TYPE_PARAM);
-    List<String> types = Collections.emptyList();
-    if (typeStr != null && typeStr.length > 0)  {
-      types = new ArrayList<>();
-      for (String type : typeStr) {
-        types.addAll(StrUtils.splitSmart(type, ','));
-      }
-    }
-
-    List<MetricType> metricTypes = Collections.singletonList(MetricType.all); // include all metrics by default
-    try {
-      if (types.size() > 0) {
-        metricTypes = types.stream().map(String::trim).map(MetricType::valueOf).collect(Collectors.toList());
-      }
-    } catch (IllegalArgumentException e) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid metric type in: " + types +
-          " specified. Must be one of " + MetricType.SUPPORTED_TYPES_MSG, e);
-    }
-    return metricTypes;
-  }
-
-  @Override
-  public String getDescription() {
-    return "A handler to return all the metrics gathered by Solr";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-
-  enum MetricType {
-    histogram(Histogram.class),
-    meter(Meter.class),
-    timer(Timer.class),
-    counter(Counter.class),
-    gauge(Gauge.class),
-    all(null);
-
-    public static final String SUPPORTED_TYPES_MSG = EnumSet.allOf(MetricType.class).toString();
-
-    private final Class klass;
-
-    MetricType(Class klass) {
-      this.klass = klass;
-    }
-
-    public MetricFilter asMetricFilter() {
-      return (name, metric) -> klass == null || klass.isInstance(metric);
-    }
-  }
-}

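One non-obvious detail in the handler above is the key=registry:metric[:property] syntax: KEY_REGEX splits on colons that are not preceded by a backslash, and unescape(...) then strips the backslashes. A small standalone sketch of that parsing, using a made-up key for illustration (unescape here is condensed, but behaves like the method above):

    import java.util.Arrays;
    import java.util.regex.Pattern;

    public class MetricKeySketch {
      // Same pattern as KEY_REGEX above: split on ':' unless it is escaped as '\:'.
      private static final Pattern KEY_REGEX =
          Pattern.compile("(?<!" + Pattern.quote("\\") + ")" + Pattern.quote(":"));

      // Condensed equivalent of unescape(...) above: drop every backslash.
      private static String unescape(String s) {
        return s.indexOf('\\') == -1 ? s : s.replace("\\", "");
      }

      public static void main(String[] args) {
        // registry : metric : property, with an escaped colon inside the metric name
        String key = "solr.core.collection1:QUERY./select\\:debug.requestTimes:count";
        String[] parts = KEY_REGEX.split(key);
        System.out.println(Arrays.toString(parts));
        // -> [solr.core.collection1, QUERY./select\:debug.requestTimes, count]
        System.out.println(unescape(parts[1]));
        // -> QUERY./select:debug.requestTimes
      }
    }
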

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java b/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java
deleted file mode 100644
index 1747fa2..0000000
--- a/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java
+++ /dev/null
@@ -1,396 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.lang.invoke.MethodHandles;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Properties;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import org.apache.commons.lang.StringUtils;
-import org.apache.solr.cloud.CloudDescriptor;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.util.PropertiesUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A Solr core descriptor
- *
- * @since solr 1.3
- */
-public class CoreDescriptor {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  // Properties file name constants
-  public static final String CORE_NAME = "name";
-  public static final String CORE_CONFIG = "config";
-  public static final String CORE_DATADIR = "dataDir";
-  public static final String CORE_ULOGDIR = "ulogDir";
-  public static final String CORE_SCHEMA = "schema";
-  public static final String CORE_SHARD = "shard";
-  public static final String CORE_COLLECTION = "collection";
-  public static final String CORE_ROLES = "roles";
-  public static final String CORE_PROPERTIES = "properties";
-  public static final String CORE_LOADONSTARTUP = "loadOnStartup";
-  public static final String CORE_TRANSIENT = "transient";
-  public static final String CORE_NODE_NAME = "coreNodeName";
-  public static final String CORE_CONFIGSET = "configSet";
-  public static final String CORE_CONFIGSET_PROPERTIES = "configSetProperties";
-  public static final String SOLR_CORE_PROP_PREFIX = "solr.core.";
-
-  public static final String DEFAULT_EXTERNAL_PROPERTIES_FILE = "conf" + File.separator + "solrcore.properties";
-
-  /**
-   * Whether this core was configured using a trusted configSet.
-   * This helps avoid loading plugins with potential vulnerabilities
-   * when the configSet was not uploaded by a trusted user.
-   */
-  private boolean trustedConfigSet = true;
-
-  /**
-   * Get the standard properties in persistable form
-   * @return the standard core properties in persistable form
-   */
-  public Properties getPersistableStandardProperties() {
-    return originalCoreProperties;
-  }
-
-  /**
-   * Get user-defined core properties in persistable form
-   * @return user-defined core properties in persistable form
-   */
-  public Properties getPersistableUserProperties() {
-    return originalExtraProperties;
-  }
-
-  private static ImmutableMap<String, String> defaultProperties = new ImmutableMap.Builder<String, String>()
-      .put(CORE_CONFIG, "solrconfig.xml")
-      .put(CORE_SCHEMA, "schema.xml")
-      .put(CORE_CONFIGSET_PROPERTIES, ConfigSetProperties.DEFAULT_FILENAME)
-      .put(CORE_DATADIR, "data" + File.separator)
-      .put(CORE_TRANSIENT, "false")
-      .put(CORE_LOADONSTARTUP, "true")
-      .build();
-
-  private static ImmutableList<String> requiredProperties = ImmutableList.of(
-      CORE_NAME
-  );
-
-  public static ImmutableList<String> standardPropNames = ImmutableList.of(
-      CORE_NAME,
-      CORE_CONFIG,
-      CORE_DATADIR,
-      CORE_ULOGDIR,
-      CORE_SCHEMA,
-      CORE_PROPERTIES,
-      CORE_CONFIGSET_PROPERTIES,
-      CORE_LOADONSTARTUP,
-      CORE_TRANSIENT,
-      CORE_CONFIGSET,
-      // cloud props
-      CORE_SHARD,
-      CORE_COLLECTION,
-      CORE_ROLES,
-      CORE_NODE_NAME,
-      CloudDescriptor.NUM_SHARDS
-  );
-
-  private final CloudDescriptor cloudDesc;
-
-  private final Path instanceDir;
-
-  /** The original standard core properties, before substitution */
-  protected final Properties originalCoreProperties = new Properties();
-
-  /** The original extra core properties, before substitution */
-  protected final Properties originalExtraProperties = new Properties();
-
-  /** The properties for this core, as available through getProperty() */
-  protected final Properties coreProperties = new Properties();
-
-  /** The properties for this core, substitutable by resource loaders */
-  protected final Properties substitutableProperties = new Properties();
-
-  public CoreDescriptor(String name, Path instanceDir, Properties containerProperties,
-                        boolean isZooKeeperAware, String... properties) {
-    this(name, instanceDir, toMap(properties), containerProperties, isZooKeeperAware);
-  }
-
-  private static Map<String, String> toMap(String... properties) {
-    Map<String, String> props = new HashMap<>();
-    assert properties.length % 2 == 0;
-    for (int i = 0; i < properties.length; i += 2) {
-      props.put(properties[i], properties[i+1]);
-    }
-    return props;
-  }
-
-  /**
-   * Create a new CoreDescriptor with a given name and instancedir
-   * @param name          the CoreDescriptor's name
-   * @param instanceDir   the CoreDescriptor's instancedir
-   * @param containerProperties the enclosing container properties for variable resolution
-   * @param isZooKeeperAware whether we are part of SolrCloud or not. 
-   */
-  public CoreDescriptor(String name, Path instanceDir,
-                        Properties containerProperties, boolean isZooKeeperAware) {
-    this(name, instanceDir, Collections.emptyMap(), containerProperties, isZooKeeperAware);
-  }
-
-  /**
-   * Create a new CoreDescriptor using the properties of an existing one
-   * @param coreName the new CoreDescriptor's name
-   * @param other    the CoreDescriptor to copy
-   */
-  public CoreDescriptor(String coreName, CoreDescriptor other) {
-    this.cloudDesc = other.cloudDesc;
-    this.instanceDir = other.instanceDir;
-    this.originalExtraProperties.putAll(other.originalExtraProperties);
-    this.originalCoreProperties.putAll(other.originalCoreProperties);
-    this.coreProperties.putAll(other.coreProperties);
-    this.substitutableProperties.putAll(other.substitutableProperties);
-    this.coreProperties.setProperty(CORE_NAME, coreName);
-    this.originalCoreProperties.setProperty(CORE_NAME, coreName);
-    this.substitutableProperties.setProperty(SOLR_CORE_PROP_PREFIX + CORE_NAME, coreName);
-    this.trustedConfigSet = other.trustedConfigSet;
-  }
-
-  /**
-   * Create a new CoreDescriptor.
-   * @param name            the CoreDescriptor's name
-   * @param instanceDir     a Path resolving to the instanceDir
-   * @param coreProps       a Map of the properties for this core
-   * @param containerProperties the properties from the enclosing container.
-   * @param isZooKeeperAware if true, we are in SolrCloud mode.
-   */
-  public CoreDescriptor(String name, Path instanceDir, Map<String, String> coreProps,
-                        Properties containerProperties, boolean isZooKeeperAware) {
-    this.instanceDir = instanceDir;
-
-    originalCoreProperties.setProperty(CORE_NAME, name);
-
-    name = PropertiesUtil.substituteProperty(checkPropertyIsNotEmpty(name, CORE_NAME),
-                                             containerProperties);
-
-    coreProperties.putAll(defaultProperties);
-    coreProperties.put(CORE_NAME, name);
-
-    for (String propname : coreProps.keySet()) {
-
-      String propvalue = coreProps.get(propname);
-
-      if (isUserDefinedProperty(propname))
-        originalExtraProperties.put(propname, propvalue);
-      else
-        originalCoreProperties.put(propname, propvalue);
-
-      if (!requiredProperties.contains(propname))   // Required props are already dealt with
-        coreProperties.setProperty(propname,
-            PropertiesUtil.substituteProperty(propvalue, containerProperties));
-    }
-
-    loadExtraProperties();
-    buildSubstitutableProperties();
-
-    // TODO maybe make this a CloudCoreDescriptor subclass?
-    if (isZooKeeperAware) {
-      cloudDesc = new CloudDescriptor(name, coreProperties, this);
-    }
-    else {
-      cloudDesc = null;
-    }
-
-    log.debug("Created CoreDescriptor: " + coreProperties);
-  }
-
-  /**
-   * Load properties specified in an external properties file.
-   *
-   * The file to load can be specified in a {@code properties} property on
-   * the original Properties object used to create this CoreDescriptor.  If
-   * this has not been set, then we look for {@code conf/solrcore.properties}
-   * underneath the instance dir.
-   *
-   * File paths are taken as read from the core's instance directory
-   * if they are not absolute.
-   */
-  protected void loadExtraProperties() {
-    String filename = coreProperties.getProperty(CORE_PROPERTIES, DEFAULT_EXTERNAL_PROPERTIES_FILE);
-    Path propertiesFile = instanceDir.resolve(filename);
-    if (Files.exists(propertiesFile)) {
-      try (InputStream is = Files.newInputStream(propertiesFile)) {
-        Properties externalProps = new Properties();
-        externalProps.load(new InputStreamReader(is, StandardCharsets.UTF_8));
-        coreProperties.putAll(externalProps);
-      } catch (IOException e) {
-        String message = String.format(Locale.ROOT, "Could not load properties from %s: %s",
-            propertiesFile.toString(), e.toString());
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, message);
-      }
-    }
-  }
-
-  /**
-   * Create the properties object used by resource loaders, etc, for property
-   * substitution.  The default solr properties are prefixed with 'solr.core.', so,
-   * e.g., 'name' becomes 'solr.core.name'
-   */
-  protected void buildSubstitutableProperties() {
-    for (String propName : coreProperties.stringPropertyNames()) {
-      String propValue = coreProperties.getProperty(propName);
-      if (!isUserDefinedProperty(propName))
-        propName = SOLR_CORE_PROP_PREFIX + propName;
-      substitutableProperties.setProperty(propName, propValue);
-    }
-    substitutableProperties.setProperty("solr.core.instanceDir", instanceDir.toAbsolutePath().toString());
-  }
-
-  /**
-   * Is this property a Solr-standard property, or is it an extra property
-   * defined per-core by the user?
-   * @param propName the Property name
-   * @return {@code true} if this property is user-defined
-   */
-  protected static boolean isUserDefinedProperty(String propName) {
-    return !standardPropNames.contains(propName);
-  }
-
-  public static String checkPropertyIsNotEmpty(String value, String propName) {
-    if (StringUtils.isEmpty(value)) {
-      String message = String.format(Locale.ROOT, "Cannot create core with empty %s value", propName);
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, message);
-    }
-    return value;
-  }
-
-  public String getPropertiesName() {
-    return coreProperties.getProperty(CORE_PROPERTIES);
-  }
-
-  public String getDataDir() {
-    return coreProperties.getProperty(CORE_DATADIR);
-  }
-  
-  public boolean usingDefaultDataDir() {
-    return defaultProperties.get(CORE_DATADIR).equals(coreProperties.getProperty(CORE_DATADIR));
-  }
-
-  /**
-   * @return the core instance directory
-   */
-  public Path getInstanceDir() {
-    return instanceDir;
-  }
-
-  /**@return the core configuration resource name. */
-  public String getConfigName() {
-    return coreProperties.getProperty(CORE_CONFIG);
-  }
-
-  /**@return the core schema resource name. */
-  public String getSchemaName() {
-    return coreProperties.getProperty(CORE_SCHEMA);
-  }
-
-  /**@return the initial core name */
-  public String getName() {
-    return coreProperties.getProperty(CORE_NAME);
-  }
-  
-  public void setProperty(String prop, String val) {
-    if (substitutableProperties.containsKey(prop)) {
-      substitutableProperties.setProperty(prop, val);
-      return;
-    }
-    coreProperties.setProperty(prop, val);
-  }
-
-  public String getCollectionName() {
-    return cloudDesc == null ? null : cloudDesc.getCollectionName();
-  }
-
-  public CloudDescriptor getCloudDescriptor() {
-    return cloudDesc;
-  }
-
-  public boolean isLoadOnStartup() {
-    String tmp = coreProperties.getProperty(CORE_LOADONSTARTUP, "false");
-    return Boolean.parseBoolean(tmp);
-  }
-
-  public boolean isTransient() {
-    String tmp = coreProperties.getProperty(CORE_TRANSIENT, "false");
-    return PropertiesUtil.toBoolean(tmp);
-  }
-
-  public String getUlogDir() {
-    return coreProperties.getProperty(CORE_ULOGDIR);
-  }
-
-  /**
-   * Returns a specific property defined on this CoreDescriptor
-   * @param prop    the property to read from the properties structure
-   * @param defVal  the value to return if the property is not found
-   * @return the associated string, which may be null
-   */
-  public String getCoreProperty(String prop, String defVal) {
-    return coreProperties.getProperty(prop, defVal);
-  }
-
-  /**
-   * Returns all substitutable properties defined on this CoreDescriptor
-   * @return all substitutable properties defined on this CoreDescriptor
-   */
-  public Properties getSubstitutableProperties() {
-    return substitutableProperties;
-  }
-
-  @Override
-  public String toString() {
-    return "CoreDescriptor[name=" + this.getName() + ";instanceDir=" + this.getInstanceDir() + "]";
-  }
-
-  public String getConfigSet() {
-    return coreProperties.getProperty(CORE_CONFIGSET);
-  }
-
-  public String getConfigSetPropertiesName() {
-    return coreProperties.getProperty(CORE_CONFIGSET_PROPERTIES);
-  }
-
-  public boolean isConfigSetTrusted() {
-    return trustedConfigSet;
-  }
-
-  public void setConfigSetTrusted(boolean trusted) {
-    this.trustedConfigSet = trusted;
-  }
-}

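The buildSubstitutableProperties() method above is what exposes core properties to resource loaders under 'solr.core.'-prefixed names, while user-defined properties keep their own names. A reduced sketch of that transformation, with a hard-coded stand-in for the standardPropNames check and made-up property values:

    import java.util.Properties;
    import java.util.Set;

    public class SubstitutablePropsSketch {
      // Stand-in for standardPropNames above; the real list is much longer.
      private static final Set<String> STANDARD = Set.of("name", "config", "dataDir");

      public static void main(String[] args) {
        Properties core = new Properties();
        core.setProperty("name", "collection1");   // standard: gets the prefix
        core.setProperty("my.custom.prop", "42");  // user-defined: kept as-is

        Properties substitutable = new Properties();
        for (String propName : core.stringPropertyNames()) {
          String value = core.getProperty(propName);
          String key = STANDARD.contains(propName) ? "solr.core." + propName : propName;
          substitutable.setProperty(key, value);
        }
        System.out.println(substitutable);
        // -> {my.custom.prop=42, solr.core.name=collection1} (order may vary)
      }
    }
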
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/CorePropertiesLocator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CorePropertiesLocator.java b/solr/core/src/java/org/apache/solr/core/CorePropertiesLocator.java
deleted file mode 100644
index 76eb5c4..0000000
--- a/solr/core/src/java/org/apache/solr/core/CorePropertiesLocator.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
-import java.io.Writer;
-import java.lang.invoke.MethodHandles;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.FileVisitOption;
-import java.nio.file.FileVisitResult;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.SimpleFileVisitor;
-import java.nio.file.attribute.BasicFileAttributes;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import com.google.common.collect.Lists;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.util.FileUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Persists CoreDescriptors as properties files
- */
-public class CorePropertiesLocator implements CoresLocator {
-
-  public static final String PROPERTIES_FILENAME = "core.properties";
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final Path rootDirectory;
-
-  public CorePropertiesLocator(Path coreDiscoveryRoot) {
-    this.rootDirectory = coreDiscoveryRoot;
-    log.debug("Config-defined core root directory: {}", this.rootDirectory);
-  }
-
-  @Override
-  public void create(CoreContainer cc, CoreDescriptor... coreDescriptors) {
-    for (CoreDescriptor cd : coreDescriptors) {
-      Path propertiesFile = this.rootDirectory.resolve(cd.getInstanceDir()).resolve(PROPERTIES_FILENAME);
-      if (Files.exists(propertiesFile))
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-                                "Could not create a new core in " + cd.getInstanceDir()
-                              + " as another core is already defined there");
-      writePropertiesFile(cd, propertiesFile);
-    }
-  }
-
-  // TODO, this isn't atomic!  If we crash in the middle of a rename, we
-  // could end up with two cores with identical names, in which case one of
-  // them won't start up.  Are we happy with this?
-
-  @Override
-  public void persist(CoreContainer cc, CoreDescriptor... coreDescriptors) {
-    for (CoreDescriptor cd : coreDescriptors) {
-      Path propFile = this.rootDirectory.resolve(cd.getInstanceDir()).resolve(PROPERTIES_FILENAME);
-      writePropertiesFile(cd, propFile);
-    }
-  }
-
-  private void writePropertiesFile(CoreDescriptor cd, Path propfile)  {
-    Properties p = buildCoreProperties(cd);
-    try {
-      FileUtils.createDirectories(propfile.getParent()); // Handling for symlinks.
-      try (Writer os = new OutputStreamWriter(Files.newOutputStream(propfile), StandardCharsets.UTF_8)) {
-        p.store(os, "Written by CorePropertiesLocator");
-      }
-    }
-    catch (IOException e) {
-      log.error("Couldn't persist core properties to {}: {}", propfile, e.getMessage());
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "Couldn't persist core properties to " + propfile.toAbsolutePath().toString() + " : " + e.getMessage());
-    }
-  }
-
-  @Override
-  public void delete(CoreContainer cc, CoreDescriptor... coreDescriptors) {
-    if (coreDescriptors == null) {
-      return;
-    }
-    for (CoreDescriptor cd : coreDescriptors) {
-      if (cd == null) continue;
-      Path propfile = this.rootDirectory.resolve(cd.getInstanceDir()).resolve(PROPERTIES_FILENAME);
-      try {
-        Files.deleteIfExists(propfile);
-      } catch (IOException e) {
-        log.warn("Couldn't delete core properties file {}: {}", propfile, e.getMessage());
-      }
-    }
-  }
-
-  @Override
-  public void rename(CoreContainer cc, CoreDescriptor oldCD, CoreDescriptor newCD) {
-    String oldName = newCD.getPersistableStandardProperties().getProperty(CoreDescriptor.CORE_NAME);
-    String newName = newCD.coreProperties.getProperty(CoreDescriptor.CORE_NAME);
-    if (oldName == null ||
-        (newName != null && !oldName.equals(newName))) {
-      newCD.getPersistableStandardProperties().put(CoreDescriptor.CORE_NAME, newName);
-    }
-    persist(cc, newCD);
-  }
-
-  @Override
-  public void swap(CoreContainer cc, CoreDescriptor cd1, CoreDescriptor cd2) {
-    persist(cc, cd1, cd2);
-  }
-
-  @Override
-  public List<CoreDescriptor> discover(final CoreContainer cc) {
-    log.debug("Looking for core definitions underneath {}", rootDirectory);
-    final List<CoreDescriptor> cds = Lists.newArrayList();
-    try {
-      Set<FileVisitOption> options = new HashSet<>();
-      options.add(FileVisitOption.FOLLOW_LINKS);
-      final int maxDepth = 256;
-      Files.walkFileTree(this.rootDirectory, options, maxDepth, new SimpleFileVisitor<Path>() {
-        @Override
-        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
-          if (file.getFileName().toString().equals(PROPERTIES_FILENAME)) {
-            CoreDescriptor cd = buildCoreDescriptor(file, cc);
-            if (cd != null) {
-              log.debug("Found core {} in {}", cd.getName(), cd.getInstanceDir());
-              cds.add(cd);
-            }
-            return FileVisitResult.SKIP_SIBLINGS;
-          }
-          return FileVisitResult.CONTINUE;
-        }
-
-        @Override
-        public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
-          // if we get an error on the root, then fail the whole thing
-          // otherwise, log a warning and continue to try and load other cores
-          if (file.equals(rootDirectory)) {
-            log.error("Error reading core root directory {}: {}", file, exc);
-            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error reading core root directory");
-          }
-          log.warn("Error visiting {}: {}", file, exc);
-          return FileVisitResult.CONTINUE;
-        }
-      });
-    } catch (IOException e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Couldn't walk file tree under " + this.rootDirectory, e);
-    }
-    log.info("Found {} core definitions underneath {}", cds.size(), rootDirectory);
-    if (cds.size() > 0) {
-      log.info("Cores are: {}", cds.stream().map(CoreDescriptor::getName).collect(Collectors.toList()));
-    }
-    return cds;
-  }
-
-  protected CoreDescriptor buildCoreDescriptor(Path propertiesFile, CoreContainer cc) {
-
-    Path instanceDir = propertiesFile.getParent();
-    Properties coreProperties = new Properties();
-    try (InputStream fis = Files.newInputStream(propertiesFile)) {
-      coreProperties.load(new InputStreamReader(fis, StandardCharsets.UTF_8));
-      String name = createName(coreProperties, instanceDir);
-      Map<String, String> propMap = new HashMap<>();
-      for (String key : coreProperties.stringPropertyNames()) {
-        propMap.put(key, coreProperties.getProperty(key));
-      }
-      CoreDescriptor ret = new CoreDescriptor(name, instanceDir, propMap, cc.getContainerProperties(), cc.isZooKeeperAware());
-      ret.loadExtraProperties();
-      return ret;
-    }
-    catch (IOException e) {
-      log.error("Couldn't load core descriptor from {}:{}", propertiesFile, e.toString());
-      return null;
-    }
-
-  }
-
-  protected static String createName(Properties p, Path instanceDir) {
-    return p.getProperty(CoreDescriptor.CORE_NAME, instanceDir.getFileName().toString());
-  }
-
-  protected Properties buildCoreProperties(CoreDescriptor cd) {
-    Properties p = new Properties();
-    p.putAll(cd.getPersistableStandardProperties());
-    p.putAll(cd.getPersistableUserProperties());
-    return p;
-  }
-
-}

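To make the persistence format above concrete: each core is a core.properties file in its instance directory, written and read as UTF-8 via explicit streams rather than the platform default. A self-contained round-trip sketch of that pattern, with a temp directory standing in for the core root:

    import java.io.InputStreamReader;
    import java.io.OutputStreamWriter;
    import java.io.Writer;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.Properties;

    public class CorePropertiesRoundTrip {
      public static void main(String[] args) throws Exception {
        Path instanceDir = Files.createTempDirectory("core-demo");
        Path propFile = instanceDir.resolve("core.properties");

        // Write, as writePropertiesFile(...) above does: UTF-8 via an explicit writer.
        Properties out = new Properties();
        out.setProperty("name", "collection1");
        try (Writer w = new OutputStreamWriter(Files.newOutputStream(propFile),
                                               StandardCharsets.UTF_8)) {
          out.store(w, "Written by CorePropertiesLocator");
        }

        // Read back, as buildCoreDescriptor(...) above does.
        Properties in = new Properties();
        try (InputStreamReader r = new InputStreamReader(Files.newInputStream(propFile),
                                                         StandardCharsets.UTF_8)) {
          in.load(r);
        }
        // Fall back to the directory name when "name" is absent (see createName above).
        System.out.println(in.getProperty("name", instanceDir.getFileName().toString()));
      }
    }
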
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/CoreSorter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CoreSorter.java b/solr/core/src/java/org/apache/solr/core/CoreSorter.java
deleted file mode 100644
index cccd84b..0000000
--- a/solr/core/src/java/org/apache/solr/core/CoreSorter.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.core;
-
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.solr.cloud.CloudDescriptor;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-
-import static java.util.Collections.emptyList;
-import static java.util.stream.Collectors.toList;
-
-/**
- * This is a utility class that sorts cores so as to minimize the time other cores
- * spend waiting for replicas on the current node. This helps avoid leaderVote
- * timeouts in other nodes of the cluster.
- */
-public class CoreSorter {
-  Map<String, CountsForEachShard> shardsVsReplicaCounts = new LinkedHashMap<>();
-  CoreContainer cc;
-  private static final CountsForEachShard zero = new CountsForEachShard(0, 0, 0);
-
-  public final static Comparator<CountsForEachShard> countsComparator = (c1, c2) -> {
-    if (c1 == null) c1 = zero; // just to avoid NPE
-    if (c2 == null) c2 = zero;
-    if (c1.totalReplicasInDownNodes < c2.totalReplicasInDownNodes) {
-      //Prioritize shards with the fewest replicas stuck on down nodes.
-      //It's better to bring up a replica of a shard with 0 down nodes
-      //than one with 1 down node, because the shard becomes complete
-      //earlier and the other live nodes stop waiting sooner
-      if (c1.totalReplicasInLiveNodes > 0) {
-        //only prioritize if replicas on live nodes are actually waiting for this shard
-        return -1;
-      }
-    }
-    if (c2.totalReplicasInDownNodes < c1.totalReplicasInDownNodes) {
-      //same as above, but for the case where c2 has to be prioritized
-      if (c2.totalReplicasInLiveNodes > 0) {
-        //only prioritize if replicas on live nodes are actually waiting for this shard
-        return 1;
-      }
-    }
-
-    //Prioritize shards that the most other nodes are waiting for.
-    //For example, if one other replica is waiting for this replica,
-    //prioritize it over a replica that zero other nodes are waiting for
-    if (c1.totalReplicasInLiveNodes > c2.totalReplicasInLiveNodes) return -1;
-    if (c2.totalReplicasInLiveNodes > c1.totalReplicasInLiveNodes) return 1;
-
-    //If all else is the same, prioritize the shard with fewer local replicas, because
-    //that completes the shard's quorum faster. If I have only one replica for a shard,
-    //I can finish it faster than a shard with 2 replicas on this node
-    if (c1.myReplicas < c2.myReplicas) return -1;
-    if (c2.myReplicas < c1.myReplicas) return 1;
-    //if everything is same return 0
-    return 0;
-  };
-
-
-  public CoreSorter init(CoreContainer cc) {
-    this.cc = cc;
-    if (cc == null || !cc.isZooKeeperAware()) {
-      return this;
-    }
-    String myNodeName = getNodeName();
-    ClusterState state = cc.getZkController().getClusterState();
-    for (CloudDescriptor cloudDescriptor : getCloudDescriptors()) {
-      String coll = cloudDescriptor.getCollectionName();
-      String sliceName = getShardName(cloudDescriptor);
-      if (shardsVsReplicaCounts.containsKey(sliceName)) continue;
-      CountsForEachShard c = new CountsForEachShard(0, 0, 0);
-      for (Replica replica : getReplicas(state, coll, cloudDescriptor.getShardId())) {
-        if (replica.getNodeName().equals(myNodeName)) {
-          c.myReplicas++;
-        } else {
-          Set<String> liveNodes = state.getLiveNodes();
-          if (liveNodes.contains(replica.getNodeName())) {
-            c.totalReplicasInLiveNodes++;
-          } else {
-            c.totalReplicasInDownNodes++;
-          }
-        }
-      }
-      shardsVsReplicaCounts.put(sliceName, c);
-    }
-
-    return this;
-
-  }
-
-
-  public int compare(CoreDescriptor cd1, CoreDescriptor cd2) {
-    String s1 = getShardName(cd1.getCloudDescriptor());
-    String s2 = getShardName(cd2.getCloudDescriptor());
-    if (s1 == null || s2 == null) return cd1.getName().compareTo(cd2.getName());
-    CountsForEachShard c1 = shardsVsReplicaCounts.get(s1);
-    CountsForEachShard c2 = shardsVsReplicaCounts.get(s2);
-    int result = countsComparator.compare(c1, c2);
-    return result == 0 ? s1.compareTo(s2) : result;
-  }
-
-
-  static class CountsForEachShard {
-    public int totalReplicasInDownNodes = 0, myReplicas = 0, totalReplicasInLiveNodes = 0;
-
-    public CountsForEachShard(int totalReplicasInDownNodes, int totalReplicasInLiveNodes, int myReplicas) {
-      this.totalReplicasInDownNodes = totalReplicasInDownNodes;
-      this.myReplicas = myReplicas;
-      this.totalReplicasInLiveNodes = totalReplicasInLiveNodes;
-    }
-
-    public boolean equals(Object obj) {
-      if (obj instanceof CountsForEachShard) {
-        CountsForEachShard that = (CountsForEachShard) obj;
-        return that.totalReplicasInDownNodes == totalReplicasInDownNodes && that.myReplicas == myReplicas;
-
-      }
-      return false;
-    }
-
-    @Override
-    public String toString() {
-      return "down : " + totalReplicasInDownNodes + " , up :  " + totalReplicasInLiveNodes + " my : " + myReplicas;
-    }
-
-
-  }
-
-  static String getShardName(CloudDescriptor cd) {
-    return cd == null ?
-        null :
-        cd.getCollectionName()
-            + "_"
-            + cd.getShardId();
-  }
-
-
-  String getNodeName() {
-    return cc.getNodeConfig().getNodeName();
-  }
-
-  /**Return all replicas for a given collection+slice combo
-   */
-  Collection<Replica> getReplicas(ClusterState cs, String coll, String slice) {
-    DocCollection c = cs.getCollectionOrNull(coll);
-    if (c == null) return emptyList();
-    Slice s = c.getSlice(slice);
-    if (s == null) return emptyList();
-    return s.getReplicas();
-  }
-
-
-  /**return cloud descriptors for all cores in this node
-   */
-  Collection<CloudDescriptor> getCloudDescriptors() {
-    return cc.getCores()
-        .stream()
-        .map((core) -> core.getCoreDescriptor().getCloudDescriptor())
-        .collect(toList());
-  }
-
-
-}

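The precedence encoded in countsComparator above is easier to see on concrete numbers. A stripped-down, self-contained transliteration of the same ordering (the class and field names are shortened stand-ins, and the sample counts are made up):

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    public class CoreOrderSketch {
      // Shortened stand-in for CountsForEachShard above: down = totalReplicasInDownNodes,
      // live = totalReplicasInLiveNodes, mine = myReplicas.
      static final class Counts {
        final int down, live, mine;
        Counts(int down, int live, int mine) { this.down = down; this.live = live; this.mine = mine; }
        @Override public String toString() { return "down:" + down + " up:" + live + " my:" + mine; }
      }

      // Same precedence as countsComparator above: fewer down-node replicas wins
      // (when live nodes are actually waiting), then more live waiters, then
      // fewer local replicas.
      static final Comparator<Counts> ORDER = (c1, c2) -> {
        if (c1.down < c2.down && c1.live > 0) return -1;
        if (c2.down < c1.down && c2.live > 0) return 1;
        if (c1.live != c2.live) return Integer.compare(c2.live, c1.live);
        return Integer.compare(c1.mine, c2.mine);
      };

      public static void main(String[] args) {
        List<Counts> shards = new ArrayList<>(List.of(
            new Counts(1, 2, 1),    // one replica stuck on a down node
            new Counts(0, 2, 2),    // completable shard, two local replicas
            new Counts(0, 2, 1)));  // completable shard, one local replica: sorts first
        shards.sort(ORDER);
        shards.forEach(System.out::println);
      }
    }
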
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/CoresLocator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CoresLocator.java b/solr/core/src/java/org/apache/solr/core/CoresLocator.java
deleted file mode 100644
index 52927f1..0000000
--- a/solr/core/src/java/org/apache/solr/core/CoresLocator.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.util.List;
-
-/**
- * Manage the discovery and persistence of core definitions across Solr restarts
- */
-public interface CoresLocator {
-
-  /**
-   * Make new cores available for discovery
-   * @param cc              the CoreContainer
-   * @param coreDescriptors CoreDescriptors to persist
-   */
-  public void create(CoreContainer cc, CoreDescriptor... coreDescriptors);
-
-  /**
-   * Ensure that the core definitions from the passed in CoreDescriptors
-   * will persist across container restarts.
-   * @param cc              the CoreContainer
-   * @param coreDescriptors CoreDescriptors to persist
-   */
-  public void persist(CoreContainer cc, CoreDescriptor... coreDescriptors);
-
-  /**
-   * Ensure that the core definitions from the passed in CoreDescriptors
-   * are not available for discovery
-   * @param cc              the CoreContainer
-   * @param coreDescriptors CoreDescriptors of the cores to remove
-   */
-  public void delete(CoreContainer cc, CoreDescriptor... coreDescriptors);
-
-  /**
-   * Persist the new name of a renamed core
-   * @param cc    the CoreContainer
-   * @param oldCD the CoreDescriptor of the core before renaming
-   * @param newCD the CoreDescriptor of the core after renaming
-   */
-  public void rename(CoreContainer cc, CoreDescriptor oldCD, CoreDescriptor newCD);
-
-  /**
-   * Swap two core definitions
-   * @param cc  the CoreContainer
-   * @param cd1 the core descriptor of the first core, after swapping
-   * @param cd2 the core descriptor of the second core, after swapping
-   */
-  public void swap(CoreContainer cc, CoreDescriptor cd1, CoreDescriptor cd2);
-
-  /**
-   * Load all the CoreDescriptors from persistence store
-   * @param cc the CoreContainer
-   * @return a list of all CoreDescriptors found
-   */
-  public List<CoreDescriptor> discover(CoreContainer cc);
-}

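Because CoresLocator is a small interface, a toy implementation shows the contract well. A minimal in-memory sketch, assuming it compiles against the interface exactly as deleted above; nothing survives a restart, so it is only useful for tests:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.solr.core.CoreContainer;
    import org.apache.solr.core.CoreDescriptor;
    import org.apache.solr.core.CoresLocator;

    public class InMemoryCoresLocator implements CoresLocator {
      private final List<CoreDescriptor> cores = new ArrayList<>();

      @Override
      public void create(CoreContainer cc, CoreDescriptor... cds) {
        for (CoreDescriptor cd : cds) cores.add(cd);
      }

      @Override
      public void persist(CoreContainer cc, CoreDescriptor... cds) {
        // nothing to do: state lives only in memory
      }

      @Override
      public void delete(CoreContainer cc, CoreDescriptor... cds) {
        if (cds == null) return;
        for (CoreDescriptor cd : cds) cores.remove(cd);
      }

      @Override
      public void rename(CoreContainer cc, CoreDescriptor oldCD, CoreDescriptor newCD) {
        cores.remove(oldCD);
        cores.add(newCD);
      }

      @Override
      public void swap(CoreContainer cc, CoreDescriptor cd1, CoreDescriptor cd2) {
        // descriptors already reflect the post-swap state; nothing to persist
      }

      @Override
      public List<CoreDescriptor> discover(CoreContainer cc) {
        return new ArrayList<>(cores);
      }
    }
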
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/Diagnostics.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/Diagnostics.java b/solr/core/src/java/org/apache/solr/core/Diagnostics.java
deleted file mode 100644
index d7d6178..0000000
--- a/solr/core/src/java/org/apache/solr/core/Diagnostics.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.lang.invoke.MethodHandles;
-import java.lang.management.ManagementFactory;
-import java.lang.management.ThreadInfo;
-
-public class Diagnostics {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public interface Callable {
-    public void call(Object... data);  // data depends on the context
-  }
-
-  public static void call(Callable callable, Object... data) {
-    try {
-      callable.call(data);
-    } catch (Exception e) {
-      log.error("TEST HOOK EXCEPTION", e);
-    }
-  }
-
-  public static void logThreadDumps(String message) {
-    StringBuilder sb = new StringBuilder(32768);
-    if (message == null) message = "============ THREAD DUMP REQUESTED ============";
-    sb.append(message);
-    sb.append("\n");
-    ThreadInfo[] threads = ManagementFactory.getThreadMXBean().dumpAllThreads(true, true);
-    for (ThreadInfo info : threads) {
-      sb.append(info);
-      // sb.append("\n");
-    }
-    log.error(sb.toString());
-  }
-
-
-}

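logThreadDumps(...) above boils down to one JMX call. A standalone sketch of the same capture, printing instead of logging (the banner text mirrors the default above):

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadInfo;

    public class ThreadDumpSketch {
      public static void main(String[] args) {
        StringBuilder sb = new StringBuilder(32768);
        sb.append("============ THREAD DUMP REQUESTED ============\n");
        // dumpAllThreads(lockedMonitors, lockedSynchronizers) captures every live
        // thread along with held monitors and ownable synchronizers.
        ThreadInfo[] threads = ManagementFactory.getThreadMXBean().dumpAllThreads(true, true);
        for (ThreadInfo info : threads) {
          sb.append(info); // ThreadInfo.toString() renders a readable stack trace
        }
        System.out.print(sb);
      }
    }
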
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
deleted file mode 100644
index fab3300..0000000
--- a/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
+++ /dev/null
@@ -1,434 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.io.Closeable;
-import java.io.File;
-import java.io.FileFilter;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.nio.file.NoSuchFileException;
-import java.util.Arrays;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FilterDirectory;
-import org.apache.lucene.store.FlushInfo;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.LockFactory;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.core.CachingDirectoryFactory.CloseListener;
-import org.apache.solr.util.plugin.NamedListInitializedPlugin;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Provides access to a Directory implementation. You must release every
- * Directory that you get.
- */
-public abstract class DirectoryFactory implements NamedListInitializedPlugin,
-    Closeable {
-
-  // Estimate 10M docs, 100GB size, to avoid caching by NRTCachingDirectory
-  // Stayed away from upper bounds of the int/long in case any other code tried to aggregate these numbers.
-  // A large estimate should currently have no other side effects.
-  public static final IOContext IOCONTEXT_NO_CACHE = new IOContext(new FlushInfo(10*1000*1000, 100L*1000*1000*1000));
-
-  protected static final String INDEX_W_TIMESTAMP_REGEX = "index\\.[0-9]{17}"; // see SnapShooter.DATE_FMT
-
-  // May be set by sub classes as data root, in which case getDataHome will use it as base
-  protected Path dataHomePath;
-
-  // hint about what the directory contains - default is index directory
-  public enum DirContext {DEFAULT, META_DATA}
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  // Available lock types
-  public final static String LOCK_TYPE_SIMPLE = "simple";
-  public final static String LOCK_TYPE_NATIVE = "native";
-  public final static String LOCK_TYPE_SINGLE = "single";
-  public final static String LOCK_TYPE_NONE   = "none";
-  public final static String LOCK_TYPE_HDFS   = "hdfs";
-
-  protected volatile CoreContainer coreContainer;
-  
-  /**
-   * Indicates a Directory will no longer be used, and when its ref count
-   * hits 0, it can be closed. On close all directories will be closed
-   * whether this has been called or not. This is simply to allow early cleanup.
-   * 
-   * @throws IOException If there is a low-level I/O error.
-   */
-  public abstract void doneWithDirectory(Directory directory) throws IOException;
-  
-  /**
-   * Adds a close listener for a Directory.
-   */
-  public abstract void addCloseListener(Directory dir, CloseListener closeListener);
-  
-  /**
-   * Close this factory and all of the Directories it contains.
-   * 
-   * @throws IOException If there is a low-level I/O error.
-   */
-  @Override
-  public abstract void close() throws IOException;
-  
-  /**
-   * Creates a new Directory for a given path.
-   * 
-   * @throws IOException If there is a low-level I/O error.
-   */
-  protected abstract Directory create(String path, LockFactory lockFactory, DirContext dirContext) throws IOException;
-  
-  /**
-   * Creates a new LockFactory for a given path.
-   * @param rawLockType A string value as passed in config. Every factory should at least support 'none' to disable locking.
-   * @throws IOException If there is a low-level I/O error.
-   */
-  protected abstract LockFactory createLockFactory(String rawLockType) throws IOException;
-  
-  /**
-   * Returns true if a Directory exists for a given path in the underlying (stable) storage <em>and</em> 
-   * contains at least one file.  
-   * Note that the existence of a {@link Directory} <em>Object</em> as returned by a previous call to the 
-   * {@link #get} method (on the specified <code>path</code>) is not enough to cause this method to return 
-   * true.  Some prior user of that Directory must have written &amp; synced at least one file to that 
-   * Directory (and at least one file must still exist)
-   *
-   * @throws IOException If there is a low-level I/O error.
-   */
-  public abstract boolean exists(String path) throws IOException;
-  
-  /**
-   * Removes the Directory's persistent storage.
-   * For example: A file system impl may remove the
-   * on disk directory.
-   * @throws IOException If there is a low-level I/O error.
-   * 
-   */
-  public abstract void remove(Directory dir) throws IOException;
-  
-  /**
-   * Removes the Directory's persistent storage.
-   * For example: A file system impl may remove the
-   * on disk directory.
-   * @throws IOException If there is a low-level I/O error.
-   * 
-   */
-  public abstract void remove(Directory dir, boolean afterCoreClose) throws IOException;
-  
-  /**
-   * This remove is special in that it may be called even after
-   * the factory has been closed. Remove only makes sense for
-   * persistent directory factories.
-   * 
-   * @param path to remove
-   * @param afterCoreClose whether to wait until after the core is closed.
-   * @throws IOException If there is a low-level I/O error.
-   */
-  public abstract void remove(String path, boolean afterCoreClose) throws IOException;
-  
-  /**
-   * This remove is special in that it may be called even after
-   * the factory has been closed. Remove only makes sense for
-   * persistent directory factories.
-   * 
-   * @param path to remove
-   * @throws IOException If there is a low-level I/O error.
-   */
-  public abstract void remove(String path) throws IOException;
-  
-  /**
-   * @param directory to calculate size of
-   * @return size in bytes
-   * @throws IOException on low level IO error
-   */
-  public long size(Directory directory) throws IOException {
-    return sizeOfDirectory(directory);
-  }
-  
-  /**
-   * @param path to calculate size of
-   * @return size in bytes
-   * @throws IOException on low level IO error
-   */
-  public long size(String path) throws IOException {
-    Directory dir = get(path, DirContext.DEFAULT, null);
-    long size;
-    try {
-      size = sizeOfDirectory(dir);
-    } finally {
-      release(dir); 
-    }
-    return size;
-  }
-  
-  /**
-   * Override for more efficient moves.
-   * 
-   * Intended for use with replication - use
-   * carefully - some Directory wrappers will
-   * cache files for example.
-   * 
-   * @throws IOException If there is a low-level I/O error.
-   */
-  public void move(Directory fromDir, Directory toDir, String fileName, IOContext ioContext) throws IOException {
-    toDir.copyFrom(fromDir, fileName, fileName, ioContext);
-    fromDir.deleteFile(fileName);
-  }
-  
-  // sub classes perform an atomic rename if possible, otherwise fall back to delete + rename
-  // this is important to support for index roll over durability after crashes
-  public void renameWithOverwrite(Directory dir, String fileName, String toName) throws IOException {
-    try {
-      dir.deleteFile(toName);
-    } catch (FileNotFoundException | NoSuchFileException e) {
-      // ignore: the destination file did not exist yet
-    } catch (Exception e) {
-      log.error("Exception deleting file", e);
-    }
-
-    dir.rename(fileName, toName);
-  }
-  
-  /**
-   * Returns the Directory for a given path, using the specified rawLockType.
-   * Will return the same Directory instance for the same path.
-   *
-   * @throws IOException If there is a low-level I/O error.
-   */
-  public abstract Directory get(String path, DirContext dirContext, String rawLockType)
-      throws IOException;
-  
-  /**
-   * Increment the number of references to the given Directory. You must call
-   * release for every call to this method.
-   * 
-   */
-  public abstract void incRef(Directory directory);
-  
-  
-  /**
-   * @return true if data is kept after close.
-   */
-  public abstract boolean isPersistent();
-  
-  /**
-   * @return true if storage is shared.
-   */
-  public boolean isSharedStorage() {
-    return false;
-  }
-  
-  /**
-   * Releases the Directory so that it may be closed when it is no longer
-   * referenced.
-   * 
-   * @throws IOException If there is a low-level I/O error.
-   */
-  public abstract void release(Directory directory) throws IOException;
-  
-  /**
-   * Normalize a given path.
-   * 
-   * @param path to normalize
-   * @return normalized path
-   * @throws IOException on io error
-   */
-  public String normalize(String path) throws IOException {
-    return path;
-  }
-  
-  /**
-   * @param path the path to check
-   * @return true if the path is absolute (i.e. not relative)
-   */
-  public boolean isAbsolute(String path) {
-    // back compat
-    return new File(path).isAbsolute();
-  }
-  
-  public static long sizeOfDirectory(Directory directory) throws IOException {
-    final String[] files = directory.listAll();
-    long size = 0;
-    
-    for (final String file : files) {
-      size += sizeOf(directory, file);
-      if (size < 0) {
-        // guard against long overflow: stop summing once the total wraps negative
-        break;
-      }
-    }
-    
-    return size;
-  }
-  
-  public static long sizeOf(Directory directory, String file) throws IOException {
-    try {
-      return directory.fileLength(file);
-    } catch (IOException e) {
-      // could be a race, file no longer exists, access denied, is a directory, etc.
-      return 0;
-    }
-  }
-  
-  /**
-   * Delete the files in the Directory
-   */
-  public static boolean empty(Directory dir) {
-    boolean isSuccess = true;
-    String[] contents;
-    try {
-      contents = dir.listAll();
-      if (contents != null) {
-        for (String file : contents) {
-          dir.deleteFile(file);
-        }
-      }
-    } catch (IOException e) {
-      SolrException.log(log, "Error deleting files from Directory", e);
-      isSuccess = false;
-    }
-    return isSuccess;
-  }
-
-  /**
-   * If your implementation can count on delete-on-last-close semantics,
-   * or throws an exception when trying to remove a file that is in use,
-   * return false (e.g. NFS). Otherwise, return true. Defaults to returning false.
-   *
-   * @return true if the factory impl requires that Searchers explicitly
-   * reserve commit points.
-   */
-  public boolean searchersReserveCommitPoints() {
-    return false;
-  }
-
-  /**
-   * Get the data home folder. If solr.data.home is set, that is used; otherwise it is based on the instanceDir.
-   * @param cd core descriptor instance
-   * @return a String with the absolute path to the data directory
-   */
-  public String getDataHome(CoreDescriptor cd) throws IOException {
-    String dataDir;
-    if (dataHomePath != null) {
-      String instanceDirLastPath = cd.getInstanceDir().getName(cd.getInstanceDir().getNameCount()-1).toString();
-      dataDir = Paths.get(coreContainer.getSolrHome()).resolve(dataHomePath)
-          .resolve(instanceDirLastPath).resolve(cd.getDataDir()).toAbsolutePath().toString();
-    } else {
-      // by default, we go off the instance directory
-      dataDir = cd.getInstanceDir().resolve(cd.getDataDir()).toAbsolutePath().toString();
-    }
-    return dataDir;
-  }
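A worked example of that resolution, with purely illustrative paths: if
solr.data.home=/var/solr-data, instanceDir=/opt/solr/server/solr/core1 and
dataDir=data, the result is /var/solr-data/core1/data (the last element of the
instanceDir keeps cores apart under the shared home); with solr.data.home
unset, the same core resolves to /opt/solr/server/solr/core1/data.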
-
-  public void cleanupOldIndexDirectories(final String dataDirPath, final String currentIndexDirPath, boolean afterCoreReload) {
-    File dataDir = new File(dataDirPath);
-    if (!dataDir.isDirectory()) {
-      log.debug("{} does not point to a valid data directory; skipping clean-up of old index directories.", dataDirPath);
-      return;
-    }
-
-    final File currentIndexDir = new File(currentIndexDirPath);
-    File[] oldIndexDirs = dataDir.listFiles(new FileFilter() {
-      @Override
-      public boolean accept(File file) {
-        String fileName = file.getName();
-        return file.isDirectory() &&
-               !file.equals(currentIndexDir) &&
-               (fileName.equals("index") || fileName.matches(INDEX_W_TIMESTAMP_REGEX));
-      }
-    });
-
-    if (oldIndexDirs == null || oldIndexDirs.length == 0)
-      return; // nothing to do (no log message needed)
-
-    List<File> dirsList = Arrays.asList(oldIndexDirs);
-    Collections.sort(dirsList, Collections.reverseOrder());
-    
-    int i = 0;
-    if (afterCoreReload) {
-      log.info("Will not remove most recent old directory after reload {}", oldIndexDirs[0]);
-      i = 1;
-    }
-    log.info("Found {} old index directories to clean-up under {} afterReload={}", oldIndexDirs.length - i, dataDirPath, afterCoreReload);
-    for (; i < dirsList.size(); i++) {
-      File dir = dirsList.get(i);
-      String dirToRmPath = dir.getAbsolutePath();
-      try {
-        if (deleteOldIndexDirectory(dirToRmPath)) {
-          log.info("Deleted old index directory: {}", dirToRmPath);
-        } else {
-          log.warn("Delete old index directory {} failed.", dirToRmPath);
-        }
-      } catch (IOException ioExc) {
-        log.error("Failed to delete old directory {} due to: {}", dir.getAbsolutePath(), ioExc.toString());
-      }
-    }
-  }
-
-  // Extension point that allows subclasses to run additional logic when deleting old index directories
-  protected boolean deleteOldIndexDirectory(String oldDirPath) throws IOException {
-    File dirToRm = new File(oldDirPath);
-    FileUtils.deleteDirectory(dirToRm);
-    return !dirToRm.isDirectory();
-  }
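A minimal sketch of that extension point; the guard condition is an invented
assumption, purely to show where subclass logic would go:

    @Override
    protected boolean deleteOldIndexDirectory(String oldDirPath) throws IOException {
      // hypothetical safety check before delegating to the default delete
      if (!oldDirPath.contains("/data/")) {
        log.warn("Refusing to delete unexpected index dir: {}", oldDirPath);
        return false;
      }
      return super.deleteOldIndexDirectory(oldDirPath);
    }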
-  
-  public void initCoreContainer(CoreContainer cc) {
-    this.coreContainer = cc;
-    if (cc != null && cc.getConfig() != null) {
-      this.dataHomePath = cc.getConfig().getSolrDataHome();
-    }
-  }
-  
-  // special hack to work with FilterDirectory
-  protected Directory getBaseDir(Directory dir) {
-    Directory baseDir = dir;
-    while (baseDir instanceof FilterDirectory) {
-      baseDir = ((FilterDirectory)baseDir).getDelegate();
-    } 
-    
-    return baseDir;
-  }
-
-  /**
-   * Create a new DirectoryFactory instance from the given SolrConfig and tied to the specified core container.
-   */
-  static DirectoryFactory loadDirectoryFactory(SolrConfig config, CoreContainer cc, String registryName) {
-    final PluginInfo info = config.getPluginInfo(DirectoryFactory.class.getName());
-    final DirectoryFactory dirFactory;
-    if (info != null) {
-      log.debug(info.className);
-      dirFactory = config.getResourceLoader().newInstance(info.className, DirectoryFactory.class);
-      // allow DirectoryFactory instances to access the CoreContainer
-      dirFactory.initCoreContainer(cc);
-      dirFactory.init(info.initArgs);
-    } else {
-      log.debug("solr.NRTCachingDirectoryFactory");
-      dirFactory = new NRTCachingDirectoryFactory();
-      dirFactory.initCoreContainer(cc);
-    }
-    return dirFactory;
-  }
-}

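Taken together, the reference-counted lifecycle this factory expects from
callers looks roughly like the sketch below (assuming solrConfig, coreContainer
and registryName are in scope; path and lock type are illustrative, and get()
itself counts as one reference):

    DirectoryFactory factory = DirectoryFactory.loadDirectoryFactory(solrConfig, coreContainer, registryName);
    Directory dir = factory.get("/var/solr/core1/data/index", DirContext.DEFAULT,
        DirectoryFactory.LOCK_TYPE_NATIVE);
    try {
      factory.incRef(dir);   // a second user takes its own reference
      // ... use the Directory ...
      factory.release(dir);  // balances the incRef()
    } finally {
      factory.release(dir);  // balances the get(); the Directory may close now
    }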
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/EphemeralDirectoryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/EphemeralDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/EphemeralDirectoryFactory.java
deleted file mode 100644
index c7708ea..0000000
--- a/solr/core/src/java/org/apache/solr/core/EphemeralDirectoryFactory.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-
-import org.apache.lucene.store.Directory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Directory provider for implementations that do not persist over reboots.
- */
-public abstract class EphemeralDirectoryFactory extends CachingDirectoryFactory {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  @Override
-  public boolean exists(String path) throws IOException {
-    String fullPath = normalize(path);
-    synchronized (this) {
-      final CacheValue cacheValue = byPathCache.get(fullPath);
-      if (null == cacheValue) {
-        return false;
-      }
-      final Directory directory = cacheValue.directory;
-      if (null == directory) {
-        return false;
-      }
-      return directory.listAll().length > 0;
-    }
-  }
-  
-  @Override
-  public boolean isPersistent() {
-    return false;
-  }
-  
-  @Override
-  public boolean isAbsolute(String path) {
-    return true;
-  }
-  
-  
-  @Override
-  public void remove(Directory dir) throws IOException {
-    // ram dir does not persist its dir anywhere
-  }
-  
-  @Override
-  public void remove(String path) throws IOException {
-    // ram dir does not persist its dir anywhere
-  }
-  
-  @Override
-  public void cleanupOldIndexDirectories(final String dataDirPath, final String currentIndexDirPath, boolean reload) {
-    // currently a no-op
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java
deleted file mode 100644
index 13e1de1..0000000
--- a/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java
+++ /dev/null
@@ -1,610 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.net.URLEncoder;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Locale;
-import java.util.Set;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.RemovalListener;
-import com.google.common.cache.RemovalNotification;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.LockFactory;
-import org.apache.lucene.store.NRTCachingDirectory;
-import org.apache.lucene.store.NoLockFactory;
-import org.apache.lucene.store.SingleInstanceLockFactory;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.IOUtils;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.metrics.SolrMetricManager;
-import org.apache.solr.metrics.SolrMetricProducer;
-import org.apache.solr.store.blockcache.BlockCache;
-import org.apache.solr.store.blockcache.BlockDirectory;
-import org.apache.solr.store.blockcache.BlockDirectoryCache;
-import org.apache.solr.store.blockcache.BufferStore;
-import org.apache.solr.store.blockcache.Cache;
-import org.apache.solr.store.blockcache.Metrics;
-import org.apache.solr.store.hdfs.HdfsDirectory;
-import org.apache.solr.store.hdfs.HdfsLocalityReporter;
-import org.apache.solr.store.hdfs.HdfsLockFactory;
-import org.apache.solr.util.HdfsUtil;
-import org.apache.solr.util.plugin.SolrCoreAware;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
-
-public class HdfsDirectoryFactory extends CachingDirectoryFactory implements SolrCoreAware, SolrMetricProducer {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  public static final String BLOCKCACHE_SLAB_COUNT = "solr.hdfs.blockcache.slab.count";
-  public static final String BLOCKCACHE_DIRECT_MEMORY_ALLOCATION = "solr.hdfs.blockcache.direct.memory.allocation";
-  public static final String BLOCKCACHE_ENABLED = "solr.hdfs.blockcache.enabled";
-  public static final String BLOCKCACHE_GLOBAL = "solr.hdfs.blockcache.global";
-  public static final String BLOCKCACHE_READ_ENABLED = "solr.hdfs.blockcache.read.enabled";
-  public static final String BLOCKCACHE_WRITE_ENABLED = "solr.hdfs.blockcache.write.enabled"; // currently buggy and disabled
-  
-  public static final String NRTCACHINGDIRECTORY_ENABLE = "solr.hdfs.nrtcachingdirectory.enable";
-  public static final String NRTCACHINGDIRECTORY_MAXMERGESIZEMB = "solr.hdfs.nrtcachingdirectory.maxmergesizemb";
-  public static final String NRTCACHINGDIRECTORY_MAXCACHEMB = "solr.hdfs.nrtcachingdirectory.maxcachedmb";
-  public static final String NUMBEROFBLOCKSPERBANK = "solr.hdfs.blockcache.blocksperbank";
-
-  public static final String LOCALITYMETRICS_ENABLED = "solr.hdfs.locality.metrics.enabled";
-
-  public static final String KERBEROS_ENABLED = "solr.hdfs.security.kerberos.enabled";
-  public static final String KERBEROS_KEYTAB = "solr.hdfs.security.kerberos.keytabfile";
-  public static final String KERBEROS_PRINCIPAL = "solr.hdfs.security.kerberos.principal";
-  
-  public static final String HDFS_HOME = "solr.hdfs.home";
-  
-  public static final String CONFIG_DIRECTORY = "solr.hdfs.confdir";
-  
-  public static final String CACHE_MERGES = "solr.hdfs.blockcache.cachemerges";
-  public static final String CACHE_READONCE = "solr.hdfs.blockcache.cachereadonce";
-  
-  private SolrParams params;
-  
-  private String hdfsDataDir;
-  
-  private String confDir;
-
-  private boolean cacheReadOnce;
-
-  private boolean cacheMerges;
-
-  private static BlockCache globalBlockCache;
-  
-  public static Metrics metrics;
-  private static Boolean kerberosInit;
-
-  // we use this cache for FileSystem instances when we don't have access to a long lived instance
-  private com.google.common.cache.Cache<String,FileSystem> tmpFsCache = CacheBuilder.newBuilder()
-      .concurrencyLevel(10)
-      .maximumSize(1000)
-      .expireAfterAccess(5, TimeUnit.MINUTES).removalListener(new RemovalListener<String,FileSystem>() {
-        @Override
-        public void onRemoval(RemovalNotification<String,FileSystem> rn) {
-          IOUtils.closeQuietly(rn.getValue());
-        }
-      })
-      .build();
-
-  private final static class MetricsHolder {
-    // [JCIP SE, Goetz, 16.6] Lazy initialization
-    // Won't load until MetricsHolder is referenced
-    public static final Metrics metrics = new Metrics();
-  }
-  
-  @Override
-  public void close() throws IOException {
-    super.close();
-    Collection<FileSystem> values = tmpFsCache.asMap().values();
-    for (FileSystem fs : values) {
-      IOUtils.closeQuietly(fs);
-    }
-    tmpFsCache.invalidateAll();
-    tmpFsCache.cleanUp();
-  }
-
-  private final static class LocalityHolder {
-    public static final HdfsLocalityReporter reporter = new HdfsLocalityReporter();
-  }
-
-  @Override
-  public void init(NamedList args) {
-    super.init(args);
-    params = args.toSolrParams();
-    this.hdfsDataDir = getConfig(HDFS_HOME, null);
-    if (this.hdfsDataDir != null && this.hdfsDataDir.length() == 0) {
-      this.hdfsDataDir = null;
-    } else {
-      log.info(HDFS_HOME + "=" + this.hdfsDataDir);
-    }
-    cacheMerges = getConfig(CACHE_MERGES, false);
-    cacheReadOnce = getConfig(CACHE_READONCE, false);
-    boolean kerberosEnabled = getConfig(KERBEROS_ENABLED, false);
-    log.info("Solr Kerberos Authentication "
-        + (kerberosEnabled ? "enabled" : "disabled"));
-    if (kerberosEnabled) {
-      initKerberos();
-    }
-  }
-  
-  @Override
-  protected LockFactory createLockFactory(String rawLockType) throws IOException {
-    if (null == rawLockType) {
-      rawLockType = DirectoryFactory.LOCK_TYPE_HDFS;
-      log.warn("No lockType configured, assuming '"+rawLockType+"'.");
-    }
-    final String lockType = rawLockType.toLowerCase(Locale.ROOT).trim();
-    switch (lockType) {
-      case DirectoryFactory.LOCK_TYPE_HDFS:
-        return HdfsLockFactory.INSTANCE;
-      case DirectoryFactory.LOCK_TYPE_SINGLE:
-        return new SingleInstanceLockFactory();
-      case DirectoryFactory.LOCK_TYPE_NONE:
-        return NoLockFactory.INSTANCE;
-      default:
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "Unrecognized lockType: " + rawLockType);
-    }
-  }
-
-  @Override
-  @SuppressWarnings("resource")
-  protected Directory create(String path, LockFactory lockFactory, DirContext dirContext) throws IOException {
-    assert params != null : "init must be called before create";
-    log.info("creating directory factory for path {}", path);
-    Configuration conf = getConf();
-    
-    if (metrics == null) {
-      metrics = MetricsHolder.metrics;
-    }
-    
-    boolean blockCacheEnabled = getConfig(BLOCKCACHE_ENABLED, true);
-    boolean blockCacheGlobal = getConfig(BLOCKCACHE_GLOBAL, true);
-    boolean blockCacheReadEnabled = getConfig(BLOCKCACHE_READ_ENABLED, true);
-    
-    final HdfsDirectory hdfsDir;
-
-    final Directory dir;
-    if (blockCacheEnabled && dirContext != DirContext.META_DATA) {
-      int numberOfBlocksPerBank = getConfig(NUMBEROFBLOCKSPERBANK, 16384);
-      
-      int blockSize = BlockDirectory.BLOCK_SIZE;
-      
-      int bankCount = getConfig(BLOCKCACHE_SLAB_COUNT, 1);
-      
-      boolean directAllocation = getConfig(BLOCKCACHE_DIRECT_MEMORY_ALLOCATION, true);
-      
-      int slabSize = numberOfBlocksPerBank * blockSize;
-      log.info(
-          "Number of slabs of block cache [{}] with direct memory allocation set to [{}]",
-          bankCount, directAllocation);
-      log.info(
-          "Block cache target memory usage, slab size of [{}] will allocate [{}] slabs and use ~[{}] bytes",
-          new Object[] {slabSize, bankCount,
-              ((long) bankCount * (long) slabSize)});
-      
-      int bsBufferSize = params.getInt("solr.hdfs.blockcache.bufferstore.buffersize", blockSize);
-      int bsBufferCount = params.getInt("solr.hdfs.blockcache.bufferstore.buffercount", 0); // this is actually total size
-      
-      BlockCache blockCache = getBlockDirectoryCache(numberOfBlocksPerBank,
-          blockSize, bankCount, directAllocation, slabSize,
-          bsBufferSize, bsBufferCount, blockCacheGlobal);
-      
-      Cache cache = new BlockDirectoryCache(blockCache, path, metrics, blockCacheGlobal);
-      int readBufferSize = params.getInt("solr.hdfs.blockcache.read.buffersize", blockSize);
-      hdfsDir = new HdfsDirectory(new Path(path), lockFactory, conf, readBufferSize);
-      dir = new BlockDirectory(path, hdfsDir, cache, null, blockCacheReadEnabled, false, cacheMerges, cacheReadOnce);
-    } else {
-      hdfsDir = new HdfsDirectory(new Path(path), conf);
-      dir = hdfsDir;
-    }
-    if (params.getBool(LOCALITYMETRICS_ENABLED, false)) {
-      LocalityHolder.reporter.registerDirectory(hdfsDir);
-    }
-
-    boolean nrtCachingDirectory = getConfig(NRTCACHINGDIRECTORY_ENABLE, true);
-    if (nrtCachingDirectory) {
-      double nrtCacheMaxMergeSizeMB = getConfig(NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 16);
-      double nrtCacheMaxCacheMB = getConfig(NRTCACHINGDIRECTORY_MAXCACHEMB, 192);
-      
-      return new NRTCachingDirectory(dir, nrtCacheMaxMergeSizeMB, nrtCacheMaxCacheMB);
-    }
-    return dir;
-  }
-
-  boolean getConfig(String name, boolean defaultValue) {
-    Boolean value = params.getBool(name);
-    if (value == null) {
-      String sysValue = System.getProperty(name);
-      if (sysValue != null) {
-        value = Boolean.valueOf(sysValue);
-      }
-    }
-    return value == null ? defaultValue : value;
-  }
-  
-  int getConfig(String name, int defaultValue) {
-    Integer value = params.getInt(name);
-    if (value == null) {
-      String sysValue = System.getProperty(name);
-      if (sysValue != null) {
-        value = Integer.parseInt(sysValue);
-      }
-    }
-    return value == null ? defaultValue : value;
-  }
-
-  String getConfig(String name, String defaultValue) {
-    String value = params.get(name);
-    if (value == null) {
-      value = System.getProperty(name);
-    }
-    return value == null ? defaultValue : value;
-  }
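These three getConfig overloads share one lookup order: the factory's init
args from solrconfig.xml first, then a JVM system property of the same name,
then the supplied default. For illustration (values invented):

    // an init arg wins over -Dsolr.hdfs.blockcache.slab.count=4,
    // which in turn wins over the hard-coded default of 1
    int slabs = getConfig(BLOCKCACHE_SLAB_COUNT, 1);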
-  
-  private BlockCache getBlockDirectoryCache(int numberOfBlocksPerBank, int blockSize, int bankCount,
-      boolean directAllocation, int slabSize, int bufferSize, int bufferCount, boolean staticBlockCache) {
-    if (!staticBlockCache) {
-      log.info("Creating new single instance HDFS BlockCache");
-      return createBlockCache(numberOfBlocksPerBank, blockSize, bankCount, directAllocation, slabSize, bufferSize, bufferCount);
-    }
-    synchronized (HdfsDirectoryFactory.class) {
-      
-      if (globalBlockCache == null) {
-        log.info("Creating new global HDFS BlockCache");
-        globalBlockCache = createBlockCache(numberOfBlocksPerBank, blockSize, bankCount,
-            directAllocation, slabSize, bufferSize, bufferCount);
-      }
-    }
-    return globalBlockCache;
-  }
-
-  private BlockCache createBlockCache(int numberOfBlocksPerBank, int blockSize,
-      int bankCount, boolean directAllocation, int slabSize, int bufferSize,
-      int bufferCount) {
-    BufferStore.initNewBuffer(bufferSize, bufferCount, metrics);
-    long totalMemory = (long) bankCount * (long) numberOfBlocksPerBank
-        * (long) blockSize;
-    
-    BlockCache blockCache;
-    try {
-      blockCache = new BlockCache(metrics, directAllocation, totalMemory, slabSize, blockSize);
-    } catch (OutOfMemoryError e) {
-      throw new RuntimeException(
-          "The max direct memory is likely too low.  Either increase it (by adding -XX:MaxDirectMemorySize=<size>g -XX:+UseLargePages to your containers startup args)"
-              + " or disable direct allocation using solr.hdfs.blockcache.direct.memory.allocation=false in solrconfig.xml. If you are putting the block cache on the heap,"
-              + " your java heap size might not be large enough."
-              + " Failed allocating ~" + totalMemory / 1000000.0 + " MB.",
-          e);
-    }
-    return blockCache;
-  }
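A quick sizing check with the defaults above, assuming BlockDirectory.BLOCK_SIZE
is the usual 8 KB (an assumption; verify against your build):

    // slabSize    = blocksPerBank * blockSize = 16384 * 8192 bytes = 128 MB per slab
    // totalMemory = bankCount * slabSize      = 1 * 128 MB         = 128 MB of direct memory
    // raising solr.hdfs.blockcache.slab.count to 4 would reserve ~512 MB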
-  
-  @Override
-  public boolean exists(String path) {
-    final Path hdfsDirPath = new Path(path);
-    FileSystem fileSystem = getCachedFileSystem(path);
-
-    try {
-      return fileSystem.exists(hdfsDirPath);
-    } catch (IOException e) {
-      log.error("Error checking if hdfs path exists", e);
-      throw new RuntimeException("Error checking if hdfs path exists", e);
-    }
-  }
-  
-  public Configuration getConf() {
-    Configuration conf = new Configuration();
-    confDir = getConfig(CONFIG_DIRECTORY, null);
-    HdfsUtil.addHdfsResources(conf, confDir);
-    conf.setBoolean("fs.hdfs.impl.disable.cache", true);
-    return conf;
-  }
-  
-  protected synchronized void removeDirectory(final CacheValue cacheValue)
-      throws IOException {
-    FileSystem fileSystem = getCachedFileSystem(cacheValue.path);
-
-    try {
-      boolean success = fileSystem.delete(new Path(cacheValue.path), true);
-      if (!success) {
-        throw new RuntimeException("Could not remove directory");
-      }
-    } catch (Exception e) {
-      log.error("Could not remove directory", e);
-      throw new SolrException(ErrorCode.SERVER_ERROR,
-          "Could not remove directory", e);
-    }
-  }
-  
-  @Override
-  public boolean isAbsolute(String path) {
-    return path.startsWith("hdfs:/");
-  }
-  
-  @Override
-  public boolean isPersistent() {
-    return true;
-  }
-  
-  @Override
-  public boolean isSharedStorage() {
-    return true;
-  }
-  
-  @Override
-  public boolean searchersReserveCommitPoints() {
-    return true;
-  }
-  
-  @Override
-  public String getDataHome(CoreDescriptor cd) throws IOException {
-    if (hdfsDataDir == null) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, "You must set the "
-          + this.getClass().getSimpleName() + " param " + HDFS_HOME
-          + " for relative dataDir paths to work");
-    }
-    
-    // by default, we go off the instance directory
-    String path;
-    if (cd.getCloudDescriptor() != null) {
-      path = URLEncoder.encode(cd.getCloudDescriptor().getCollectionName(),
-          "UTF-8")
-          + "/"
-          + URLEncoder.encode(cd.getCloudDescriptor().getCoreNodeName(),
-              "UTF-8");
-    } else {
-      path = cd.getName();
-    }
-    
-    return normalize(SolrResourceLoader.normalizeDir(ZkController
-        .trimLeadingAndTrailingSlashes(hdfsDataDir)
-        + "/"
-        + path
-        + "/"
-        + cd.getDataDir()));
-  }
-  
-  /**
-   * @param directory to calculate size of
-   * @return size in bytes
-   * @throws IOException on low level IO error
-   */
-  @Override
-  public long size(Directory directory) throws IOException {
-    String hdfsDirPath = getPath(directory);
-    return size(hdfsDirPath);
-  }
-  
-  /**
-   * @param path to calculate size of
-   * @return size in bytes
-   * @throws IOException on low level IO error
-   */
-  @Override
-  public long size(String path) throws IOException {
-    Path hdfsDirPath = new Path(path);
-    FileSystem fileSystem = getCachedFileSystem(path);
-    try {
-      return fileSystem.getContentSummary(hdfsDirPath).getLength();
-    } catch (IOException e) {
-      log.error("Error getting the size of the hdfs path", e);
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error getting the size of the hdfs path", e);
-    }
-    // note: no close here - the FileSystem comes from tmpFsCache, whose removal
-    // listener closes it; closing a cached instance would break later cache hits
-  }
-
-  private FileSystem getCachedFileSystem(String path) {
-    try {
-      // no need to close the fs, the cache will do it
-      return tmpFsCache.get(path, () -> FileSystem.get(new Path(path).toUri(), getConf()));
-    } catch (ExecutionException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  public String getConfDir() {
-    return confDir;
-  }
-  
-  private void initKerberos() {
-    String keytabFile = getConfig(KERBEROS_KEYTAB, "").trim();
-    if (keytabFile.length() == 0) {
-      throw new IllegalArgumentException(KERBEROS_KEYTAB + " required because "
-          + KERBEROS_ENABLED + " set to true");
-    }
-    String principal = getConfig(KERBEROS_PRINCIPAL, "");
-    if (principal.length() == 0) {
-      throw new IllegalArgumentException(KERBEROS_PRINCIPAL
-          + " required because " + KERBEROS_ENABLED + " set to true");
-    }
-    synchronized (HdfsDirectoryFactory.class) {
-      if (kerberosInit == null) {
-        kerberosInit = Boolean.TRUE;
-        final Configuration conf = getConf();
-        final String authVal = conf.get(HADOOP_SECURITY_AUTHENTICATION);
-        final String kerberos = "kerberos";
-        if (authVal != null && !authVal.equals(kerberos)) {
-          throw new IllegalArgumentException(HADOOP_SECURITY_AUTHENTICATION
-              + " set to: " + authVal + ", not kerberos, but attempting to "
-              + " connect to HDFS via kerberos");
-        }
-        // let's avoid modifying the supplied configuration, just to be conservative
-        final Configuration ugiConf = new Configuration(getConf());
-        ugiConf.set(HADOOP_SECURITY_AUTHENTICATION, kerberos);
-        UserGroupInformation.setConfiguration(ugiConf);
-        log.info(
-            "Attempting to acquire kerberos ticket with keytab: {}, principal: {} ",
-            keytabFile, principal);
-        try {
-          UserGroupInformation.loginUserFromKeytab(principal, keytabFile);
-        } catch (IOException ioe) {
-          throw new RuntimeException(ioe);
-        }
-        log.info("Got Kerberos ticket");
-      }
-    }
-  }
-
-  @Override
-  public void initializeMetrics(SolrMetricManager manager, String registry, String tag, String scope) {
-    MetricsHolder.metrics.initializeMetrics(manager, registry, tag, scope);
-    LocalityHolder.reporter.initializeMetrics(manager, registry, tag, scope);
-  }
-
-  @Override
-  public void inform(SolrCore core) {
-    setHost(core.getCoreContainer().getHostName());
-  }
-
-  @VisibleForTesting
-  void setHost(String hostname) {
-    LocalityHolder.reporter.setHost(hostname);
-  }
-
-  @Override
-  public void cleanupOldIndexDirectories(final String dataDir, final String currentIndexDir, boolean afterReload) {
-
-    // Get the FileSystem object
-    final Path dataDirPath = new Path(dataDir);
-    FileSystem fileSystem = getCachedFileSystem(dataDir);
-
-    boolean pathExists = false;
-    try {
-      pathExists = fileSystem.exists(dataDirPath);
-    } catch (IOException e) {
-      log.error("Error checking if hdfs path "+dataDir+" exists", e);
-    }
-    if (!pathExists) {
-      log.warn("{} does not point to a valid data directory; skipping clean-up of old index directories.", dataDir);
-      return;
-    }
-
-    final Path currentIndexDirPath = new Path(currentIndexDir); // make sure we don't delete the current
-    final FileSystem fs = fileSystem;
-    FileStatus[] oldIndexDirs = null;
-    try {
-      oldIndexDirs = fileSystem.listStatus(dataDirPath, new PathFilter() {
-        @Override
-        public boolean accept(Path path) {
-          boolean accept = false;
-          String pathName = path.getName();
-          try {
-            accept = fs.isDirectory(path) && !path.equals(currentIndexDirPath) &&
-                (pathName.equals("index") || pathName.matches(INDEX_W_TIMESTAMP_REGEX));
-          } catch (IOException e) {
-            log.error("Error checking if path {} is an old index directory, caused by: {}", path, e);
-          }
-          return accept;
-        }
-      });
-    } catch (FileNotFoundException fnfe) {
-      // already deleted - ignore
-      log.debug("Old index directory already deleted - skipping...", fnfe);
-    } catch (IOException ioExc) {
-      log.error("Error checking for old index directories to clean-up.", ioExc);
-    }
-
-    if (oldIndexDirs == null || oldIndexDirs.length == 0)
-      return; // nothing to clean-up
-
-    List<Path> oldIndexPaths = new ArrayList<>(oldIndexDirs.length);
-    for (FileStatus ofs : oldIndexDirs) {
-      oldIndexPaths.add(ofs.getPath());
-    }
-
-    Collections.sort(oldIndexPaths, Collections.reverseOrder());
-    
-    Set<String> livePaths = getLivePaths();
-    
-    int i = 0;
-    if (afterReload) {
-      log.info("Will not remove most recent old directory on reload {}", oldIndexDirs[0]);
-      i = 1;
-    }
-    log.info("Found {} old index directories to clean-up under {} afterReload={}", oldIndexDirs.length - i, dataDirPath, afterReload);
-    for (; i < oldIndexPaths.size(); i++) {
-      Path oldDirPath = oldIndexPaths.get(i);
-      if (livePaths.contains(oldDirPath.toString())) {
-        log.warn("Cannot delete directory {} because it is still being referenced in the cache.", oldDirPath);
-      } else {
-        try {
-          if (fileSystem.delete(oldDirPath, true)) {
-            log.info("Deleted old index directory {}", oldDirPath);
-          } else {
-            log.warn("Failed to delete old index directory {}", oldDirPath);
-          }
-        } catch (IOException e) {
-          log.error("Failed to delete old index directory {} due to: {}", oldDirPath, e);
-        }
-      }
-    }
-  }
-  
-  // perform an atomic rename if possible
-  public void renameWithOverwrite(Directory dir, String fileName, String toName) throws IOException {
-    String hdfsDirPath = getPath(dir);
-    FileContext fileContext = FileContext.getFileContext(getConf());
-    fileContext.rename(new Path(hdfsDirPath + "/" + fileName), new Path(hdfsDirPath + "/" + toName), Options.Rename.OVERWRITE);
-  }
-  
-  @Override
-  public void move(Directory fromDir, Directory toDir, String fileName, IOContext ioContext) throws IOException {
-    
-    Directory baseFromDir = getBaseDir(fromDir);
-    Directory baseToDir = getBaseDir(toDir);
-    
-    if (baseFromDir instanceof HdfsDirectory && baseToDir instanceof HdfsDirectory) {
-      Path dir1 = ((HdfsDirectory) baseFromDir).getHdfsDirPath();
-      Path dir2 = ((HdfsDirectory) baseToDir).getHdfsDirPath();
-      Path file1 = new Path(dir1, fileName);
-      Path file2 = new Path(dir2, fileName);
-      FileContext fileContext = FileContext.getFileContext(getConf());
-      fileContext.rename(file1, file2);
-      return;
-    }
-
-    super.move(fromDir, toDir, fileName, ioContext);
-  }
-}


[36/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/OverseerTriggerThread.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/OverseerTriggerThread.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/OverseerTriggerThread.java
deleted file mode 100644
index 052b4c4..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/OverseerTriggerThread.java
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.net.ConnectException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Set;
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.locks.ReentrantLock;
-
-import org.apache.lucene.store.AlreadyClosedException;
-import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
-import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
-import org.apache.solr.client.solrj.cloud.DistribStateManager;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
-import org.apache.solr.common.SolrCloseable;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.IOUtils;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.CloudConfig;
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.SOLR_AUTOSCALING_CONF_PATH;
-
-/**
- * Overseer thread responsible for reading triggers from ZooKeeper and
- * adding/removing them from {@link ScheduledTriggers}.
- */
-public class OverseerTriggerThread implements Runnable, SolrCloseable {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final SolrCloudManager cloudManager;
-
-  private final CloudConfig cloudConfig;
-
-  private final ScheduledTriggers scheduledTriggers;
-
-  private final AutoScaling.TriggerFactory triggerFactory;
-
-  private final ReentrantLock updateLock = new ReentrantLock();
-
-  private final Condition updated = updateLock.newCondition();
-
-  /*
-  The following variables are only accessed or modified while updateLock is held.
-   */
-  private int znodeVersion = -1;
-
-  private Map<String, AutoScaling.Trigger> activeTriggers = new HashMap<>();
-
-  private volatile boolean isClosed = false;
-
-  private AutoScalingConfig autoScalingConfig;
-
-  public OverseerTriggerThread(SolrResourceLoader loader, SolrCloudManager cloudManager, CloudConfig cloudConfig) {
-    this.cloudManager = cloudManager;
-    this.cloudConfig = cloudConfig;
-    scheduledTriggers = new ScheduledTriggers(loader, cloudManager);
-    triggerFactory = new AutoScaling.TriggerFactoryImpl(loader, cloudManager);
-  }
-
-  @Override
-  public void close() throws IOException {
-    updateLock.lock();
-    try {
-      isClosed = true;
-      activeTriggers.clear();
-      updated.signalAll();
-    } finally {
-      updateLock.unlock();
-    }
-    IOUtils.closeQuietly(triggerFactory);
-    IOUtils.closeQuietly(scheduledTriggers);
-    log.debug("OverseerTriggerThread has been closed explicitly");
-  }
-
-  /**
-   * For tests.
-   * @lucene.internal
-   * @return current {@link ScheduledTriggers} instance
-   */
-  public ScheduledTriggers getScheduledTriggers() {
-    return scheduledTriggers;
-  }
-
-  @Override
-  public boolean isClosed() {
-    return isClosed;
-  }
-
-  @Override
-  public void run() {
-    int lastZnodeVersion = znodeVersion;
-
-    // we automatically add a trigger for auto add replicas if it does not exist already
-    // we also automatically add a scheduled maintenance trigger
-    while (!isClosed)  {
-      try {
-        if (Thread.currentThread().isInterrupted()) {
-          log.warn("Interrupted");
-          break;
-        }
-        AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
-        AutoScalingConfig updatedConfig = withAutoAddReplicasTrigger(autoScalingConfig);
-        updatedConfig = withScheduledMaintenanceTrigger(updatedConfig);
-        if (updatedConfig.equals(autoScalingConfig)) break;
-        log.debug("Adding .auto_add_replicas and .scheduled_maintenance triggers");
-        cloudManager.getDistribStateManager().setData(SOLR_AUTOSCALING_CONF_PATH, Utils.toJSON(updatedConfig), updatedConfig.getZkVersion());
-        break;
-      } catch (BadVersionException bve) {
-        // somebody else has changed the configuration so we must retry
-      } catch (InterruptedException e) {
-        // Restore the interrupted status
-        Thread.currentThread().interrupt();
-        log.warn("Interrupted", e);
-        break;
-      }
-      catch (IOException | KeeperException e) {
-        if (e instanceof KeeperException.SessionExpiredException ||
-            (e.getCause()!=null && e.getCause() instanceof KeeperException.SessionExpiredException)) {
-          log.warn("Solr cannot talk to ZK, exiting " + 
-              getClass().getSimpleName() + " main queue loop", e);
-          return;
-        } else {
-          log.error("A ZK error has occurred", e);
-        }
-      }
-    }
-
-    if (isClosed || Thread.currentThread().isInterrupted())  return;
-
-    try {
-      refreshAutoScalingConf(new AutoScalingWatcher());
-    } catch (ConnectException e) {
-      log.warn("ZooKeeper watch triggered for autoscaling conf, but Solr cannot talk to ZK: [{}]", e.getMessage());
-    } catch (InterruptedException e) {
-      // Restore the interrupted status
-      Thread.currentThread().interrupt();
-      log.warn("Interrupted", e);
-    } catch (Exception e)  {
-      log.error("Unexpected exception", e);
-    }
-
-    while (true) {
-      Map<String, AutoScaling.Trigger> copy = null;
-      try {
-        // this can throw InterruptedException and we don't want to unlock if it did, so we keep this outside
-        // of the try/finally block
-        updateLock.lockInterruptibly();
-
-        // must check for close here before we await on the condition otherwise we can only be woken up on interruption
-        if (isClosed) {
-          log.warn("OverseerTriggerThread has been closed, exiting.");
-          break;
-        }
-
-        log.debug("Current znodeVersion {}, lastZnodeVersion {}", znodeVersion, lastZnodeVersion);
-
-        try {
-          if (znodeVersion == lastZnodeVersion) {
-            updated.await();
-
-            // are we closed?
-            if (isClosed) {
-              log.warn("OverseerTriggerThread woken up but we are closed, exiting.");
-              break;
-            }
-
-            // spurious wakeup?
-            if (znodeVersion == lastZnodeVersion) continue;
-          }
-          copy = new HashMap<>(activeTriggers);
-          lastZnodeVersion = znodeVersion;
-          log.debug("Processed trigger updates upto znodeVersion {}", znodeVersion);
-        } catch (InterruptedException e) {
-          // Restore the interrupted status
-          Thread.currentThread().interrupt();
-          log.warn("Interrupted", e);
-          break;
-        } finally {
-          updateLock.unlock();
-        }
-      } catch (InterruptedException e) {
-        // Restore the interrupted status
-        Thread.currentThread().interrupt();
-        log.warn("Interrupted", e);
-        break;
-      }
-
-      // update the current config
-      scheduledTriggers.setAutoScalingConfig(autoScalingConfig);
-
-      Set<String> managedTriggerNames = scheduledTriggers.getScheduledTriggerNames();
-      // remove the triggers which are no longer active
-      for (String managedTriggerName : managedTriggerNames) {
-        if (!copy.containsKey(managedTriggerName)) {
-          scheduledTriggers.remove(managedTriggerName);
-        }
-      }
-      // check for nodeLost triggers in the current config, and if
-      // absent then clean up old nodeLost / nodeAdded markers
-      boolean cleanOldNodeLostMarkers = true;
-      boolean cleanOldNodeAddedMarkers = true;
-      try {
-        // add new triggers and/or replace and close the replaced triggers
-        for (Map.Entry<String, AutoScaling.Trigger> entry : copy.entrySet()) {
-          if (entry.getValue().getEventType().equals(TriggerEventType.NODELOST)) {
-            cleanOldNodeLostMarkers = false;
-          }
-          if (entry.getValue().getEventType().equals(TriggerEventType.NODEADDED)) {
-            cleanOldNodeAddedMarkers = false;
-          }
-          try {
-            scheduledTriggers.add(entry.getValue());
-          } catch (Exception e) {
-            log.warn("Exception initializing trigger " + entry.getKey() + ", configuration ignored", e);
-          }
-        }
-      } catch (AlreadyClosedException e) {
-        // this _should_ mean that we're closing, complain loudly if that's not the case
-        if (isClosed) {
-          return;
-        } else {
-          throw new IllegalStateException("Caught AlreadyClosedException from ScheduledTriggers, but we're not closed yet!", e);
-        }
-      }
-      DistribStateManager stateManager = cloudManager.getDistribStateManager();
-      if (cleanOldNodeLostMarkers) {
-        log.debug("-- clean old nodeLost markers");
-        try {
-          List<String> markers = stateManager.listData(ZkStateReader.SOLR_AUTOSCALING_NODE_LOST_PATH);
-          markers.forEach(n -> {
-            removeNodeMarker(ZkStateReader.SOLR_AUTOSCALING_NODE_LOST_PATH, n);
-          });
-        } catch (NoSuchElementException e) {
-          // ignore
-        } catch (Exception e) {
-          log.warn("Error removing old nodeLost markers", e);
-        }
-      }
-      if (cleanOldNodeAddedMarkers) {
-        log.debug("-- clean old nodeAdded markers");
-        try {
-          List<String> markers = stateManager.listData(ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH);
-          markers.forEach(n -> {
-            removeNodeMarker(ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH, n);
-          });
-        } catch (NoSuchElementException e) {
-          // ignore
-        } catch (Exception e) {
-          log.warn("Error removing old nodeAdded markers", e);
-        }
-
-      }
-    }
-  }
-
-  private void removeNodeMarker(String path, String nodeName) {
-    path = path + "/" + nodeName;
-    try {
-      cloudManager.getDistribStateManager().removeData(path, -1);
-      log.debug("  -- deleted " + path);
-    } catch (NoSuchElementException e) {
-      // ignore
-    } catch (Exception e) {
-      log.warn("Error removing old marker " + path, e);
-    }
-  }
-
-  class AutoScalingWatcher implements Watcher  {
-    @Override
-    public void process(WatchedEvent watchedEvent) {
-      // session events are not change events, and do not remove the watcher
-      if (Event.EventType.None.equals(watchedEvent.getType())) {
-        return;
-      }
-
-      try {
-        refreshAutoScalingConf(this);
-      } catch (ConnectException e) {
-        log.warn("ZooKeeper watch triggered for autoscaling conf, but we cannot talk to ZK: [{}]", e.getMessage());
-      } catch (InterruptedException e) {
-        // Restore the interrupted status
-        Thread.currentThread().interrupt();
-        log.warn("Interrupted", e);
-      } catch (Exception e)  {
-        log.error("Unexpected exception", e);
-      }
-    }
-
-  }
-
-  private void refreshAutoScalingConf(Watcher watcher) throws InterruptedException, IOException {
-    updateLock.lock();
-    try {
-      if (isClosed) {
-        return;
-      }
-      AutoScalingConfig currentConfig = cloudManager.getDistribStateManager().getAutoScalingConfig(watcher);
-      log.debug("Refreshing {} with znode version {}", ZkStateReader.SOLR_AUTOSCALING_CONF_PATH, currentConfig.getZkVersion());
-      if (znodeVersion >= currentConfig.getZkVersion()) {
-        // protect against reordered watcher fires by ensuring that we only move forward
-        return;
-      }
-      autoScalingConfig = currentConfig;
-      znodeVersion = autoScalingConfig.getZkVersion();
-      Map<String, AutoScaling.Trigger> triggerMap = loadTriggers(triggerFactory, autoScalingConfig);
-
-      // remove all active triggers that have been removed from ZK
-      Set<String> trackingKeySet = activeTriggers.keySet();
-      trackingKeySet.retainAll(triggerMap.keySet());
-
-      // now lets add or remove triggers which have been enabled or disabled respectively
-      for (Map.Entry<String, AutoScaling.Trigger> entry : triggerMap.entrySet()) {
-        String triggerName = entry.getKey();
-        AutoScaling.Trigger trigger = entry.getValue();
-        if (trigger.isEnabled()) {
-          activeTriggers.put(triggerName, trigger);
-        } else {
-          activeTriggers.remove(triggerName);
-        }
-      }
-      updated.signalAll();
-    } finally {
-      updateLock.unlock();
-    }
-  }
-
-  private AutoScalingConfig withAutoAddReplicasTrigger(AutoScalingConfig autoScalingConfig) {
-    Map<String, Object> triggerProps = AutoScaling.AUTO_ADD_REPLICAS_TRIGGER_PROPS;
-    return withDefaultTrigger(triggerProps, autoScalingConfig);
-  }
-
-  private AutoScalingConfig withScheduledMaintenanceTrigger(AutoScalingConfig autoScalingConfig) {
-    Map<String, Object> triggerProps = AutoScaling.SCHEDULED_MAINTENANCE_TRIGGER_PROPS;
-    return withDefaultTrigger(triggerProps, autoScalingConfig);
-  }
-
-  private AutoScalingConfig withDefaultTrigger(Map<String, Object> triggerProps, AutoScalingConfig autoScalingConfig) {
-    String triggerName = (String) triggerProps.get("name");
-    Map<String, AutoScalingConfig.TriggerConfig> configs = autoScalingConfig.getTriggerConfigs();
-    for (AutoScalingConfig.TriggerConfig cfg : configs.values()) {
-      if (triggerName.equals(cfg.name)) {
-        // already has this trigger
-        return autoScalingConfig;
-      }
-    }
-    // need to add
-    triggerProps.computeIfPresent("waitFor", (k, v) -> (long) (cloudConfig.getAutoReplicaFailoverWaitAfterExpiration() / 1000));
-    AutoScalingConfig.TriggerConfig config = new AutoScalingConfig.TriggerConfig(triggerName, triggerProps);
-    autoScalingConfig = autoScalingConfig.withTriggerConfig(config);
-    // need to add SystemLogListener explicitly here
-    autoScalingConfig = AutoScalingHandler.withSystemLogListener(autoScalingConfig, triggerName);
-    return autoScalingConfig;
-  }
-
-  private static Map<String, AutoScaling.Trigger> loadTriggers(AutoScaling.TriggerFactory triggerFactory, AutoScalingConfig autoScalingConfig) {
-    Map<String, AutoScalingConfig.TriggerConfig> triggers = autoScalingConfig.getTriggerConfigs();
-    if (triggers == null) {
-      return Collections.emptyMap();
-    }
-
-    Map<String, AutoScaling.Trigger> triggerMap = new HashMap<>(triggers.size());
-
-    for (Map.Entry<String, AutoScalingConfig.TriggerConfig> entry : triggers.entrySet()) {
-      AutoScalingConfig.TriggerConfig cfg = entry.getValue();
-      TriggerEventType eventType = cfg.event;
-      String triggerName = entry.getKey();
-      try {
-        triggerMap.put(triggerName, triggerFactory.create(eventType, triggerName, cfg.properties));
-      } catch (TriggerValidationException e) {
-        log.warn("Error in trigger '" + triggerName + "' configuration, trigger config ignored: " + cfg, e);
-      }
-    }
-    return triggerMap;
-  }
-}

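The run()/refreshAutoScalingConf() handshake above is a standard
lock-and-condition pattern; distilled into a self-contained sketch (this is
not Solr code, just the shape of the synchronization):

    import java.util.concurrent.locks.Condition;
    import java.util.concurrent.locks.ReentrantLock;

    class VersionedWaiter {
      private final ReentrantLock lock = new ReentrantLock();
      private final Condition updated = lock.newCondition();
      private int version = -1;
      private boolean closed = false;

      // watcher thread: publish a newer version and wake the consumer
      void publish(int newVersion) {
        lock.lock();
        try {
          if (newVersion <= version) return; // ignore reordered watch events
          version = newVersion;
          updated.signalAll();
        } finally {
          lock.unlock();
        }
      }

      // consumer thread: block until the version moves past lastSeen
      int awaitNewer(int lastSeen) throws InterruptedException {
        lock.lockInterruptibly();
        try {
          while (!closed && version == lastSeen) {
            updated.await(); // the loop also absorbs spurious wakeups
          }
          return version;
        } finally {
          lock.unlock();
        }
      }
    }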
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/ScheduledTrigger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ScheduledTrigger.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/ScheduledTrigger.java
deleted file mode 100644
index 5e25542..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ScheduledTrigger.java
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.lang.invoke.MethodHandles;
-import java.text.ParseException;
-import java.time.Instant;
-import java.time.format.DateTimeFormatter;
-import java.time.format.DateTimeFormatterBuilder;
-import java.time.temporal.ChronoField;
-import java.util.Collections;
-import java.util.Date;
-import java.util.Locale;
-import java.util.Map;
-import java.util.TimeZone;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.AutoScalingParams;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.solr.util.DateMathParser;
-import org.apache.solr.util.TimeZoneUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.AutoScalingParams.PREFERRED_OP;
-
-/**
- * A trigger which creates {@link TriggerEventType#SCHEDULED} events as per the configured schedule
- */
-public class ScheduledTrigger extends TriggerBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private static final String DEFAULT_GRACE_DURATION = "+15MINUTES";
-  private static final String LAST_RUN_AT = "lastRunAt";
-  static final String ACTUAL_EVENT_TIME = "actualEventTime";
-
-  private String everyStr;
-
-  private String graceDurationStr;
-
-  private String preferredOp;
-
-  private TimeZone timeZone;
-
-  private Instant lastRunAt;
-
-  public ScheduledTrigger(String name) {
-    super(TriggerEventType.SCHEDULED, name);
-    TriggerUtils.requiredProperties(requiredProperties, validProperties, "startTime");
-    TriggerUtils.validProperties(validProperties, "timeZone", "every", "graceDuration", AutoScalingParams.PREFERRED_OP);
-  }
-
-  @Override
-  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException {
-    super.configure(loader, cloudManager, properties);
-    String timeZoneStr = (String) properties.get("timeZone");
-    this.timeZone = TimeZoneUtils.parseTimezone(timeZoneStr); // defaults to UTC
-
-    String startTimeStr = (String) properties.get("startTime");
-    this.everyStr = (String) properties.get("every");
-    this.graceDurationStr = (String) properties.getOrDefault("graceDuration", DEFAULT_GRACE_DURATION);
-
-    preferredOp = (String) properties.get(PREFERRED_OP);
-
-    // attempt parsing to validate date math strings
-    // explicitly set NOW because it may be different for simulated time
-    Date now = new Date(TimeUnit.NANOSECONDS.toMillis(cloudManager.getTimeSource().getEpochTimeNs()));
-    Instant startTime = parseStartTime(now, startTimeStr, timeZoneStr);
-    DateMathParser.parseMath(now, startTime + everyStr, timeZone);
-    DateMathParser.parseMath(now, startTime + graceDurationStr, timeZone);
-
-    // We set lastRunAt to be the startTime (which could be a date math expression such as 'NOW')
-    // Ordinarily, NOW will always be evaluated in this method, so it may seem that
-    // the trigger will always fire the first time.
-    // However, the lastRunAt is overwritten with the value from ZK
-    // during restoreState() operation (which is performed before run()) so the trigger works correctly
-    this.lastRunAt = startTime;
-  }
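For illustration, the kind of property set this validation accepts (values
invented; the date-math strings follow Solr's DateMathParser syntax):

    // startTime:     "NOW"          (or an ISO-8601 instant plus a timeZone)
    // every:         "+1DAY"
    // graceDuration: "+15MINUTES"
    Date now = new Date();
    Instant start = DateMathParser.parseMath(now, "NOW").toInstant();
    DateMathParser.parseMath(now, start + "+1DAY", timeZone);      // next run time
    DateMathParser.parseMath(now, start + "+15MINUTES", timeZone); // grace window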
-
-  private Instant parseStartTime(Date now, String startTimeStr, String timeZoneStr) throws TriggerValidationException {
-    try {
-      // try parsing startTime as an ISO-8601 date time string
-      return DateMathParser.parseMath(now, startTimeStr).toInstant();
-    } catch (SolrException e) {
-      if (e.code() != SolrException.ErrorCode.BAD_REQUEST.code) {
-        throw new TriggerValidationException("startTime", "error parsing value '" + startTimeStr + "': " + e.toString());
-      }
-    }
-    if (timeZoneStr == null)  {
-      throw new TriggerValidationException("timeZone",
-          "Either 'startTime' should be an ISO-8601 date time string or 'timeZone' must be not be null");
-    }
-    TimeZone timeZone = TimeZone.getTimeZone(timeZoneStr);
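-    // accepts "yyyy-MM-dd" optionally followed by 'T' and HH[:mm[:ss]], with missing
-    // time fields defaulting to zero, interpreted in the given time zone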
-    DateTimeFormatter dateTimeFormatter = new DateTimeFormatterBuilder()
-        .append(DateTimeFormatter.ISO_LOCAL_DATE).appendPattern("['T'[HH[:mm[:ss]]]]")
-        .parseDefaulting(ChronoField.HOUR_OF_DAY, 0)
-        .parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0)
-        .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0)
-        .toFormatter(Locale.ROOT).withZone(timeZone.toZoneId());
-    try {
-      return Instant.from(dateTimeFormatter.parse(startTimeStr));
-    } catch (Exception e) {
-      throw new TriggerValidationException("startTime", "error parsing startTime '" + startTimeStr + "': " + e.toString());
-    }
-  }
-
-  @Override
-  protected Map<String, Object> getState() {
-    return Collections.singletonMap(LAST_RUN_AT, lastRunAt.toEpochMilli());
-  }
-
-  @Override
-  protected void setState(Map<String, Object> state) {
-    if (state.containsKey(LAST_RUN_AT)) {
-      this.lastRunAt = Instant.ofEpochMilli((Long) state.get(LAST_RUN_AT));
-    }
-  }
-
-  @Override
-  public void restoreState(AutoScaling.Trigger old) {
-    assert old.isClosed();
-    if (old instanceof ScheduledTrigger) {
-      ScheduledTrigger scheduledTrigger = (ScheduledTrigger) old;
-      this.lastRunAt = scheduledTrigger.lastRunAt;
-    } else  {
-      throw new SolrException(SolrException.ErrorCode.INVALID_STATE,
-          "Unable to restore state from an unknown type of trigger");
-    }
-  }
-
-  @Override
-  public void run() {
-    synchronized (this) {
-      if (isClosed) {
-        log.warn("ScheduledTrigger ran but was already closed");
-        throw new RuntimeException("Trigger has been closed");
-      }
-    }
-
-    TimeSource timeSource = cloudManager.getTimeSource();
-    DateMathParser dateMathParser = new DateMathParser(timeZone);
-    dateMathParser.setNow(new Date(lastRunAt.toEpochMilli()));
-    Instant nextRunTime, nextPlusGrace;
-    try {
-      Date next = dateMathParser.parseMath(everyStr);
-      dateMathParser.setNow(next);
-      nextPlusGrace = dateMathParser.parseMath(graceDurationStr).toInstant();
-      nextRunTime = next.toInstant();
-    } catch (ParseException e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Unable to calculate next run time. lastRan: " + lastRunAt.toString() + " and date math string: " + everyStr, e);
-    }
-
-    Instant now = Instant.ofEpochMilli(
-        TimeUnit.NANOSECONDS.toMillis(timeSource.getEpochTimeNs()));
-    AutoScaling.TriggerEventProcessor processor = processorRef.get();
-
-    if (now.isBefore(nextRunTime)) {
-      return; // it's not time yet
-    }
-    if (now.isAfter(nextPlusGrace)) {
-      // we are past the scheduled time plus the grace period, so skip this event
-      if (log.isWarnEnabled())  {
-        log.warn("ScheduledTrigger was not able to run event at scheduled time: {}. Now: {}",
-            nextRunTime, now);
-      }
-      // Even though we are skipping the event, we need to notify any listeners of the IGNORED stage
-      // so we create a dummy event with the ignored=true flag and ScheduledTriggers will do the rest
-      if (processor != null && processor.process(new ScheduledEvent(getEventType(), getName(), timeSource.getTimeNs(),
-          preferredOp, now.toEpochMilli(), true))) {
-        lastRunAt = nextRunTime;
-        return;
-      }
-    }
-
-    if (processor != null)  {
-      if (log.isDebugEnabled()) {
-        log.debug("ScheduledTrigger {} firing registered processor for scheduled time {}, now={}", name,
-            nextRunTime, now);
-      }
-      if (processor.process(new ScheduledEvent(getEventType(), getName(), timeSource.getTimeNs(),
-          preferredOp, now.toEpochMilli()))) {
-        lastRunAt = nextRunTime; // set to nextRunTime instead of now to avoid drift
-      }
-    } else  {
-      lastRunAt = nextRunTime; // set to nextRunTime instead of now to avoid drift
-    }
-  }
-
-  public static class ScheduledEvent extends TriggerEvent {
-    public ScheduledEvent(TriggerEventType eventType, String source, long eventTime, String preferredOp, long actualEventTime) {
-      this(eventType, source, eventTime, preferredOp, actualEventTime, false);
-    }
-
-    public ScheduledEvent(TriggerEventType eventType, String source, long eventTime, String preferredOp, long actualEventTime, boolean ignored) {
-      super(eventType, source, eventTime, null, ignored);
-      if (preferredOp != null)  {
-        properties.put(PREFERRED_OP, preferredOp);
-      }
-      properties.put(ACTUAL_EVENT_TIME, actualEventTime);
-    }
-  }
-}
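
For readers tracing the deleted trigger above, the following is a minimal,
self-contained sketch (not part of this commit) of the schedule arithmetic in
ScheduledTrigger.run(): the next run time is lastRunAt advanced by the "every"
date math expression, and the grace window is that result advanced again by
graceDuration. The class name NextRunSketch and the "+1DAY" schedule are
illustrative assumptions; the DateMathParser calls mirror those in the code above.

    import java.text.ParseException;
    import java.util.Date;
    import java.util.TimeZone;

    import org.apache.solr.util.DateMathParser;

    public class NextRunSketch {
      public static void main(String[] args) throws ParseException {
        DateMathParser p = new DateMathParser(TimeZone.getTimeZone("UTC"));
        p.setNow(new Date());                           // stands in for lastRunAt
        Date next = p.parseMath("+1DAY");               // hypothetical "every" expression
        p.setNow(next);
        Date nextPlusGrace = p.parseMath("+15MINUTES"); // the default graceDuration
        System.out.println("next=" + next + ", grace ends=" + nextPlusGrace);
        // run() fires only once now >= next; past nextPlusGrace the event is delivered
        // with ignored=true so listeners still observe the IGNORED stage
      }
    }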

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/ScheduledTriggers.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ScheduledTriggers.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/ScheduledTriggers.java
deleted file mode 100644
index 7c3cbb0..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ScheduledTriggers.java
+++ /dev/null
@@ -1,802 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.stream.Collectors;
-
-import org.apache.commons.lang3.exception.ExceptionUtils;
-import org.apache.lucene.store.AlreadyClosedException;
-import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
-import org.apache.solr.client.solrj.cloud.DistribStateManager;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventProcessorStage;
-import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest.RequestStatusResponse;
-import org.apache.solr.client.solrj.response.RequestStatusState;
-import org.apache.solr.cloud.Stats;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.IOUtils;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.solr.util.DefaultSolrThreadFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.autoscaling.ExecutePlanAction.waitForTaskToFinish;
-import static org.apache.solr.common.params.AutoScalingParams.ACTION_THROTTLE_PERIOD_SECONDS;
-import static org.apache.solr.common.params.AutoScalingParams.TRIGGER_COOLDOWN_PERIOD_SECONDS;
-import static org.apache.solr.common.params.AutoScalingParams.TRIGGER_CORE_POOL_SIZE;
-import static org.apache.solr.common.params.AutoScalingParams.TRIGGER_SCHEDULE_DELAY_SECONDS;
-import static org.apache.solr.common.util.ExecutorUtil.awaitTermination;
-
-/**
- * Responsible for scheduling active triggers, starting and stopping them, and
- * performing actions when they fire
- */
-public class ScheduledTriggers implements Closeable {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  public static final int DEFAULT_SCHEDULED_TRIGGER_DELAY_SECONDS = 1;
-  public static final int DEFAULT_ACTION_THROTTLE_PERIOD_SECONDS = 5;
-  public static final int DEFAULT_COOLDOWN_PERIOD_SECONDS = 5;
-  public static final int DEFAULT_TRIGGER_CORE_POOL_SIZE = 4;
-
-  static final Map<String, Object> DEFAULT_PROPERTIES = new HashMap<>();
-
-  static {
-    DEFAULT_PROPERTIES.put(TRIGGER_SCHEDULE_DELAY_SECONDS, DEFAULT_SCHEDULED_TRIGGER_DELAY_SECONDS);
-    DEFAULT_PROPERTIES.put(TRIGGER_COOLDOWN_PERIOD_SECONDS, DEFAULT_COOLDOWN_PERIOD_SECONDS);
-    DEFAULT_PROPERTIES.put(TRIGGER_CORE_POOL_SIZE, DEFAULT_TRIGGER_CORE_POOL_SIZE);
-    DEFAULT_PROPERTIES.put(ACTION_THROTTLE_PERIOD_SECONDS, DEFAULT_ACTION_THROTTLE_PERIOD_SECONDS);
-  }
-
-  private final Map<String, TriggerWrapper> scheduledTriggerWrappers = new ConcurrentHashMap<>();
-
-  /**
-   * Thread pool for scheduling the triggers
-   */
-  private final ScheduledThreadPoolExecutor scheduledThreadPoolExecutor;
-
-  /**
-   * Single-threaded executor to run the actions upon a trigger event. We rely on this being a
-   * single-threaded executor to ensure that trigger fires do not step on each other, and that
-   * no scheduled trigger threads run while an action has been submitted to this executor
-   */
-  private final ExecutorService actionExecutor;
-
-  private boolean isClosed = false;
-
-  private final AtomicBoolean hasPendingActions = new AtomicBoolean(false);
-
-  private final AtomicLong cooldownStart = new AtomicLong();
-
-  private final AtomicLong cooldownPeriod = new AtomicLong(TimeUnit.SECONDS.toNanos(DEFAULT_COOLDOWN_PERIOD_SECONDS));
-
-  private final AtomicLong triggerDelay = new AtomicLong(DEFAULT_SCHEDULED_TRIGGER_DELAY_SECONDS);
-
-  private final SolrCloudManager cloudManager;
-
-  private final DistribStateManager stateManager;
-
-  private final SolrResourceLoader loader;
-
-  private final Stats queueStats;
-
-  private final TriggerListeners listeners;
-
-  private AutoScalingConfig autoScalingConfig;
-
-  public ScheduledTriggers(SolrResourceLoader loader, SolrCloudManager cloudManager) {
-    scheduledThreadPoolExecutor = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(DEFAULT_TRIGGER_CORE_POOL_SIZE,
-        new DefaultSolrThreadFactory("ScheduledTrigger"));
-    scheduledThreadPoolExecutor.setRemoveOnCancelPolicy(true);
-    scheduledThreadPoolExecutor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
-    actionExecutor = ExecutorUtil.newMDCAwareSingleThreadExecutor(new DefaultSolrThreadFactory("AutoscalingActionExecutor"));
-    this.cloudManager = cloudManager;
-    this.stateManager = cloudManager.getDistribStateManager();
-    this.loader = loader;
-    queueStats = new Stats();
-    listeners = new TriggerListeners();
-    // initialize cooldown timer
-    cooldownStart.set(cloudManager.getTimeSource().getTimeNs() - cooldownPeriod.get());
-  }
-
-  /**
-   * Set the current autoscaling config. This is invoked by {@link OverseerTriggerThread} when autoscaling.json is updated,
-   * and it re-initializes trigger listeners and other properties used by the framework
-   * @param autoScalingConfig current autoscaling.json
-   */
-  public void setAutoScalingConfig(AutoScalingConfig autoScalingConfig) {
-    Map<String, Object> currentProps = new HashMap<>(DEFAULT_PROPERTIES);
-    if (this.autoScalingConfig != null) {
-      currentProps.putAll(this.autoScalingConfig.getProperties());
-    }
-
-    // reset listeners early in order to capture first execution of newly scheduled triggers
-    listeners.setAutoScalingConfig(autoScalingConfig);
-
-    for (Map.Entry<String, Object> entry : currentProps.entrySet()) {
-      Map<String, Object> newProps = autoScalingConfig.getProperties();
-      String key = entry.getKey();
-      if (newProps.containsKey(key) && !entry.getValue().equals(newProps.get(key))) {
-        log.debug("Changing value of autoscaling property: {} from: {} to: {}", key, entry.getValue(), newProps.get(key));
-        switch (key) {
-          case TRIGGER_SCHEDULE_DELAY_SECONDS:
-            triggerDelay.set(((Number) newProps.get(key)).intValue());
-            synchronized (this) {
-              scheduledTriggerWrappers.forEach((s, triggerWrapper) -> {
-                if (triggerWrapper.scheduledFuture.cancel(false)) {
-                  triggerWrapper.scheduledFuture = scheduledThreadPoolExecutor.scheduleWithFixedDelay(
-                      triggerWrapper, 0,
-                      cloudManager.getTimeSource().convertDelay(TimeUnit.SECONDS, triggerDelay.get(), TimeUnit.MILLISECONDS),
-                      TimeUnit.MILLISECONDS);
-                } else  {
-                  log.debug("Failed to cancel scheduled task: {}", s);
-                }
-              });
-            }
-            break;
-          case TRIGGER_COOLDOWN_PERIOD_SECONDS:
-            cooldownPeriod.set(TimeUnit.SECONDS.toNanos(((Number) newProps.get(key)).longValue()));
-            break;
-          case TRIGGER_CORE_POOL_SIZE:
-            this.scheduledThreadPoolExecutor.setCorePoolSize(((Number) newProps.get(key)).intValue());
-            break;
-        }
-      }
-    }
-
-    this.autoScalingConfig = autoScalingConfig;
-    // reset cooldown
-    cooldownStart.set(cloudManager.getTimeSource().getTimeNs() - cooldownPeriod.get());
-  }
-
-  /**
-   * Adds a new trigger or replaces an existing one. The replaced trigger, if any, is closed
-   * <b>before</b> the new trigger is run. If a trigger is replaced with itself then this
-   * operation becomes a no-op.
-   *
-   * @param newTrigger the trigger to be managed
-   * @throws AlreadyClosedException if this class has already been closed
-   */
-  public synchronized void add(AutoScaling.Trigger newTrigger) throws Exception {
-    if (isClosed) {
-      throw new AlreadyClosedException("ScheduledTriggers has been closed and cannot be used anymore");
-    }
-    TriggerWrapper st;
-    try {
-      st = new TriggerWrapper(newTrigger, cloudManager, queueStats);
-    } catch (Exception e) {
-      if (isClosed) {
-        throw new AlreadyClosedException("ScheduledTriggers has been closed and cannot be used anymore");
-      }
-      if (cloudManager.isClosed()) {
-        log.error("Failed to add trigger " + newTrigger.getName() + " - closing or disconnected from data provider", e);
-      } else {
-        log.error("Failed to add trigger " + newTrigger.getName(), e);
-      }
-      return;
-    }
-    TriggerWrapper triggerWrapper = st;
-
-    TriggerWrapper old = scheduledTriggerWrappers.putIfAbsent(newTrigger.getName(), triggerWrapper);
-    if (old != null) {
-      if (old.trigger.equals(newTrigger)) {
-        // the trigger wasn't actually modified so we do nothing
-        return;
-      }
-      IOUtils.closeQuietly(old);
-      newTrigger.restoreState(old.trigger);
-      triggerWrapper.setReplay(false);
-      scheduledTriggerWrappers.replace(newTrigger.getName(), triggerWrapper);
-    }
-    newTrigger.setProcessor(event -> {
-      TriggerListeners triggerListeners = listeners.copy();
-      if (cloudManager.isClosed()) {
-        String msg = String.format(Locale.ROOT, "Ignoring autoscaling event %s because Solr has been shutdown.", event.toString());
-        log.warn(msg);
-        triggerListeners.fireListeners(event.getSource(), event, TriggerEventProcessorStage.ABORTED, msg);
-        return false;
-      }
-      TriggerWrapper scheduledSource = scheduledTriggerWrappers.get(event.getSource());
-      if (scheduledSource == null) {
-        String msg = String.format(Locale.ROOT, "Ignoring autoscaling event %s because the source trigger: %s doesn't exist.", event.toString(), event.getSource());
-        triggerListeners.fireListeners(event.getSource(), event, TriggerEventProcessorStage.FAILED, msg);
-        log.warn(msg);
-        return false;
-      }
-      boolean replaying = event.getProperty(TriggerEvent.REPLAYING) != null ? (Boolean)event.getProperty(TriggerEvent.REPLAYING) : false;
-      AutoScaling.Trigger source = scheduledSource.trigger;
-      if (scheduledSource.isClosed || source.isClosed()) {
-        String msg = String.format(Locale.ROOT, "Ignoring autoscaling event %s because the source trigger: %s has already been closed", event.toString(), source);
-        triggerListeners.fireListeners(event.getSource(), event, TriggerEventProcessorStage.ABORTED, msg);
-        log.warn(msg);
-        // we do not want to lose this event just because the trigger was closed; perhaps a replacement will need it
-        return false;
-      }
-      if (event.isIgnored())  {
-        log.debug("-------- Ignoring event: " + event);
-        event.getProperties().put(TriggerEvent.IGNORED, true);
-        triggerListeners.fireListeners(event.getSource(), event, TriggerEventProcessorStage.IGNORED, "Event was ignored.");
-        return true; // always return true for ignored events
-      }
-      // even though we pause all triggers during action execution, a trigger may already have been
-      // running at the time and created an event, so we reject such events during the cooldown period
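-      // i.e. reject while (now - cooldownStart) < cooldownPeriod, in TimeSource nanoseconds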
-      if (cooldownStart.get() + cooldownPeriod.get() > cloudManager.getTimeSource().getTimeNs()) {
-        log.debug("-------- Cooldown period - rejecting event: " + event);
-        event.getProperties().put(TriggerEvent.COOLDOWN, true);
-        triggerListeners.fireListeners(event.getSource(), event, TriggerEventProcessorStage.IGNORED, "In cooldown period.");
-        return false;
-      } else {
-        log.debug("++++++++ Cooldown inactive - processing event: " + event);
-      }
-      if (hasPendingActions.compareAndSet(false, true)) {
-        // pause all triggers while we execute actions so triggers do not operate on a cluster in transition
-        pauseTriggers();
-
-        final boolean enqueued;
-        if (replaying) {
-          enqueued = false;
-        } else {
-          enqueued = triggerWrapper.enqueue(event);
-        }
-        // fire STARTED event listeners after the event has been successfully enqueued
-        triggerListeners.fireListeners(event.getSource(), event, TriggerEventProcessorStage.STARTED);
-        List<TriggerAction> actions = source.getActions();
-        if (actions != null) {
-          if (actionExecutor.isShutdown()) {
-            String msg = String.format(Locale.ROOT, "Ignoring autoscaling event %s from trigger %s because the executor has already been closed", event.toString(), source);
-            triggerListeners.fireListeners(event.getSource(), event, TriggerEventProcessorStage.ABORTED, msg);
-            log.warn(msg);
-            // we do not want to lose this event just because the trigger was closed; perhaps a replacement will need it
-            return false;
-          }
-          actionExecutor.submit(() -> {
-            assert hasPendingActions.get();
-            long eventProcessingStart = cloudManager.getTimeSource().getTimeNs();
-            TriggerListeners triggerListeners1 = triggerListeners.copy();
-            log.debug("-- processing actions for " + event);
-            try {
-              // in the future, we could wait for pending tasks in a different thread and re-enqueue
-              // this event so that we continue processing other events without blocking this action executor
-              waitForPendingTasks(newTrigger, actions);
-
-              ActionContext actionContext = new ActionContext(cloudManager, newTrigger, new HashMap<>());
-              for (TriggerAction action : actions) {
-                List<String> beforeActions = (List<String>) actionContext.getProperties().computeIfAbsent(TriggerEventProcessorStage.BEFORE_ACTION.toString(), k -> new ArrayList<String>());
-                beforeActions.add(action.getName());
-                triggerListeners1.fireListeners(event.getSource(), event, TriggerEventProcessorStage.BEFORE_ACTION, action.getName(), actionContext);
-                try {
-                  action.process(event, actionContext);
-                } catch (Exception e) {
-                  triggerListeners1.fireListeners(event.getSource(), event, TriggerEventProcessorStage.FAILED, action.getName(), actionContext, e, null);
-                  throw new TriggerActionException(event.getSource(), action.getName(), "Error processing action for trigger event: " + event, e);
-                }
-                List<String> afterActions = (List<String>) actionContext.getProperties().computeIfAbsent(TriggerEventProcessorStage.AFTER_ACTION.toString(), k -> new ArrayList<String>());
-                afterActions.add(action.getName());
-                triggerListeners1.fireListeners(event.getSource(), event, TriggerEventProcessorStage.AFTER_ACTION, action.getName(), actionContext);
-              }
-              if (enqueued) {
-                TriggerEvent ev = triggerWrapper.dequeue();
-                assert ev.getId().equals(event.getId());
-              }
-              triggerListeners1.fireListeners(event.getSource(), event, TriggerEventProcessorStage.SUCCEEDED);
-            } catch (TriggerActionException e) {
-              log.warn("Exception executing actions", e);
-            } catch (Exception e) {
-              triggerListeners1.fireListeners(event.getSource(), event, TriggerEventProcessorStage.FAILED);
-              log.warn("Unhandled exception executing actions", e);
-            } finally {
-              cooldownStart.set(cloudManager.getTimeSource().getTimeNs());
-              hasPendingActions.set(false);
-              // resume triggers after cool down period
-              resumeTriggers(cloudManager.getTimeSource().convertDelay(TimeUnit.NANOSECONDS, cooldownPeriod.get(), TimeUnit.MILLISECONDS));
-            }
-            log.debug("-- processing took {} ms for event id={}",
-                TimeUnit.NANOSECONDS.toMillis(cloudManager.getTimeSource().getTimeNs() - eventProcessingStart), event.id);
-          });
-        } else {
-          if (enqueued) {
-            TriggerEvent ev = triggerWrapper.dequeue();
-            if (!ev.getId().equals(event.getId())) {
-              throw new RuntimeException("Wrong event dequeued, queue of " + triggerWrapper.trigger.getName()
-              + " is broken! Expected event=" + event + " but got " + ev);
-            }
-          }
-          triggerListeners.fireListeners(event.getSource(), event, TriggerEventProcessorStage.SUCCEEDED);
-          hasPendingActions.set(false);
-          // resume triggers now
-          resumeTriggers(0);
-        }
-        return true;
-      } else {
-        // there is an action in the queue and we don't want to enqueue another until it is complete
-        triggerListeners.fireListeners(event.getSource(), event, TriggerEventProcessorStage.IGNORED, "Already processing another event.");
-        return false;
-      }
-    });
-    newTrigger.init(); // mark as ready for scheduling
-    triggerWrapper.scheduledFuture = scheduledThreadPoolExecutor.scheduleWithFixedDelay(triggerWrapper, 0,
-        cloudManager.getTimeSource().convertDelay(TimeUnit.SECONDS, triggerDelay.get(), TimeUnit.MILLISECONDS),
-        TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Pauses all scheduled trigger invocations without interrupting any that are in progress
-   * @lucene.internal
-   */
-  public synchronized void pauseTriggers()  {
-    if (log.isDebugEnabled()) {
-      log.debug("Pausing all triggers: {}", scheduledTriggerWrappers.keySet());
-    }
-    scheduledTriggerWrappers.forEach((s, triggerWrapper) -> triggerWrapper.scheduledFuture.cancel(false));
-  }
-
-  /**
-   * Resumes all previously cancelled triggers to be scheduled after the given initial delay
-   * @param afterDelayMillis the initial delay in milliseconds after which triggers should be resumed
-   * @lucene.internal
-   */
-  public synchronized void resumeTriggers(long afterDelayMillis) {
-    scheduledTriggerWrappers.forEach((s, triggerWrapper) ->  {
-      if (triggerWrapper.scheduledFuture.isCancelled()) {
-        log.debug("Resuming trigger: {} after {}ms", s, afterDelayMillis);
-        triggerWrapper.scheduledFuture = scheduledThreadPoolExecutor.scheduleWithFixedDelay(triggerWrapper, afterDelayMillis,
-            cloudManager.getTimeSource().convertDelay(TimeUnit.SECONDS, triggerDelay.get(), TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
-      }
-    });
-  }
-
-  private void waitForPendingTasks(AutoScaling.Trigger newTrigger, List<TriggerAction> actions) throws AlreadyClosedException {
-    DistribStateManager stateManager = cloudManager.getDistribStateManager();
-    try {
-
-      for (TriggerAction action : actions) {
-        if (action instanceof ExecutePlanAction) {
-          String parentPath = ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH + "/" + newTrigger.getName() + "/" + action.getName();
-          if (!stateManager.hasData(parentPath))  {
-            break;
-          }
-          List<String> children = stateManager.listData(parentPath);
-          if (children != null) {
-            for (String child : children) {
-              String path = parentPath + '/' + child;
-              VersionedData data = stateManager.getData(path, null);
-              if (data != null) {
-                Map map = (Map) Utils.fromJSON(data.getData());
-                String requestid = (String) map.get("requestid");
-                try {
-                  log.debug("Found pending task with requestid={}", requestid);
-                  RequestStatusResponse statusResponse = waitForTaskToFinish(cloudManager, requestid,
-                      ExecutePlanAction.DEFAULT_TASK_TIMEOUT_SECONDS, TimeUnit.SECONDS);
-                  if (statusResponse != null) {
-                    RequestStatusState state = statusResponse.getRequestStatus();
-                    if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED || state == RequestStatusState.NOT_FOUND) {
-                      stateManager.removeData(path, -1);
-                    }
-                  }
-                } catch (Exception e) {
-                  if (cloudManager.isClosed())  {
-                    throw e; // propagate the abort to the caller
-                  }
-                  Throwable rootCause = ExceptionUtils.getRootCause(e);
-                  if (rootCause instanceof IllegalStateException && rootCause.getMessage().contains("Connection pool shut down")) {
-                    throw e;
-                  }
-                  if (rootCause instanceof TimeoutException && rootCause.getMessage().contains("Could not connect to ZooKeeper")) {
-                    throw e;
-                  }
-                  log.error("Unexpected exception while waiting for pending task with requestid: " + requestid + " to finish", e);
-                }
-              }
-            }
-          }
-        }
-      }
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Thread interrupted", e);
-    } catch (Exception e) {
-      if (cloudManager.isClosed())  {
-        throw new AlreadyClosedException("The Solr instance has been shutdown");
-      }
-      // we catch but don't rethrow because a failure to wait for pending tasks
-      // should not keep the actions from executing
-      log.error("Unexpected exception while waiting for pending tasks to finish", e);
-    }
-  }
-
-  /**
-   * Remove and stop all triggers. Also cleans up any leftover
-   * state / events in ZK.
-   */
-  public synchronized void removeAll() {
-    getScheduledTriggerNames().forEach(t -> {
-      log.info("-- removing trigger: " + t);
-      remove(t);
-    });
-  }
-
-  /**
-   * Removes and stops the trigger with the given name. Also cleans up any leftover
-   * state / events in ZK.
-   *
-   * @param triggerName the name of the trigger to be removed
-   */
-  public synchronized void remove(String triggerName) {
-    TriggerWrapper removed = scheduledTriggerWrappers.remove(triggerName);
-    IOUtils.closeQuietly(removed);
-    removeTriggerZKData(triggerName);
-  }
-
-  private void removeTriggerZKData(String triggerName) {
-    String statePath = ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH + "/" + triggerName;
-    String eventsPath = ZkStateReader.SOLR_AUTOSCALING_EVENTS_PATH + "/" + triggerName;
-    try {
-      stateManager.removeRecursively(statePath, true, true);
-    } catch (Exception e) {
-      log.warn("Failed to remove state for removed trigger " + statePath, e);
-    }
-    try {
-      stateManager.removeRecursively(eventsPath, true, true);
-    } catch (Exception e) {
-      log.warn("Failed to remove events for removed trigger " + eventsPath, e);
-    }
-  }
-
-  /**
-   * @return an unmodifiable set of names of all triggers being managed by this class
-   */
-  public synchronized Set<String> getScheduledTriggerNames() {
-    return Collections.unmodifiableSet(new HashSet<>(scheduledTriggerWrappers.keySet())); // shallow copy
-  }
-
-  @Override
-  public void close() throws IOException {
-    synchronized (this) {
-      // mark that we are closed
-      isClosed = true;
-      for (TriggerWrapper triggerWrapper : scheduledTriggerWrappers.values()) {
-        IOUtils.closeQuietly(triggerWrapper);
-      }
-      scheduledTriggerWrappers.clear();
-    }
-    // shutdown and interrupt all running tasks because there's no longer any
-    // guarantee about cluster state
-    log.debug("Shutting down scheduled thread pool executor now");
-    scheduledThreadPoolExecutor.shutdownNow();
-
-    log.debug("Shutting down action executor now");
-    actionExecutor.shutdownNow();
-
-    listeners.close();
-
-    log.debug("Awaiting termination for action executor");
-    awaitTermination(actionExecutor);
-
-    log.debug("Awaiting termination for scheduled thread pool executor");
-    awaitTermination(scheduledThreadPoolExecutor);
-
-    log.debug("ScheduledTriggers closed completely");
-  }
-
-  private class TriggerWrapper implements Runnable, Closeable {
-    AutoScaling.Trigger trigger;
-    ScheduledFuture<?> scheduledFuture;
-    TriggerEventQueue queue;
-    boolean replay;
-    volatile boolean isClosed;
-
-    TriggerWrapper(AutoScaling.Trigger trigger, SolrCloudManager cloudManager, Stats stats) throws IOException {
-      this.trigger = trigger;
-      this.queue = new TriggerEventQueue(cloudManager, trigger.getName(), stats);
-      this.replay = true;
-      this.isClosed = false;
-    }
-
-    public void setReplay(boolean replay) {
-      this.replay = replay;
-    }
-
-    public boolean enqueue(TriggerEvent event) {
-      if (isClosed) {
-        throw new AlreadyClosedException("ScheduledTrigger " + trigger.getName() + " has been closed.");
-      }
-      return queue.offerEvent(event);
-    }
-
-    public TriggerEvent dequeue() {
-      if (isClosed) {
-        throw new AlreadyClosedException("ScheduledTrigger " + trigger.getName() + " has been closed.");
-      }
-      return queue.pollEvent();
-    }
-
-    @Override
-    public void run() {
-      if (isClosed) {
-        throw new AlreadyClosedException("ScheduledTrigger " + trigger.getName() + " has been closed.");
-      }
-      // fire a trigger only if an action is not pending
-      // note this is not foolproof, e.g. it does not prevent an action from being executed while a trigger
-      // is still executing. There is additional protection against that scenario in the event listener.
-      if (!hasPendingActions.get())  {
-        // this synchronization is usually never under contention
-        // but the only reason to have it here is to ensure that when the set-properties API is used
-        // to change the schedule delay, we can safely cancel the old scheduled task
-        // and create another one with the new delay without worrying about concurrent
-        // execution of the same trigger instance
-        synchronized (TriggerWrapper.this) {
-          // replay accumulated events on first run, if any
-          if (replay) {
-            TriggerEvent event;
-            // peek first without removing - we may crash before calling the listener
-            while ((event = queue.peekEvent()) != null) {
-              // override REPLAYING=true
-              event.getProperties().put(TriggerEvent.REPLAYING, true);
-              if (! trigger.getProcessor().process(event)) {
-                log.error("Failed to re-play event, discarding: " + event);
-              }
-              queue.pollEvent(); // always remove it from queue
-            }
-            // now restore saved state to possibly generate new events from old state on the first run
-            try {
-              trigger.restoreState();
-            } catch (Exception e) {
-              // log but don't throw - see below
-              log.error("Error restoring trigger state " + trigger.getName(), e);
-            }
-            replay = false;
-          }
-          try {
-            trigger.run();
-          } catch (Exception e) {
-            // log but do not propagate exception because an exception thrown from a scheduled operation
-            // will suppress future executions
-            log.error("Unexpected exception from trigger: " + trigger.getName(), e);
-          } finally {
-            // checkpoint after each run
-            trigger.saveState();
-          }
-        }
-      }
-    }
-
-    @Override
-    public void close() throws IOException {
-      isClosed = true;
-      if (scheduledFuture != null) {
-        scheduledFuture.cancel(true);
-      }
-      IOUtils.closeQuietly(trigger);
-    }
-  }
-
-  private class TriggerListeners {
-    Map<String, Map<TriggerEventProcessorStage, List<TriggerListener>>> listenersPerStage = new HashMap<>();
-    Map<String, TriggerListener> listenersPerName = new HashMap<>();
-    ReentrantLock updateLock = new ReentrantLock();
-
-    public TriggerListeners() {
-    }
-
-    private TriggerListeners(Map<String, Map<TriggerEventProcessorStage, List<TriggerListener>>> listenersPerStage,
-                             Map<String, TriggerListener> listenersPerName) {
-      this.listenersPerStage = new HashMap<>();
-      listenersPerStage.forEach((n, listeners) -> {
-        Map<TriggerEventProcessorStage, List<TriggerListener>> perStage = this.listenersPerStage.computeIfAbsent(n, name -> new HashMap<>());
-        listeners.forEach((s, lst) -> {
-          List<TriggerListener> newLst = perStage.computeIfAbsent(s, stage -> new ArrayList<>());
-          newLst.addAll(lst);
-        });
-      });
-      this.listenersPerName = new HashMap<>(listenersPerName);
-    }
-
-    public TriggerListeners copy() {
-      return new TriggerListeners(listenersPerStage, listenersPerName);
-    }
-
-    void setAutoScalingConfig(AutoScalingConfig autoScalingConfig) {
-      updateLock.lock();
-      // we will recreate this from scratch
-      listenersPerStage.clear();
-      try {
-        Set<String> triggerNames = autoScalingConfig.getTriggerConfigs().keySet();
-        Map<String, AutoScalingConfig.TriggerListenerConfig> configs = autoScalingConfig.getTriggerListenerConfigs();
-        Set<String> listenerNames = configs.entrySet().stream().map(entry -> entry.getValue().name).collect(Collectors.toSet());
-        // close listeners for non-existent triggers and non-existent listener configs
-        for (Iterator<Map.Entry<String, TriggerListener>> it = listenersPerName.entrySet().iterator(); it.hasNext(); ) {
-          Map.Entry<String, TriggerListener> entry = it.next();
-          String name = entry.getKey();
-          TriggerListener listener = entry.getValue();
-          if (!triggerNames.contains(listener.getConfig().trigger) || !listenerNames.contains(name)) {
-            try {
-              listener.close();
-            } catch (Exception e) {
-              log.warn("Exception closing old listener " + listener.getConfig(), e);
-            }
-            it.remove();
-          }
-        }
-        for (Map.Entry<String, AutoScalingConfig.TriggerListenerConfig> entry : configs.entrySet()) {
-          AutoScalingConfig.TriggerListenerConfig config = entry.getValue();
-          if (!triggerNames.contains(config.trigger)) {
-            log.debug("-- skipping listener for non-existent trigger: {}", config);
-            continue;
-          }
-          // find previous instance and reuse if possible
-          TriggerListener oldListener = listenersPerName.get(config.name);
-          TriggerListener listener = null;
-          if (oldListener != null) {
-            if (!oldListener.getConfig().equals(config)) { // changed config
-              try {
-                oldListener.close();
-              } catch (Exception e) {
-                log.warn("Exception closing old listener " + oldListener.getConfig(), e);
-              }
-            } else {
-              listener = oldListener; // reuse
-            }
-          }
-          if (listener == null) { // create new instance
-            String clazz = config.listenerClass;
-            try {
-              listener = loader.newInstance(clazz, TriggerListener.class);
-            } catch (Exception e) {
-              log.warn("Invalid TriggerListener class name '" + clazz + "', skipping...", e);
-            }
-            if (listener != null) {
-              try {
-                listener.configure(loader, cloudManager, config);
-                listener.init();
-                listenersPerName.put(config.name, listener);
-              } catch (Exception e) {
-                log.warn("Error initializing TriggerListener " + config, e);
-                IOUtils.closeQuietly(listener);
-                listener = null;
-              }
-            }
-          }
-          if (listener == null) {
-            continue;
-          }
-          // add per stage
-          for (TriggerEventProcessorStage stage : config.stages) {
-            addPerStage(config.trigger, stage, listener);
-          }
-          // add also for beforeAction / afterAction TriggerStage
-          if (!config.beforeActions.isEmpty()) {
-            addPerStage(config.trigger, TriggerEventProcessorStage.BEFORE_ACTION, listener);
-          }
-          if (!config.afterActions.isEmpty()) {
-            addPerStage(config.trigger, TriggerEventProcessorStage.AFTER_ACTION, listener);
-          }
-        }
-      } finally {
-        updateLock.unlock();
-      }
-    }
-
-    private void addPerStage(String triggerName, TriggerEventProcessorStage stage, TriggerListener listener) {
-      Map<TriggerEventProcessorStage, List<TriggerListener>> perStage =
-          listenersPerStage.computeIfAbsent(triggerName, k -> new HashMap<>());
-      List<TriggerListener> lst = perStage.computeIfAbsent(stage, k -> new ArrayList<>(3));
-      lst.add(listener);
-    }
-
-    void reset() {
-      updateLock.lock();
-      try {
-        listenersPerStage.clear();
-        for (TriggerListener listener : listenersPerName.values()) {
-          IOUtils.closeQuietly(listener);
-        }
-        listenersPerName.clear();
-      } finally {
-        updateLock.unlock();
-      }
-    }
-
-    void close() {
-      reset();
-    }
-
-    List<TriggerListener> getTriggerListeners(String trigger, TriggerEventProcessorStage stage) {
-      Map<TriggerEventProcessorStage, List<TriggerListener>> perStage = listenersPerStage.get(trigger);
-      if (perStage == null) {
-        return Collections.emptyList();
-      }
-      List<TriggerListener> lst = perStage.get(stage);
-      if (lst == null) {
-        return Collections.emptyList();
-      } else {
-        return Collections.unmodifiableList(lst);
-      }
-    }
-
-    void fireListeners(String trigger, TriggerEvent event, TriggerEventProcessorStage stage) {
-      fireListeners(trigger, event, stage, null, null, null, null);
-    }
-
-    void fireListeners(String trigger, TriggerEvent event, TriggerEventProcessorStage stage, String message) {
-      fireListeners(trigger, event, stage, null, null, null, message);
-    }
-
-    void fireListeners(String trigger, TriggerEvent event, TriggerEventProcessorStage stage, String actionName,
-                       ActionContext context) {
-      fireListeners(trigger, event, stage, actionName, context, null, null);
-    }
-
-    void fireListeners(String trigger, TriggerEvent event, TriggerEventProcessorStage stage, String actionName,
-                       ActionContext context, Throwable error, String message) {
-      updateLock.lock();
-      try {
-        for (TriggerListener listener : getTriggerListeners(trigger, stage)) {
-          if (!listener.isEnabled()) {
-            continue;
-          }
-          if (actionName != null) {
-            AutoScalingConfig.TriggerListenerConfig config = listener.getConfig();
-            if (stage == TriggerEventProcessorStage.BEFORE_ACTION) {
-              if (!config.beforeActions.contains(actionName)) {
-                continue;
-              }
-            } else if (stage == TriggerEventProcessorStage.AFTER_ACTION) {
-              if (!config.afterActions.contains(actionName)) {
-                continue;
-              }
-            }
-          }
-          try {
-            listener.onEvent(event, stage, actionName, context, error, message);
-          } catch (Exception e) {
-            log.warn("Exception running listener " + listener.getConfig(), e);
-          }
-        }
-      } finally {
-        updateLock.unlock();
-      }
-    }
-  }
-}
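
As a companion to the pause/resume logic deleted above, here is a minimal sketch
(not from this commit; the class name PauseResumeSketch is an illustrative
assumption) of the underlying ScheduledThreadPoolExecutor mechanic: a repeating
task is "paused" by cancelling its future without interruption, and "resumed" by
scheduling a fresh future with an initial delay, as pauseTriggers() and
resumeTriggers(afterDelayMillis) do.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledFuture;
    import java.util.concurrent.ScheduledThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class PauseResumeSketch {
      public static void main(String[] args) throws InterruptedException {
        ScheduledThreadPoolExecutor pool =
            (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1);
        pool.setRemoveOnCancelPolicy(true); // same policy the deleted constructor sets
        Runnable task = () -> System.out.println("trigger ran");
        ScheduledFuture<?> future = pool.scheduleWithFixedDelay(task, 0, 1, TimeUnit.SECONDS);
        Thread.sleep(2500);
        future.cancel(false);               // "pause": do not interrupt an in-flight run
        Thread.sleep(1500);                 // nothing fires while cancelled
        future = pool.scheduleWithFixedDelay(task, 500, 1000, TimeUnit.MILLISECONDS);
        Thread.sleep(2500);                 // "resume" after an initial delay
        pool.shutdownNow();
      }
    }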


[06/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/PivotFacetHelper.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/PivotFacetHelper.java b/solr/core/src/java/org/apache/solr/handler/component/PivotFacetHelper.java
deleted file mode 100644
index 33fe086..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/PivotFacetHelper.java
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.util.PivotListEntry;
-
-public class PivotFacetHelper {
-
-  /**
-   * Encodes a value path as a string for the purposes of a refinement request
-   *
-   * @see PivotFacetValue#getValuePath
-   * @see #decodeRefinementValuePath
-   */
-  public static String encodeRefinementValuePath(List<String> values) {
-    // HACK: prefix-flag every value to account for empty string vs null
-    // NOTE: even if we didn't have to worry about nulls, smartSplit is stupid about
-    // pruning empty strings from the list
-    // "^" prefix = null
-    // "~" prefix = not null, may be empty string
-
-    assert null != values;
-
-    // special case: empty list => empty string
-    if (values.isEmpty()) {
-      return "";
-    }
-    
-    StringBuilder out = new StringBuilder();
-    for (String val : values) {
-      if (null == val) {
-        out.append('^');
-      } else {
-        out.append('~');
-        StrUtils.appendEscapedTextToBuilder(out, val, ',');
-      }
-      out.append(',');
-    }
-    out.deleteCharAt(out.length()-1);  // prune the last separator
-    return out.toString();
-    // return StrUtils.join(values, ',');
-  }
-
-  /**
-   * Decodes a value path string specified for refinement.
-   *
-   * @see #encodeRefinementValuePath
-   */
-  public static List<String> decodeRefinementValuePath(String valuePath) {
-    List<String> rawvals = StrUtils.splitSmart(valuePath, ",", true);
-    // special case: empty list => empty string
-    if (rawvals.isEmpty()) return rawvals;
-
-    List<String> out = new ArrayList<>(rawvals.size());
-    for (String raw : rawvals) {
-      assert 0 < raw.length();
-      if ('^' == raw.charAt(0)) {
-        assert 1 == raw.length();
-        out.add(null);
-      } else {
-        assert '~' == raw.charAt(0);
-        out.add(raw.substring(1));
-      }
-    }
-
-    return out;
-  }
-
-  /** @see PivotListEntry#VALUE */
-  public static Comparable getValue(NamedList<Object> pivotList) {
-    return (Comparable) PivotListEntry.VALUE.extract(pivotList);
-  }
-
-  /** @see PivotListEntry#FIELD */
-  public static String getField(NamedList<Object> pivotList) {
-    return (String) PivotListEntry.FIELD.extract(pivotList);
-  }
-  
-  /** @see PivotListEntry#COUNT */
-  public static Integer getCount(NamedList<Object> pivotList) {
-    return (Integer) PivotListEntry.COUNT.extract(pivotList);
-  }
-
-  /** @see PivotListEntry#PIVOT */
-  public static List<NamedList<Object>> getPivots(NamedList<Object> pivotList) {
-    return (List<NamedList<Object>>) PivotListEntry.PIVOT.extract(pivotList);
-  }
-  
-  /** @see PivotListEntry#STATS */
-  public static NamedList<NamedList<NamedList<?>>> getStats(NamedList<Object> pivotList) {
-    return (NamedList<NamedList<NamedList<?>>>) PivotListEntry.STATS.extract(pivotList);
-  }
-
-  /** @see PivotListEntry#QUERIES */
-  public static NamedList<Number> getQueryCounts(NamedList<Object> pivotList) {
-    return (NamedList<Number>) PivotListEntry.QUERIES.extract(pivotList);
-  }
-  
-  /** @see PivotListEntry#RANGES */
-  public static SimpleOrderedMap<SimpleOrderedMap<Object>> getRanges(NamedList<Object> pivotList) {
-    return (SimpleOrderedMap<SimpleOrderedMap<Object>>) PivotListEntry.RANGES.extract(pivotList);
-  }
-  
-  /**
-   * Given a mapping of keys to {@link StatsValues} representing the currently
-   * known "merged" stats (which may be null if none exist yet), and a
-   * {@link NamedList} containing the "stats" response block returned by an individual
-   * shard, this method accumulates the stats for each {@link StatsField} found in
-   * the shard response into the existing merged stats
-   *
-   * @return the original <code>merged</code> Map after modification, or a new Map if the <code>merged</code> param was originally null.
-   * @see StatsInfo#getStatsField
-   * @see StatsValuesFactory#createStatsValues
-   * @see StatsValues#accumulate(NamedList)
-   */
-  public static Map<String,StatsValues> mergeStats
-    (Map<String,StatsValues> merged, 
-     NamedList<NamedList<NamedList<?>>> remoteWrapper, 
-     StatsInfo statsInfo) {
-
-    if (null == merged) merged = new LinkedHashMap<>();
-
-    NamedList<NamedList<?>> remoteStats = StatsComponent.unwrapStats(remoteWrapper);
-
-    for (Entry<String,NamedList<?>> entry : remoteStats) {
-      StatsValues receivingStatsValues = merged.get(entry.getKey());
-      if (receivingStatsValues == null) {
-        StatsField receivingStatsField = statsInfo.getStatsField(entry.getKey());
-        if (null == receivingStatsField) {
-          throw new SolrException(ErrorCode.SERVER_ERROR , "No stats.field found corresponding to pivot stats received from shard: "+entry.getKey());
-        }
-        receivingStatsValues = StatsValuesFactory.createStatsValues(receivingStatsField);
-        merged.put(entry.getKey(), receivingStatsValues);
-      }
-      receivingStatsValues.accumulate(entry.getValue());
-    }
-    return merged;
-  }
-
-  /**
-   * Merges query counts returned by a shard into global query counts.
-   * Entries found only in the shard's query counts will be added to the global counts.
-   * Entries found in both shard and global query counts will be summed.
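-   * For example, global {a=3, b=1} merged with shard {b=2, c=5} yields {a=3, b=3, c=5}.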
-   *
-   * @param globalQueryCounts The global query counts (across all shards) in which to merge the shard query counts
-   * @param shardQueryCounts  Named list from a shard response to be merged into the global counts.
-   * @return NamedList containing merged values
-   */
-  static NamedList<Number> mergeQueryCounts(
-      NamedList<Number> globalQueryCounts, NamedList<Number> shardQueryCounts) {
-    if (globalQueryCounts == null) {
-      return shardQueryCounts;
-    }
-    for (Entry<String, Number> entry : shardQueryCounts) {
-      int idx = globalQueryCounts.indexOf(entry.getKey(), 0);
-      if (idx == -1) {
-        globalQueryCounts.add(entry.getKey(), entry.getValue());
-      } else {
-        globalQueryCounts.setVal(idx, FacetComponent.num(globalQueryCounts.getVal(idx).longValue() + entry.getValue().longValue()));
-      }
-    }
-    return globalQueryCounts;
-  }
-}
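
The refinement value-path encoding deleted above is compact but subtle, so here is
a hedged round-trip sketch of the same scheme: '^' marks a null entry, '~' marks a
non-null (possibly empty) entry, and entries are comma-separated. The class name
ValuePathSketch is an illustrative assumption, and the backslash escaping stands in
for StrUtils.appendEscapedTextToBuilder / splitSmart, to which the real code
delegates.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class ValuePathSketch {
      // '^' = null, '~' = non-null; commas and backslashes inside values are escaped
      static String encode(List<String> values) {
        if (values.isEmpty()) return "";
        StringBuilder out = new StringBuilder();
        for (String val : values) {
          if (val == null) {
            out.append('^');
          } else {
            out.append('~');
            for (char c : val.toCharArray()) {
              if (c == ',' || c == '\\') out.append('\\');
              out.append(c);
            }
          }
          out.append(',');
        }
        out.deleteCharAt(out.length() - 1); // prune the trailing separator
        return out.toString();
      }

      static List<String> decode(String path) {
        List<String> out = new ArrayList<>();
        if (path.isEmpty()) return out;     // special case: empty string => empty list
        StringBuilder cur = new StringBuilder();
        boolean escaped = false;
        for (char c : path.toCharArray()) {
          if (escaped) { cur.append(c); escaped = false; }
          else if (c == '\\') { escaped = true; }
          else if (c == ',') { out.add(unflag(cur.toString())); cur.setLength(0); }
          else { cur.append(c); }
        }
        out.add(unflag(cur.toString()));
        return out;
      }

      private static String unflag(String raw) {
        return raw.charAt(0) == '^' ? null : raw.substring(1);
      }

      public static void main(String[] args) {
        List<String> vals = Arrays.asList("a,b", null, "");
        String path = encode(vals);         // "~a\,b,^,~"
        System.out.println(path + " -> " + decode(path));
      }
    }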

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/PivotFacetProcessor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/PivotFacetProcessor.java b/solr/core/src/java/org/apache/solr/handler/component/PivotFacetProcessor.java
deleted file mode 100644
index 011d662..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/PivotFacetProcessor.java
+++ /dev/null
@@ -1,441 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.solr.common.StringUtils;
-import org.apache.solr.common.params.RequiredSolrParams;
-import org.apache.solr.schema.SchemaField;
-import org.apache.solr.schema.FieldType;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.search.DocSet;
-import org.apache.solr.search.SyntaxError;
-import org.apache.solr.util.PivotListEntry;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.params.FacetParams;
-import org.apache.solr.common.params.StatsParams;
-import org.apache.solr.request.SimpleFacets;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.lucene.search.Query;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Deque;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Processes all Pivot facet logic for a single node -- both non-distrib and per-shard
- */
-public class PivotFacetProcessor extends SimpleFacets
-{
-  public static final String QUERY = "query";
-  public static final String RANGE = "range";
-  protected SolrParams params;
-    
-  public PivotFacetProcessor(SolrQueryRequest req, DocSet docs, SolrParams params, ResponseBuilder rb) {
-    super(req, docs, params, rb);
-    this.params = params;
-  }
-  
-  /**
-   * Processes all of the specified {@link FacetParams#FACET_PIVOT} strings, generating 
-   * a complete response tree for each pivot.  The values in this response will either
-   * be the complete tree of fields and values for the specified pivot in the local index, 
-   * or the requested refinements if the pivot params include the {@link PivotFacet#REFINE_PARAM}
-   */
-  public SimpleOrderedMap<List<NamedList<Object>>> process(String[] pivots) throws IOException {
-    if (!rb.doFacets || pivots == null) 
-      return null;
-    
-    // rb._statsInfo may be null if stats=false, i.e. for refine requests;
-    // if that's the case but we need to refine w/ stats, then we'll lazy init our
-    // own instance of StatsInfo
-    StatsInfo statsInfo = rb._statsInfo; 
-
-    SimpleOrderedMap<List<NamedList<Object>>> pivotResponse = new SimpleOrderedMap<>();
-    for (String pivotList : pivots) {
-      final ParsedParams parsed;
-      
-      try {
-        parsed = this.parseParams(FacetParams.FACET_PIVOT, pivotList);
-      } catch (SyntaxError e) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, e);
-      }
-      List<String> pivotFields = StrUtils.splitSmart(parsed.facetValue, ",", true);
-      if( pivotFields.size() < 1 ) {
-        throw new SolrException( ErrorCode.BAD_REQUEST,
-                                 "Pivot Facet needs at least one field name: " + pivotList);
-      } else {
-        SolrIndexSearcher searcher = rb.req.getSearcher();
-        for (String fieldName : pivotFields) {
-          SchemaField sfield = searcher.getSchema().getField(fieldName);
-          if (sfield == null) {
-            throw new SolrException(ErrorCode.BAD_REQUEST, "\"" + fieldName + "\" is not a valid field name in pivot: " + pivotList);
-          }
-        }
-      } 
-
-      // start by assuming no local params...
-
-      String refineKey = null; // no local => no refinement
-      List<StatsField> statsFields = Collections.emptyList(); // no local => no stats
-      List<FacetComponent.FacetBase> facetQueries = Collections.emptyList();
-      List<RangeFacetRequest> facetRanges = Collections.emptyList();
-      if (null != parsed.localParams) {
-        // we might be refining..
-        refineKey = parsed.localParams.get(PivotFacet.REFINE_PARAM);
-        
-        String statsLocalParam = parsed.localParams.get(StatsParams.STATS);
-        if (null != refineKey
-            && null != statsLocalParam
-            && null == statsInfo) {
-          // we are refining and need to compute stats, 
-          // but stats component hasn't inited StatsInfo (because we
-          // don't need/want top level stats when refining) so we lazy init
-          // our own copy of StatsInfo
-          statsInfo = new StatsInfo(rb);
-        }
-        statsFields = getTaggedStatsFields(statsInfo, statsLocalParam);
-
-        try {
-          FacetComponent.FacetContext facetContext = FacetComponent.FacetContext.getFacetContext(req);
-
-          String taggedQueries = parsed.localParams.get(QUERY);
-          if (StringUtils.isEmpty(taggedQueries))  {
-            facetQueries = Collections.emptyList();
-          } else  {
-            List<String> localParamValue = StrUtils.splitSmart(taggedQueries, ',');
-            if (localParamValue.size() > 1) {
-              String msg = QUERY + " local param of " + FacetParams.FACET_PIVOT +
-                  " may not include tags separated by a comma - please use a common tag on all " +
-                  FacetParams.FACET_QUERY + " params you wish to compute under this pivot";
-              throw new SolrException(ErrorCode.BAD_REQUEST, msg);
-            }
-            taggedQueries = localParamValue.get(0);
-            facetQueries = facetContext.getQueryFacetsForTag(taggedQueries);
-          }
-
-          String taggedRanges = parsed.localParams.get(RANGE);
-          if (StringUtils.isEmpty(taggedRanges)) {
-            facetRanges = Collections.emptyList();
-          } else  {
-            List<String> localParamValue = StrUtils.splitSmart(taggedRanges, ',');
-            if (localParamValue.size() > 1) {
-              String msg = RANGE + " local param of " + FacetParams.FACET_PIVOT +
-                  " may not include tags separated by a comma - please use a common tag on all " +
-                  FacetParams.FACET_RANGE + " params you wish to compute under this pivot";
-              throw new SolrException(ErrorCode.BAD_REQUEST, msg);
-            }
-            taggedRanges = localParamValue.get(0);
-            facetRanges = facetContext.getRangeFacetRequestsForTag(taggedRanges);
-          }
-        } catch (IllegalStateException e) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "Faceting context not set, cannot calculate pivot values");
-        }
-      }
-
-      if (null != refineKey) {
-        String[] refinementValuesByField 
-          = params.getParams(PivotFacet.REFINE_PARAM + refineKey);
-
-        for(String refinements : refinementValuesByField){
-          pivotResponse.addAll(processSingle(pivotFields, refinements, statsFields, parsed, facetQueries, facetRanges));
-        }
-      } else{
-        pivotResponse.addAll(processSingle(pivotFields, null, statsFields, parsed, facetQueries, facetRanges));
-      }
-    }
-    return pivotResponse;
-  }
-
-  /**
-   * Process a single branch of refinement values for a specific pivot
-   * @param pivotFields the ordered list of fields in this pivot
-   * @param refinements the comma-separated list of refinement values corresponding to each field in the pivot, or null if there are no refinements
-   * @param statsFields List of {@link StatsField} instances to compute for each pivot value
-   * @param facetQueries the list of facet queries hung under this pivot
-   * @param facetRanges the list of facet ranges hung under this pivot
-   */
-  private SimpleOrderedMap<List<NamedList<Object>>> processSingle
-  (List<String> pivotFields,
-   String refinements,
-   List<StatsField> statsFields,
-   final ParsedParams parsed,
-   List<FacetComponent.FacetBase> facetQueries,
-   List<RangeFacetRequest> facetRanges) throws IOException {
-
-    SolrIndexSearcher searcher = rb.req.getSearcher();
-    SimpleOrderedMap<List<NamedList<Object>>> pivotResponse = new SimpleOrderedMap<>();
-
-    String field = pivotFields.get(0);
-    SchemaField sfield = searcher.getSchema().getField(field);
-      
-    Deque<String> fnames = new LinkedList<>();
-    for( int i = pivotFields.size()-1; i>1; i-- ) {
-      fnames.push( pivotFields.get(i) );
-    }
-    
-    NamedList<Integer> facetCounts;
-    Deque<String> vnames = new LinkedList<>();
-
-    if (null != refinements) {
-      // All values, split by the field they should go to
-      List<String> refinementValuesByField
-        = PivotFacetHelper.decodeRefinementValuePath(refinements);
-
-      for( int i=refinementValuesByField.size()-1; i>0; i-- ) {
-        vnames.push(refinementValuesByField.get(i));//Only for [1] and on
-      }
-
-      String firstFieldsValues = refinementValuesByField.get(0);
-
-      facetCounts = new NamedList<>();
-      facetCounts.add(firstFieldsValues,
-                      getSubsetSize(parsed.docs, sfield, firstFieldsValues));
-    } else {
-      // no refinements needed
-      facetCounts = this.getTermCountsForPivots(field, parsed);
-    }
-    
-    if(pivotFields.size() > 1) {
-      String subField = pivotFields.get(1);
-      pivotResponse.add(parsed.key,
-                        doPivots(facetCounts, field, subField, fnames, vnames, parsed, statsFields, facetQueries, facetRanges));
-    } else {
-      pivotResponse.add(parsed.key, doPivots(facetCounts, field, null, fnames, vnames, parsed, statsFields, facetQueries, facetRanges));
-    }
-    return pivotResponse;
-  }
-  
-  /**
-   * returns the {@link StatsField} instances that should be computed for a pivot
-   * based on the 'stats' local params used.
-   *
-   * @return A list of StatsFields to compute for this pivot, or the empty list if none
-   */
-  private static List<StatsField> getTaggedStatsFields(StatsInfo statsInfo, 
-                                                       String statsLocalParam) {
-    if (null == statsLocalParam || null == statsInfo) {
-      return Collections.emptyList();
-    }
-    
-    List<StatsField> fields = new ArrayList<>(7);
-    List<String> statsAr = StrUtils.splitSmart(statsLocalParam, ',');
-
-    // TODO: for now, we only support a single tag name - we reserve using 
-    // ',' as a possible delimiter for logic related to only computing stats
-    // at certain levels -- see SOLR-6663
-    if (1 < statsAr.size()) {
-      String msg = StatsParams.STATS + " local param of " + FacetParams.FACET_PIVOT +
-        " may not include tags separated by a comma - please use a common tag on all " +
-        StatsParams.STATS_FIELD + " params you wish to compute under this pivot";
-      throw new SolrException(ErrorCode.BAD_REQUEST, msg);
-    }
-
-    for(String stat : statsAr) {
-      fields.addAll(statsInfo.getStatsFieldsByTag(stat));
-    }
-    return fields;
-  }
-
-  /**
-   * Recursive function to compute all the pivot counts for the values under the specified field
-   */
-  protected List<NamedList<Object>> doPivots(NamedList<Integer> superFacets,
-                                             String field, String subField,
-                                             Deque<String> fnames, Deque<String> vnames,
-                                             ParsedParams parsed, List<StatsField> statsFields,
-                                             List<FacetComponent.FacetBase> facetQueries, List<RangeFacetRequest> facetRanges)
-      throws IOException {
-
-    boolean isShard = rb.req.getParams().getBool(ShardParams.IS_SHARD, false);
-
-    SolrIndexSearcher searcher = rb.req.getSearcher();
-    // TODO: optimize to avoid converting to an external string and then having to convert back to internal below
-    SchemaField sfield = searcher.getSchema().getField(field);
-    FieldType ftype = sfield.getType();
-
-    String nextField = fnames.poll();
-
-    // re-usable BytesRefBuilder for conversion of term values to Objects
-    BytesRefBuilder termval = new BytesRefBuilder(); 
-
-    List<NamedList<Object>> values = new ArrayList<>( superFacets.size() );
-    for (Map.Entry<String, Integer> kv : superFacets) {
-      // Only sub-facet if parent facet has positive count - still may not be any values for the sub-field though
-      if (kv.getValue() >= getMinCountForField(field)) {  
-        final String fieldValue = kv.getKey();
-        final int pivotCount = kv.getValue();
-
-        SimpleOrderedMap<Object> pivot = new SimpleOrderedMap<>();
-        pivot.add( "field", field );
-        if (null == fieldValue) {
-          pivot.add( "value", null );
-        } else {
-          ftype.readableToIndexed(fieldValue, termval);
-          pivot.add( "value", ftype.toObject(sfield, termval.get()) );
-        }
-        pivot.add( "count", pivotCount );
-
-        final DocSet subset = getSubset(parsed.docs, sfield, fieldValue);
-        
-        addPivotQueriesAndRanges(pivot, params, subset, facetQueries, facetRanges);
-
-        if( subField != null )  {
-          NamedList<Integer> facetCounts;
-          if(!vnames.isEmpty()){
-            String val = vnames.pop();
-            facetCounts = new NamedList<>();
-            facetCounts.add(val, getSubsetSize(subset,
-                                               searcher.getSchema().getField(subField),
-                                               val));
-          } else {
-            facetCounts = this.getTermCountsForPivots(subField, parsed.withDocs(subset));
-          }
-
-          if (facetCounts.size() >= 1) {
-            pivot.add( "pivot", doPivots( facetCounts, subField, nextField, fnames, vnames, parsed.withDocs(subset), statsFields, facetQueries, facetRanges) );
-          }
-        }
-        if ((isShard || 0 < pivotCount) && ! statsFields.isEmpty()) {
-          Map<String, StatsValues> stv = new LinkedHashMap<>();
-          for (StatsField statsField : statsFields) {
-            stv.put(statsField.getOutputKey(), statsField.computeLocalStatsValues(subset));
-          }
-          pivot.add("stats", StatsComponent.convertToResponse(stv));
-        }
-        values.add( pivot );
-      }
-
-    }
-    // put the field back on the list
-    fnames.push( nextField );
-    return values;
-  }
-  
-  /**
-   * Given a base docset, computes the size of the subset of documents corresponding to the specified pivotValue
-   *
-   * @param base the set of documents to evaluate relative to
-   * @param field the field type used by the pivotValue
-   * @param pivotValue String representation of the value, may be null (ie: "missing")
-   */
-  private int getSubsetSize(DocSet base, SchemaField field, String pivotValue) throws IOException {
-    FieldType ft = field.getType();
-    if ( null == pivotValue ) {
-      Query query = ft.getRangeQuery(null, field, null, null, false, false);
-      DocSet hasVal = searcher.getDocSet(query);
-      return base.andNotSize(hasVal);
-    } else {
-      Query query = ft.getFieldQuery(null, field, pivotValue);
-      return searcher.numDocs(query, base);
-    }
-  }
-
-  /**
-   * Given a base docset, computes the subset of documents corresponding to the specified pivotValue
-   *
-   * @param base the set of documents to evaluate relative to
-   * @param field the field type used by the pivotValue
-   * @param pivotValue String representation of the value, may be null (ie: "missing")
-   */
-  private DocSet getSubset(DocSet base, SchemaField field, String pivotValue) throws IOException {
-    FieldType ft = field.getType();
-    if ( null == pivotValue ) {
-      Query query = ft.getRangeQuery(null, field, null, null, false, false);
-      DocSet hasVal = searcher.getDocSet(query);
-      return base.andNot(hasVal);
-    } else {
-      Query query = ft.getFieldQuery(null, field, pivotValue);
-      return searcher.getDocSet(query, base);
-    }
-  }
-
-  /**
-   * Add facet.queries and facet.ranges to the pivot response if needed
-   * 
-   * @param pivot
-   *          Pivot in which to inject additional data
-   * @param params
-   *          Query parameters.
-   * @param docs
-   *          DocSet of the current pivot to use for computing sub-counts
-   * @param facetQueries
-   *          Tagged facet queries that should be included; must not be null
-   * @param facetRanges
-   *          Tagged facet ranges that should be included; must not be null
-   * @throws IOException
-   *           If searcher has issues finding numDocs.
-   */
-  protected void addPivotQueriesAndRanges(NamedList<Object> pivot, SolrParams params, DocSet docs,
-                                          List<FacetComponent.FacetBase> facetQueries,
-                                          List<RangeFacetRequest> facetRanges) throws IOException {
-    assert null != facetQueries;
-    assert null != facetRanges;
-    
-    if ( ! facetQueries.isEmpty()) {
-      SimpleFacets facets = new SimpleFacets(req, docs, params);
-      NamedList<Integer> res = new SimpleOrderedMap<>();
-      for (FacetComponent.FacetBase facetQuery : facetQueries) {
-        try {
-          ParsedParams parsed = getParsedParams(params, docs, facetQuery);
-          facets.getFacetQueryCount(parsed, res);
-        } catch (SyntaxError e) {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-                                  "Invalid " + FacetParams.FACET_QUERY + " (" + facetQuery.facetStr +
-                                  ") cause: " + e.getMessage(), e);
-        }
-      }
-      pivot.add(PivotListEntry.QUERIES.getName(), res);
-    }
-    if ( ! facetRanges.isEmpty()) {
-      RangeFacetProcessor rangeFacetProcessor = new RangeFacetProcessor(req, docs, params, null);
-      NamedList<Object> resOuter = new SimpleOrderedMap<>();
-      for (RangeFacetRequest rangeFacet : facetRanges) {
-        try {
-          rangeFacetProcessor.getFacetRangeCounts(rangeFacet, resOuter);
-        } catch (SyntaxError e) {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-                                  "Invalid " + FacetParams.FACET_RANGE + " (" + rangeFacet.facetStr +
-                                  ") cause: " + e.getMessage(), e);
-        }
-      }
-      pivot.add(PivotListEntry.RANGES.getName(), resOuter);
-    }
-  }
-
-  private ParsedParams getParsedParams(SolrParams params, DocSet docs, FacetComponent.FacetBase facet) {
-    SolrParams wrapped = SolrParams.wrapDefaults(facet.localParams, global);
-    SolrParams required = new RequiredSolrParams(params);
-    return new ParsedParams(facet.localParams, wrapped, required, facet.facetOn, docs, facet.getKey(), facet.getTags(), -1);
-  }
-
-  private int getMinCountForField(String fieldname){
-    return params.getFieldInt(fieldname, FacetParams.FACET_PIVOT_MINCOUNT, 1);
-  }
-  
-}
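
For context, a sketch of the request shape this processor handles: the
'stats', 'query', and 'range' local params parsed above each name a tag,
which is resolved against the stats.field / facet.query / facet.range
params carrying a matching {!tag=...}. Field names, tag names, and values
below are hypothetical:

    facet=true&stats=true
    facet.pivot={!stats=st1 query=q1 range=r1}cat,inStock
    stats.field={!tag=st1}price
    facet.query={!tag=q1}price:[0 TO 100]
    facet.range={!tag=r1}manufacturedate_dt
    facet.range.start=2017-01-01T00:00:00Z
    facet.range.end=2018-01-01T00:00:00Z
    facet.range.gap=+1MONTH

Each tagged stat, query, and range is then computed against the DocSet of
every pivot value, per doPivots() and addPivotQueriesAndRanges() above.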

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/PivotFacetValue.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/PivotFacetValue.java b/solr/core/src/java/org/apache/solr/handler/component/PivotFacetValue.java
deleted file mode 100644
index 3280c6c..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/PivotFacetValue.java
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.util.BitSet;
-import java.util.Date;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-
-import org.apache.solr.common.params.FacetParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.util.PivotListEntry;
-
-/**
- * Models a single (value, count) pair that will exist in the collection of values for a 
- * {@link PivotFacetField} parent.  This <code>PivotFacetValue</code> may itself have a 
- * nested {@link PivotFacetField} child
- *
- * @see PivotFacetField
- * @see PivotFacetFieldValueCollection
- */
-@SuppressWarnings("rawtypes")
-public class PivotFacetValue {
-    
-  private final BitSet sourceShards = new BitSet();
-  private final PivotFacetField parentPivot;
-  private final Comparable value;
-  // child can't be final, circular ref on construction
-  private PivotFacetField childPivot = null; 
-  private int count; // mutable
-  private Map<String, StatsValues> statsValues = null;
-  // named list with objects because depending on how big the counts are we may get either a long or an int
-  private NamedList<Number> queryCounts;
-  private LinkedHashMap<String, RangeFacetRequest.DistribRangeFacet> rangeCounts;
-
-  private PivotFacetValue(PivotFacetField parent, Comparable val) { 
-    this.parentPivot = parent;
-    this.value = val;
-  }
-
-  /** 
-   * The value of the associated field modeled by this <code>PivotFacetValue</code>.
-   * May be null if this <code>PivotFacetValue</code> models the count for docs 
-   * "missing" the field value.
-   *
-   * @see FacetParams#FACET_MISSING
-   */
-  public Comparable getValue() { return value; }
-
-  /** The count corresponding to the value modeled by this <code>PivotFacetValue</code> */
-  public int getCount() { return count; }
-
-  /** 
-   * The {@link PivotFacetField} corresponding to the nested child pivot for this
-   * <code>PivotFacetValue</code>. May be null if this object is the leaf of a pivot.
-   */
-  public PivotFacetField getChildPivot() { return childPivot; }
-
-
-  /** 
-   * A recursive method that walks up the tree of pivot fields/values to build 
-   * a list of the String representations of the values that lead down to this 
-   * PivotFacetValue.
-   *
-   * @return a mutable List of the pivot value Strings leading down to and including 
-   *      this pivot value, will never be null but may contain nulls
-   * @see PivotFacetField#getValuePath
-   */
-  public List<String> getValuePath() {
-    List<String> out = parentPivot.getValuePath();
-
-    // Note: this code doesn't play nice with custom FieldTypes -- see SOLR-6330
-
-    if (null == value) {
-      out.add(null);
-    } else if (value instanceof Date) {
-      out.add(((Date) value).toInstant().toString());
-    } else {
-      out.add(value.toString());
-    }
-    return out;
-  }
-
-  /**
-   * A recursive method to construct a new <code>PivotFacetValue</code> object from 
-   * the contents of the {@link NamedList} provided by the specified shard, relative 
-   * to the specified field.  
-   *
-   * If the <code>NamedList</code> contains data for a child {@link PivotFacetField},
-   * that child will be recursively built as well.
-   *
-   * @see PivotFacetField#createFromListOfNamedLists
-   * @param shardNumber the id of the shard that provided this data
-   * @param rb The response builder of the current request
-   * @param parentField the parent field in the current pivot associated with this value
-   * @param pivotData the data from the specified shard for this pivot value
-   */
-  @SuppressWarnings("unchecked")
-  public static PivotFacetValue createFromNamedList(int shardNumber, ResponseBuilder rb, PivotFacetField parentField, NamedList<Object> pivotData) {
-    
-    Comparable pivotVal = null;
-    int pivotCount = 0;
-    List<NamedList<Object>> childPivotData = null;
-    NamedList<NamedList<NamedList<?>>> statsValues = null;
-    NamedList<Number> queryCounts = null;
-    SimpleOrderedMap<SimpleOrderedMap<Object>> ranges = null;
-    
-    for (int i = 0; i < pivotData.size(); i++) {
-      String key = pivotData.getName(i);
-      Object value = pivotData.getVal(i);
-      PivotListEntry entry = PivotListEntry.get(key);
-      
-      switch (entry) {
-
-      case VALUE: 
-        pivotVal = (Comparable)value;
-        break;
-      case FIELD:
-        assert parentField.field.equals(value) 
-          : "Parent Field mismatch: " + parentField.field + "!=" + value;
-        break;
-      case COUNT:
-        pivotCount = (Integer)value;
-        break;
-      case PIVOT:
-        childPivotData = (List<NamedList<Object>>)value;
-        break;
-      case STATS:
-        statsValues = (NamedList<NamedList<NamedList<?>>>) value;
-        break;
-      case QUERIES:
-        queryCounts = (NamedList<Number>) value;
-        break;
-      case RANGES:
-        ranges = (SimpleOrderedMap<SimpleOrderedMap<Object>>) value;
-        break;
-      default:
-        throw new RuntimeException("PivotListEntry contains unaccounted for item: " + entry);
-      }
-    }    
-
-    PivotFacetValue newPivotFacet = new PivotFacetValue(parentField, pivotVal);
-    newPivotFacet.count = pivotCount;
-    newPivotFacet.sourceShards.set(shardNumber);
-    if(statsValues != null) {
-      newPivotFacet.statsValues = PivotFacetHelper.mergeStats(null, statsValues, rb._statsInfo);
-    }
-    if(queryCounts != null) {
-      newPivotFacet.queryCounts = PivotFacetHelper.mergeQueryCounts(null, queryCounts);
-    }
-    if(ranges != null) {
-      newPivotFacet.rangeCounts = new LinkedHashMap<>();
-      RangeFacetRequest.DistribRangeFacet.mergeFacetRangesFromShardResponse(newPivotFacet.rangeCounts, ranges);
-    }
-    
-    newPivotFacet.childPivot = PivotFacetField.createFromListOfNamedLists(shardNumber, rb, newPivotFacet, childPivotData);
-    
-    return newPivotFacet;
-  }
-
-  /** 
-   * A <b>NON-Recursive</b> method indicating if the specified shard has already
-   * contributed to the count for this value.
-   */
-  public boolean shardHasContributed(int shardNum) {
-    return sourceShards.get(shardNum);
-  }
-  
-  /** 
-   * A recursive method for generating a NamedList from this value suitable for 
-   * including in a pivot facet response to the original distributed request.
-   *
-   * @see PivotFacetField#convertToListOfNamedLists
-   */
-  public NamedList<Object> convertToNamedList() {
-    NamedList<Object> newList = new SimpleOrderedMap<>();
-    newList.add(PivotListEntry.FIELD.getName(), parentPivot.field);
-    newList.add(PivotListEntry.VALUE.getName(), value);    
-    newList.add(PivotListEntry.COUNT.getName(), count);      
-    if(queryCounts != null) {
-      newList.add(PivotListEntry.QUERIES.getName(), queryCounts);
-    }
-    if(rangeCounts != null) {
-      SimpleOrderedMap<SimpleOrderedMap<Object>> rangeFacetOutput = new SimpleOrderedMap<>();
-      for (Map.Entry<String, RangeFacetRequest.DistribRangeFacet> entry : rangeCounts.entrySet()) {
-        String key = entry.getKey();
-        RangeFacetRequest.DistribRangeFacet value = entry.getValue();
-        rangeFacetOutput.add(key, value.rangeFacet);
-      }
-      newList.add(PivotListEntry.RANGES.getName(), rangeFacetOutput);
-    }
-    if (childPivot != null && childPivot.convertToListOfNamedLists() != null) {
-      newList.add(PivotListEntry.PIVOT.getName(), childPivot.convertToListOfNamedLists());
-    }
-    if (null != statsValues) {
-      newList.add(PivotListEntry.STATS.getName(), 
-                  StatsComponent.convertToResponse(statsValues));
-    }
-    return newList;
-  }      
-  
-  /**
-   * Merges in the count contributions from the specified shard.
-   * This method is recursive if the shard data includes sub-pivots.
-   *
-   * @see PivotFacetField#contributeFromShard
-   * @see PivotFacetField#createFromListOfNamedLists
-   */
-  public void mergeContributionFromShard(int shardNumber, ResponseBuilder rb, NamedList<Object> value) {
-    assert null != value : "can't merge in null data";
-    
-    if (!shardHasContributed(shardNumber)) {
-      sourceShards.set(shardNumber);      
-      count += PivotFacetHelper.getCount(value);
-      NamedList<NamedList<NamedList<?>>> stats = PivotFacetHelper.getStats(value);
-      if (stats != null) {
-        statsValues = PivotFacetHelper.mergeStats(statsValues, stats, rb._statsInfo);
-      }
-      NamedList<Number> shardQueryCounts = PivotFacetHelper.getQueryCounts(value);
-      if(shardQueryCounts != null) {
-        queryCounts = PivotFacetHelper.mergeQueryCounts(queryCounts, shardQueryCounts);
-      }
-      SimpleOrderedMap<SimpleOrderedMap<Object>> shardRanges = PivotFacetHelper.getRanges(value);
-      if (shardRanges != null)  {
-        if (rangeCounts == null)  {
-          rangeCounts = new LinkedHashMap<>(shardRanges.size() / 2);
-        }
-        RangeFacetRequest.DistribRangeFacet.mergeFacetRangesFromShardResponse(rangeCounts, shardRanges);
-      }
-    }
-    
-    List<NamedList<Object>> shardChildPivots = PivotFacetHelper.getPivots(value);
-    // sub pivot -- we may not have seen this yet depending on refinement
-    if (null == childPivot) {
-      childPivot = PivotFacetField.createFromListOfNamedLists(shardNumber, rb,  this,  shardChildPivots);
-    } else {
-      childPivot.contributeFromShard(shardNumber, rb, shardChildPivots);
-    }
-  }
-
-  public String toString(){
-    return String.format(Locale.ROOT, "F:%s V:%s Co:%d Ch?:%s", 
-                         parentPivot.field, value, count, (this.childPivot !=null));
-  }
-  
-}
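
As a rough sketch (hypothetical values), convertToNamedList() above emits
one entry per pivot value using the PivotListEntry names, with the
optional sections present only when the corresponding data was collected:

    field:   "cat"           (PivotListEntry.FIELD)
    value:   "electronics"   (PivotListEntry.VALUE; null models "missing")
    count:   14              (PivotListEntry.COUNT)
    queries: {...}           (only if queryCounts != null)
    ranges:  {...}           (only if rangeCounts != null)
    pivot:   [...]           (nested child values, if any)
    stats:   {...}           (only if statsValues != null)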


[50/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/build.gradle
----------------------------------------------------------------------
diff --git a/build.gradle b/build.gradle
index c04359c..7c5c062 100644
--- a/build.gradle
+++ b/build.gradle
@@ -7,8 +7,26 @@
 allprojects {
     repositories {
     	mavenCentral()
+        maven {
+            url "https://oss.sonatype.org/content/repositories/releases"
+        }
+        maven {
+            url "http://maven.restlet.org"
+        }
+        maven {
+            url "http://repository.cloudera.com/content/repositories/releases"
+        }
+        // additional maven central mirror
+        maven {
+            url "http://uk.maven.org/maven2"
+        }
         jcenter() 
     }
+    configurations {
+	    all {
+	        // exclude group: 'com.google.guava', module: 'guava'
+	    }
+	}
 }
 
 
@@ -33,6 +51,7 @@ def tika_version = '1.19.1'
 def bouncycastle_version = '1.60'
 def morfologik_version = '2.1.5'
 def codehaus_jackson_version = '1.9.13'
+def codehaus_janino_version = '2.7.6'
 def jetty_version = '9.4.11.v20180605'
 def vorbis_java_version = '0.8'
 def mortbay_jetty_version = '6.1.26'
@@ -47,15 +66,15 @@ ext.library = [
 	randomizedtesting_junit4: "com.carrotsearch.randomizedtesting:junit4-ant:$randomizedtesting_version",
 	randomizedtesting_runner: "com.carrotsearch.randomizedtesting:randomizedtesting-runner:$randomizedtesting_version",
 
-	hppc: "com.carrotsearch.hppc:0.8.1",
+	hppc: "com.carrotsearch:hppc:0.8.1",
 	langdetect: "com.cybozu.labs:langdetect:1.1-20120112",
 	metadata_extractor: "com.drewnoakes:metadata-extractor:2.11.0",
 	parso: "com.epam:parso:2.0.9",
 
-	jackson_annotations: "com.fasterxml.jackson.core:$jackson_version",
-	jackson_core: "com.fasterxml.jackson.core:$jackson_version",
-	jackson_databind: "com.fasterxml.jackson.core:$jackson_version",
-	jackson_dataformat_smile: "com.fasterxml.jackson.dataformat:$jackson_version",
+	jackson_annotations: "com.fasterxml.jackson.core:jackson-annotations:$jackson_version",
+	jackson_core: "com.fasterxml.jackson.core:jackson-core:$jackson_version",
+	jackson_databind: "com.fasterxml.jackson.core:jackson-databind:$jackson_version",
+	jackson_dataformat_smile: "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:$jackson_version",
 
 	caffeine: "com.github.ben-manes.caffeine:caffeine:2.4.0",
 	curvesapi: "com.github.virtuald:curvesapi:1.04",
@@ -230,9 +249,10 @@ ext.library = [
 
 	jackson_core_asl: "org.codehaus.jackson:jackson-core-asl:$codehaus_jackson_version",
 	jackson_mapper_asl: "org.codehaus.jackson:jackson-mapper-asl:$codehaus_jackson_version",
-	commons_compiler: "org.codehaus.janino:commons-compiler:$codehaus_jackson_version",
 
-	janino: "org.codehaus.janino:janino:2.7.6",
+	commons_compiler: "org.codehaus.janino:commons-compiler:$codehaus_janino_version",
+	janino: "org.codehaus.janino:janino:$codehaus_janino_version",
+
 	stax2_api: "org.codehaus.woodstox:stax2-api:3.1.4",
 	woodstox_core_asl: "org.codehaus.woodstox:woodstox-core-asl:4.4.1",
 
@@ -261,9 +281,9 @@ ext.library = [
 	spatial4j: "org.locationtech.spatial4j:spatial4j:0.7",
 	mockito_core: "org.mockito:mockito-core:2.6.2",
 
-	jetty: "org.mortbay.jetty:jetty:$mortbay_jetty_version",
-	jetty_sslengine: "org.mortbay.jetty:jetty-sslengine:$mortbay_jetty_version",
-	jetty_util: "org.mortbay.jetty:jetty-util:$mortbay_jetty_version",
+	mortbay_jetty: "org.mortbay.jetty:jetty:$mortbay_jetty_version",
+	mortbay_jetty_sslengine: "org.mortbay.jetty:jetty-sslengine:$mortbay_jetty_version",
+	mortbay_jetty_util: "org.mortbay.jetty:jetty-util:$mortbay_jetty_version",
 
 	noggit: "org.noggit:noggit:0.8",
 	objenesis: "org.objenesis:objenesis:2.5",

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/lucene/analysis/icu/build.gradle
----------------------------------------------------------------------
diff --git a/lucene/analysis/icu/build.gradle b/lucene/analysis/icu/build.gradle
index 0caeccf..498652f 100644
--- a/lucene/analysis/icu/build.gradle
+++ b/lucene/analysis/icu/build.gradle
@@ -1,7 +1,7 @@
 apply plugin: 'java'
 
 dependencies {
-	compile project(':lucene:core')
+	compile project(':lucene:analysis:common')
 	compile library.icu4j
     testCompile project(':lucene:codecs')
     testCompile project(':lucene:test-framework')

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/lucene/analysis/kuromoji/build.gradle
----------------------------------------------------------------------
diff --git a/lucene/analysis/kuromoji/build.gradle b/lucene/analysis/kuromoji/build.gradle
index f3eca85..6afffe0 100644
--- a/lucene/analysis/kuromoji/build.gradle
+++ b/lucene/analysis/kuromoji/build.gradle
@@ -1 +1,20 @@
-apply plugin: 'java'
\ No newline at end of file
+apply plugin: 'java'
+
+// def urlFile = { url, name ->
+//     File file = new File("$buildDir/download/${name}.jar")
+//     file.parentFile.mkdirs()
+//     if (!file.exists()) {
+//         new URL(url).withInputStream { downloadStream ->
+//             file.withOutputStream { fileOut ->
+//                 fileOut << downloadStream
+//             }
+//         }
+//     }
+//     files(file.absolutePath)
+// }
+
+dependencies {
+	compile project(':lucene:analysis:common')
+
+	testCompile project(':lucene:test-framework')
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/lucene/analysis/morfologik/build.gradle
----------------------------------------------------------------------
diff --git a/lucene/analysis/morfologik/build.gradle b/lucene/analysis/morfologik/build.gradle
index f3eca85..be7f295 100644
--- a/lucene/analysis/morfologik/build.gradle
+++ b/lucene/analysis/morfologik/build.gradle
@@ -1 +1,11 @@
-apply plugin: 'java'
\ No newline at end of file
+apply plugin: 'java'
+
+dependencies {
+	compile project(':lucene:analysis:common')
+	compile library.morfologik_polish
+	compile library.morfologik_fsa
+	compile library.morfologik_stemming
+	compile library.morfologik_ukrainian_search
+
+	testCompile project(':lucene:test-framework')
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/lucene/analysis/morfologik/ivy.xml
----------------------------------------------------------------------
diff --git a/lucene/analysis/morfologik/ivy.xml b/lucene/analysis/morfologik/ivy.xml
index f0cc234..01f8285 100644
--- a/lucene/analysis/morfologik/ivy.xml
+++ b/lucene/analysis/morfologik/ivy.xml
@@ -21,7 +21,7 @@
   <configurations defaultconfmapping="compile->master">
     <conf name="compile" transitive="false"/>
   </configurations>
-  <dependencies>
+  <dependencies> 
     <dependency org="org.carrot2" name="morfologik-polish" rev="${/org.carrot2/morfologik-polish}" conf="compile"/>
     <dependency org="org.carrot2" name="morfologik-fsa" rev="${/org.carrot2/morfologik-fsa}" conf="compile"/>
     <dependency org="org.carrot2" name="morfologik-stemming" rev="${/org.carrot2/morfologik-stemming}" conf="compile"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/lucene/analysis/nori/build.gradle
----------------------------------------------------------------------
diff --git a/lucene/analysis/nori/build.gradle b/lucene/analysis/nori/build.gradle
index f3eca85..a12f611 100644
--- a/lucene/analysis/nori/build.gradle
+++ b/lucene/analysis/nori/build.gradle
@@ -1 +1,7 @@
-apply plugin: 'java'
\ No newline at end of file
+apply plugin: 'java'
+
+dependencies {
+	compile project(':lucene:analysis:common')
+
+	testCompile project(':lucene:test-framework')
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/lucene/analysis/opennlp/build.gradle
----------------------------------------------------------------------
diff --git a/lucene/analysis/opennlp/build.gradle b/lucene/analysis/opennlp/build.gradle
index f3eca85..644f53f 100644
--- a/lucene/analysis/opennlp/build.gradle
+++ b/lucene/analysis/opennlp/build.gradle
@@ -1 +1,8 @@
-apply plugin: 'java'
\ No newline at end of file
+apply plugin: 'java'
+
+dependencies {
+	compile project(':lucene:analysis:common')
+	compile library.opennlp_tools
+
+	testCompile project(':lucene:test-framework')
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/lucene/analysis/phonetic/build.gradle
----------------------------------------------------------------------
diff --git a/lucene/analysis/phonetic/build.gradle b/lucene/analysis/phonetic/build.gradle
index f3eca85..bed8eee 100644
--- a/lucene/analysis/phonetic/build.gradle
+++ b/lucene/analysis/phonetic/build.gradle
@@ -1 +1,8 @@
-apply plugin: 'java'
\ No newline at end of file
+apply plugin: 'java'
+
+dependencies {
+	compile project(':lucene:analysis:common')
+	compile library.commons_codec
+
+	testCompile project(':lucene:test-framework')
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/lucene/analysis/smartcn/build.gradle
----------------------------------------------------------------------
diff --git a/lucene/analysis/smartcn/build.gradle b/lucene/analysis/smartcn/build.gradle
index ff0b040..c61e974 100644
--- a/lucene/analysis/smartcn/build.gradle
+++ b/lucene/analysis/smartcn/build.gradle
@@ -3,4 +3,6 @@ apply plugin: 'java'
 dependencies {
 	compile project(':lucene:analysis:common')
     testCompile project(':lucene:test-framework')
+
+    testCompile project(':lucene:test-framework')
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/lucene/backward-codecs/build.gradle
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/build.gradle b/lucene/backward-codecs/build.gradle
index cd0844a..af193c7 100644
--- a/lucene/backward-codecs/build.gradle
+++ b/lucene/backward-codecs/build.gradle
@@ -1,5 +1,18 @@
 apply plugin: 'java'
 
+task jarTest (type: Jar) {
+    from sourceSets.test.output
+    classifier = 'test'
+}
+
+configurations {
+    testOutput
+}
+
+artifacts {
+    testOutput jarTest
+}
+
 dependencies {
     compile project(':lucene:core')
     testCompile project(':lucene:test-framework')
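
The jarTest task and testOutput configuration added here package this
module's compiled test classes as a 'test'-classified jar so that other
modules can compile against them; solr/core's build file later in this
commit consumes it like so:

    testCompile project(path: ':lucene:backward-codecs', configuration: 'testOutput')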

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/lucene/demo/build.gradle
----------------------------------------------------------------------
diff --git a/lucene/demo/build.gradle b/lucene/demo/build.gradle
index 802b841..9fbbca0 100644
--- a/lucene/demo/build.gradle
+++ b/lucene/demo/build.gradle
@@ -8,4 +8,6 @@ dependencies {
     compile project(':lucene:analysis:common')
     compile project(':lucene:queryparser')
     compile project(':lucene:expressions')
+
+    testCompile project(':lucene:test-framework')
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/lucene/facet/build.gradle
----------------------------------------------------------------------
diff --git a/lucene/facet/build.gradle b/lucene/facet/build.gradle
index 31bddca..08a06c3 100644
--- a/lucene/facet/build.gradle
+++ b/lucene/facet/build.gradle
@@ -4,5 +4,7 @@ apply plugin: 'java'
 dependencies {
     compile project(':lucene:core')
     compile 'com.carrotsearch:hppc:0.8.1'
+
     testCompile project(':lucene:test-framework')
+    testCompile project(':lucene:queries')
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/lucene/queryparser/build.gradle
----------------------------------------------------------------------
diff --git a/lucene/queryparser/build.gradle b/lucene/queryparser/build.gradle
index 98b56c7..b57d570 100644
--- a/lucene/queryparser/build.gradle
+++ b/lucene/queryparser/build.gradle
@@ -1,5 +1,18 @@
 apply plugin: 'java'
 
+task jarTest (type: Jar) {
+    from sourceSets.test.output
+    classifier = 'test'
+}
+
+configurations {
+    testOutput
+}
+
+artifacts {
+    testOutput jarTest
+}
+
 dependencies {
     compile project(':lucene:core')
     compile project(':lucene:queries')

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/lucene/replicator/build.gradle
----------------------------------------------------------------------
diff --git a/lucene/replicator/build.gradle b/lucene/replicator/build.gradle
index 36e57b2..cf5d094 100644
--- a/lucene/replicator/build.gradle
+++ b/lucene/replicator/build.gradle
@@ -1,8 +1,17 @@
 apply plugin: 'java'
 
 dependencies {
-    // compile project(':lucene:core')
-    // compile project(':lucene:queries')
-    // compile project(':lucene:sandbox')
-    // testCompile project(':lucene:test-framework')
+    compile project(':lucene:core')
+    compile project(':lucene:facet')
+    compile library.httpclient
+    compile library.httpcore
+    compile library.jetty_server
+    compile library.javax_servlet_api
+    compile library.jetty_servlet
+    compile library.jetty_util
+    compile library.jetty_io
+    compile library.jetty_continuation
+    compile library.jetty_http
+    
+    testCompile project(':lucene:test-framework')
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/lucene/spatial-extras/build.gradle
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/build.gradle b/lucene/spatial-extras/build.gradle
index 36e57b2..fdd4520 100644
--- a/lucene/spatial-extras/build.gradle
+++ b/lucene/spatial-extras/build.gradle
@@ -1,8 +1,14 @@
 apply plugin: 'java'
 
 dependencies {
-    // compile project(':lucene:core')
-    // compile project(':lucene:queries')
-    // compile project(':lucene:sandbox')
-    // testCompile project(':lucene:test-framework')
+    compile project(':lucene:spatial')
+    compile project(':lucene:spatial3d')
+    compile library.spatial4j
+    compile library.s2_geometry_library_java
+
+    testCompile project(':lucene:test-framework')    
+    testCompile library.jts_core 
+    testCompile library.slf4j_api
+    testCompile "$library.spatial4j:tests"
+    testCompile project(path: ':lucene:spatial3d', configuration: 'testOutput')
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/lucene/spatial/build.gradle
----------------------------------------------------------------------
diff --git a/lucene/spatial/build.gradle b/lucene/spatial/build.gradle
index cd0844a..2ca6a3e 100644
--- a/lucene/spatial/build.gradle
+++ b/lucene/spatial/build.gradle
@@ -1,6 +1,6 @@
 apply plugin: 'java'
 
 dependencies {
-    compile project(':lucene:core')
+    compile project(':lucene:core')   
     testCompile project(':lucene:test-framework')
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/lucene/spatial3d/build.gradle
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/build.gradle b/lucene/spatial3d/build.gradle
index cd0844a..af193c7 100644
--- a/lucene/spatial3d/build.gradle
+++ b/lucene/spatial3d/build.gradle
@@ -1,5 +1,18 @@
 apply plugin: 'java'
 
+task jarTest (type: Jar) {
+    from sourceSets.test.output
+    classifier = 'test'
+}
+
+configurations {
+    testOutput
+}
+
+artifacts {
+    testOutput jarTest
+}
+
 dependencies {
     compile project(':lucene:core')
     testCompile project(':lucene:test-framework')

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/settings.gradle
----------------------------------------------------------------------
diff --git a/settings.gradle b/settings.gradle
index 2ffc67e..5016b7a 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -41,5 +41,7 @@ include 'lucene:spatial3d'
 include 'lucene:suggest'
 include 'lucene:test-framework'
 
-
-include 'solr'
\ No newline at end of file
+include 'solr:core'
+include 'solr:server'
+include 'solr:solrj'
+include 'solr:test-framework'
\ No newline at end of file
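
Each colon-separated include path maps onto a matching directory, so
these entries pick up per-module build scripts where present, e.g.:

    include 'solr:core'    // -> solr/core/build.gradle (added below)
    include 'solr:solrj'   // -> solr/solrj/build.gradle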

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/build.gradle
----------------------------------------------------------------------
diff --git a/solr/core/build.gradle b/solr/core/build.gradle
new file mode 100644
index 0000000..8be91c3
--- /dev/null
+++ b/solr/core/build.gradle
@@ -0,0 +1,140 @@
+apply plugin: 'java'
+
+
+task printClasspath {
+    doLast {
+        configurations.testRuntime.each { println it }
+    }
+}
+
+dependencies {
+	compile project(':solr:server')
+	compile project(':solr:solrj')
+	compile project(':lucene:analysis:common')
+	compile project(':lucene:suggest')
+	compile project(':lucene:queries')
+	compile project(':lucene:highlighter')
+	compile project(':lucene:spatial-extras')
+	compile project(':lucene:grouping')
+	compile project(':lucene:classification')
+	compile project(':lucene:expressions')
+	compile project(':lucene:sandbox')
+	compile project(':lucene:queryparser')
+	compile project(':lucene:misc')
+	compile project(':lucene:join')
+	compile project(':lucene:codecs')
+	compile project(':lucene:backward-codecs')
+
+	compile library.commons_codec
+	compile library.commons_io
+	compile library.commons_exec
+	compile library.commons_fileupload
+	compile library.commons_cli
+	compile library.commons_lang
+	compile (library.guava) {
+		force = true
+	}
+	compile library.spatial4j
+	compile library.antlr4_runtime
+	compile library.commons_math3
+	compile library.asm
+	compile library.asm_commons
+	compile library.org_restlet
+	compile library.org_restlet_ext_servlet
+	compile library.dom4j
+	compile library.hppc
+
+	compile library.log4j_api
+	compile library.log4j_core
+	compile library.log4j_slf4j_impl	
+	compile library.log4j_1_2_api
+	compile library.disruptor
+	compile library.jcl_over_slf4j
+
+	compile library.jackson_core
+	compile library.jackson_databind
+	compile library.jackson_annotations
+	compile library.jackson_dataformat_smile
+
+	compile (library.curator_framework)
+	compile (library.curator_client)
+	compile (library.curator_recipes)
+	compile library.t_digest
+
+	compile (library.calcite_core)
+	compile (library.calcite_linq4j)
+	compile library.avatica_core
+	compile library.commons_lang3
+	compile library.eigenbase_properties
+	compile library.janino
+	compile library.commons_compiler
+	compile library.protobuf_java
+	compile library.rrd4j
+
+	//compile.hadoop
+	compile (library.hadoop_common)
+	compile (library.hadoop_hdfs)
+	compile library.hadoop_annotations	
+	compile library.hadoop_auth
+	compile library.commons_configuration
+	compile library.commons_collections
+	compile library.caffeine
+	compile library.htrace_core
+
+	testCompile library.mockito_core
+	testCompile library.byte_buddy
+	testCompile library.objenesis
+
+	//test.DfsMiniCluster
+	//TODO datcm verify this
+	testCompile "$library.hadoop_common:tests"
+	testCompile "$library.hadoop_hdfs:tests"
+	testCompile library.mortbay_jetty
+	testCompile library.mortbay_jetty_util
+	testCompile library.mortbay_jetty_sslengine
+	testCompile library.jersey_core
+	testCompile library.jersey_server
+	testCompile library.netty_all
+	testCompile library.jackson_core_asl
+	testCompile library.jackson_mapper_asl
+
+	//test.MiniKdc
+	testCompile library.hadoop_minikdc
+	testCompile library.apacheds_core
+	testCompile library.apacheds_core_api
+	testCompile library.apacheds_core_avl
+	testCompile library.apacheds_core_shared
+	testCompile library.apacheds_interceptor_kerberos
+	testCompile library.apacheds_interceptors_admin
+	testCompile library.apacheds_interceptors_authn
+	testCompile library.apacheds_interceptors_authz
+	testCompile library.apacheds_interceptors_changelog
+	testCompile library.apacheds_interceptors_collective
+	testCompile library.apacheds_interceptors_event
+	testCompile library.apacheds_interceptors_exception
+	testCompile library.apacheds_interceptors_journal
+	testCompile library.apacheds_interceptors_normalization
+	testCompile library.apacheds_interceptors_operational
+	testCompile library.apacheds_interceptors_referral
+	testCompile library.apacheds_interceptors_schema
+	testCompile library.apacheds_interceptors_subtree
+	testCompile library.apacheds_interceptors_trigger
+	testCompile library.apacheds_i18n
+	testCompile library.apacheds_jdbm_partition
+	testCompile library.apacheds_kerberos_codec
+	testCompile library.apacheds_ldif_partition
+	testCompile library.apacheds_mavibot_partition
+	testCompile library.apacheds_protocol_kerberos
+	testCompile library.apacheds_protocol_ldap
+	testCompile library.apacheds_protocol_shared
+	testCompile library.apacheds_xdbm_partition
+	testCompile library.api_all
+	testCompile library.apacheds_jdbm1
+	testCompile library.mina_core
+	testCompile library.bcprov_jdk15on
+	testCompile library.antlr
+	testCompile library.ehcache_core
+	testCompile project(':solr:test-framework')
+	testCompile project(path: ':lucene:backward-codecs', configuration: 'testOutput')
+	testCompile project(path: ':lucene:queryparser', configuration: 'testOutput')
+}
\ No newline at end of file
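
The printClasspath helper task at the top of this file dumps the resolved
test runtime classpath; assuming a Gradle installation (or wrapper) is
available, it can be invoked as:

    gradle :solr:core:printClasspath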

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/analysis/ReversedWildcardFilter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/analysis/ReversedWildcardFilter.java b/solr/core/src/java/org/apache/solr/analysis/ReversedWildcardFilter.java
deleted file mode 100644
index 37fd95b..0000000
--- a/solr/core/src/java/org/apache/solr/analysis/ReversedWildcardFilter.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.analysis;
-import java.io.IOException;
-
-import org.apache.lucene.analysis.TokenFilter;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-
-/**
- * This class produces a special form of reversed tokens, suitable for
- * better handling of leading wildcards. Tokens from the input TokenStream
- * are reversed and prepended with a special "reversed" marker character.
- * If the <code>withOriginal</code> argument is <code>true</code>, both the
- * original token and the reversed token are returned at the same position
- * (the reversed form is emitted first, then the original with
- * <code>positionIncrement == 0</code>). Otherwise only reversed
- * tokens are returned.
- * <p>Note: this filter doubles the number of tokens in the input stream when
- * <code>withOriginal == true</code>, which proportionally increases the size
- * of postings and term dictionary in the index.
- */
-public final class ReversedWildcardFilter extends TokenFilter {
-  
-  private final boolean withOriginal;
-  private final char markerChar;
-  private final CharTermAttribute termAtt;
-  private final PositionIncrementAttribute posAtt;
-  private State save = null;
-
-  protected ReversedWildcardFilter(TokenStream input, boolean withOriginal, char markerChar) {
-    super(input);
-    this.termAtt = addAttribute(CharTermAttribute.class);
-    this.posAtt = addAttribute(PositionIncrementAttribute.class);
-    this.withOriginal = withOriginal;
-    this.markerChar = markerChar;
-  }
-
-  @Override
-  public boolean incrementToken() throws IOException {
-    if( save != null ) {
-      // clearAttributes();  // not currently necessary
-      restoreState(save);
-      save = null;
-      return true;
-    }
-
-    if (!input.incrementToken()) return false;
-
-    // pass through zero-length terms
-    int oldLen = termAtt.length();
-    if (oldLen ==0) return true;
-    int origOffset = posAtt.getPositionIncrement();
-    if (withOriginal == true){
-      posAtt.setPositionIncrement(0);
-      save = captureState();
-    }
-    char [] buffer = termAtt.resizeBuffer(oldLen + 1);
-    buffer[oldLen] = markerChar;
-    reverse(buffer, 0, oldLen + 1);
-
-    posAtt.setPositionIncrement(origOffset);
-    termAtt.copyBuffer(buffer, 0, oldLen +1);
-    return true;
-  }
-  
-
-  /**
-   * Partially reverses the given input buffer in-place from the given offset
-   * up to the given length, keeping surrogate pairs in the correct (non-reversed) order.
-   * @param buffer the input char array to reverse
-   * @param start the offset from where to reverse the buffer
-   * @param len the length in the buffer up to where the
-   *        buffer should be reversed
-   */
-  public static void reverse(final char[] buffer, final int start, final int len) {
-    /* modified version of Apache Harmony AbstractStringBuilder reverse0() */
-    if (len < 2)
-      return;
-    int end = (start + len) - 1;
-    char frontHigh = buffer[start];
-    char endLow = buffer[end];
-    boolean allowFrontSur = true, allowEndSur = true;
-    final int mid = start + (len >> 1);
-    for (int i = start; i < mid; ++i, --end) {
-      final char frontLow = buffer[i + 1];
-      final char endHigh = buffer[end - 1];
-      final boolean surAtFront = allowFrontSur
-          && Character.isSurrogatePair(frontHigh, frontLow);
-      if (surAtFront && (len < 3)) {
-        // nothing to do since surAtFront is allowed and 1 char left
-        return;
-      }
-      final boolean surAtEnd = allowEndSur
-          && Character.isSurrogatePair(endHigh, endLow);
-      allowFrontSur = allowEndSur = true;
-      if (surAtFront == surAtEnd) {
-        if (surAtFront) {
-          // both surrogates
-          buffer[end] = frontLow;
-          buffer[--end] = frontHigh;
-          buffer[i] = endHigh;
-          buffer[++i] = endLow;
-          frontHigh = buffer[i + 1];
-          endLow = buffer[end - 1];
-        } else {
-          // neither surrogates
-          buffer[end] = frontHigh;
-          buffer[i] = endLow;
-          frontHigh = frontLow;
-          endLow = endHigh;
-        }
-      } else {
-        if (surAtFront) {
-          // surrogate only at the front
-          buffer[end] = frontLow;
-          buffer[i] = endLow;
-          endLow = endHigh;
-          allowFrontSur = false;
-        } else {
-          // surrogate only at the end
-          buffer[end] = frontHigh;
-          buffer[i] = endHigh;
-          frontHigh = frontLow;
-          allowEndSur = false;
-        }
-      }
-    }
-    if ((len & 0x01) == 1 && !(allowFrontSur && allowEndSur)) {
-      // only if odd length
-      buffer[end] = allowFrontSur ? endLow : frontHigh;
-    }
-  }
-  
-  @Override
-  public void reset() throws IOException {
-    super.reset();
-    save = null;
-  }
-
-}
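
A quick sketch of the filter's contract (marker rendered here as \u0001,
the START_OF_HEADING marker the factory below passes in): with
withOriginal == true, the input token "foo" yields two tokens at the same
position, the reversed form first and then the saved original with
positionIncrement == 0:

    foo  ->  \u0001oof, foo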

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/analysis/ReversedWildcardFilterFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/analysis/ReversedWildcardFilterFactory.java b/solr/core/src/java/org/apache/solr/analysis/ReversedWildcardFilterFactory.java
deleted file mode 100644
index e64957f..0000000
--- a/solr/core/src/java/org/apache/solr/analysis/ReversedWildcardFilterFactory.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.analysis;
-import java.util.Map;
-
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.reverse.ReverseStringFilter;
-import org.apache.lucene.analysis.util.TokenFilterFactory;
-
-/**
- * Factory for {@link ReversedWildcardFilter}-s. When this factory is
- * added to an analysis chain, it will be used both for filtering the
- * tokens during indexing, and to determine the query processing of
- * this field during search.
- * <p>This class supports the following init arguments:
- * <ul>
- * <li><code>withOriginal</code> - if true, then produce both original and reversed tokens at
- * the same positions. If false, then produce only reversed tokens.</li>
- * <li><code>maxPosAsterisk</code> - maximum position (1-based) of the asterisk wildcard
- * ('*') that triggers the reversal of query term. Asterisk that occurs at
- * positions higher than this value will not cause the reversal of query term.
- * Defaults to 2, meaning that asterisks on positions 1 and 2 will cause
- * a reversal.</li>
- * <li><code>maxPosQuestion</code> - maximum position (1-based) of the question
- * mark wildcard ('?') that triggers the reversal of query term. Defaults to 1.
- * Set this to 0, and <code>maxPosAsterisk</code> to 1 to reverse only
- * pure suffix queries (i.e. ones with a single leading asterisk).</li>
- * <li><code>maxFractionAsterisk</code> - additional parameter that
- * triggers the reversal if asterisk ('*') position is less than this
- * fraction of the query token length. Defaults to 0.0f (disabled).</li>
- * <li><code>minTrailing</code> - minimum number of trailing characters in query
- * token after the last wildcard character. For good performance this should be
- * set to a value larger than 1. Defaults to 2.</li>
- * </ul>
- * Note 1: This filter always reverses input tokens during indexing.
- * Note 2: Query tokens without wildcard characters will never be reversed.
- * <pre class="prettyprint" >
- * &lt;fieldType name="text_rvswc" class="solr.TextField" positionIncrementGap="100"&gt;
- *   &lt;analyzer type="index"&gt;
- *     &lt;tokenizer class="solr.WhitespaceTokenizerFactory"/&gt;
- *     &lt;filter class="solr.ReversedWildcardFilterFactory" withOriginal="true"
- *             maxPosAsterisk="2" maxPosQuestion="1" minTrailing="2" maxFractionAsterisk="0"/&gt;
- *   &lt;/analyzer&gt;
- *   &lt;analyzer type="query"&gt;
- *     &lt;tokenizer class="solr.WhitespaceTokenizerFactory"/&gt;
- *   &lt;/analyzer&gt;
- * &lt;/fieldType&gt;</pre>
- *
- */
-public class ReversedWildcardFilterFactory extends TokenFilterFactory {
-  
-  private char markerChar = ReverseStringFilter.START_OF_HEADING_MARKER;
-  private boolean withOriginal;
-  private int maxPosAsterisk;
-  private int maxPosQuestion;
-  private int minTrailing;
-  private float maxFractionAsterisk;
-
-  /** Creates a new ReversedWildcardFilterFactory */
-  public ReversedWildcardFilterFactory(Map<String,String> args) {
-    super(args);
-    withOriginal = getBoolean(args, "withOriginal", true);
-    maxPosAsterisk = getInt(args, "maxPosAsterisk", 2);
-    maxPosQuestion = getInt(args, "maxPosQuestion", 1);
-    minTrailing = getInt(args, "minTrailing", 2);
-    maxFractionAsterisk = getFloat(args, "maxFractionAsterisk", 0.0f);
-    if (!args.isEmpty()) {
-      throw new IllegalArgumentException("Unknown parameters: " + args);
-    }
-  }
-
-
-  @Override
-  public TokenStream create(TokenStream input) {
-    return new ReversedWildcardFilter(input, withOriginal, markerChar);
-  }
-  
-  /**
-   * This method encapsulates the logic that determines whether
-   * a query token should be reversed in order to use the
-   * reversed terms in the index.
-   * @param token input token.
-   * @return true if input token should be reversed, false otherwise.
-   */
-  public boolean shouldReverse(String token) {
-    int posQ = token.indexOf('?');
-    int posA = token.indexOf('*');
-    if (posQ == -1 && posA == -1) { // not a wildcard query
-      return false;
-    }
-    int pos;
-    int lastPos;
-    int len = token.length();
-    lastPos = token.lastIndexOf('?');
-    pos = token.lastIndexOf('*');
-    if (pos > lastPos) lastPos = pos;
-    if (posQ != -1) {
-      pos = posQ;
-      if (posA != -1) {
-        pos = Math.min(posQ, posA);
-      }
-    } else {
-      pos = posA;
-    }
-    if (len - lastPos < minTrailing)  { // too few trailing chars
-      return false;
-    }
-    if (posQ != -1 && posQ < maxPosQuestion) {  // leading '?'
-      return true;
-    }
-    if (posA != -1 && posA < maxPosAsterisk) { // leading '*'
-      return true;
-    }
-    // '*' in the leading part
-    if (maxFractionAsterisk > 0.0f && pos < (float)token.length() * maxFractionAsterisk) {
-      return true;
-    }
-    return false;
-  }
-  
-  public char getMarkerChar() {
-    return markerChar;
-  }
-}
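
For illustration, the reversal heuristics documented above can be exercised directly through shouldReverse(). A minimal sketch, assuming solr-core and its Lucene dependencies are on the classpath (the demo class name is made up):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.solr.analysis.ReversedWildcardFilterFactory;

    public class ReversedWildcardDemo {
      public static void main(String[] args) {
        Map<String, String> initArgs = new HashMap<>();
        initArgs.put("withOriginal", "true");
        initArgs.put("maxPosAsterisk", "2");
        initArgs.put("maxPosQuestion", "1");
        initArgs.put("minTrailing", "2");
        initArgs.put("maxFractionAsterisk", "0");
        // the constructor consumes the init args and rejects any it does not recognize
        ReversedWildcardFilterFactory factory = new ReversedWildcardFilterFactory(initArgs);

        System.out.println(factory.shouldReverse("*foo"));   // true: leading '*', enough trailing chars
        System.out.println(factory.shouldReverse("foo*"));   // false: too few trailing chars after '*'
        System.out.println(factory.shouldReverse("foobar")); // false: no wildcard present
      }
    }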

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/analysis/SolrAnalyzer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/analysis/SolrAnalyzer.java b/solr/core/src/java/org/apache/solr/analysis/SolrAnalyzer.java
deleted file mode 100644
index 38d0d17..0000000
--- a/solr/core/src/java/org/apache/solr/analysis/SolrAnalyzer.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.analysis;
-
-import org.apache.lucene.analysis.Analyzer;
-
-import java.io.Reader;
-
-/**
- * Base analyzer for Solr that adds a configurable position increment gap.
- */
-public abstract class SolrAnalyzer extends Analyzer {
-  int posIncGap = 0;
-
-  public void setPositionIncrementGap(int gap) {
-    posIncGap = gap;
-  }
-
-  @Override
-  public int getPositionIncrementGap(String fieldName) {
-    return posIncGap;
-  }
-
-  @Override
-  protected Reader initReader(String fieldName, Reader reader) {
-    return reader;
-  }
-}
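
As a hedged sketch of how a concrete subclass might plug in (the anonymous analyzer below is hypothetical; WhitespaceTokenizer comes from Lucene's analyzers-common):

    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.solr.analysis.SolrAnalyzer;

    public class SolrAnalyzerDemo {
      public static void main(String[] args) {
        SolrAnalyzer analyzer = new SolrAnalyzer() {
          @Override
          protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer source = new WhitespaceTokenizer();
            return new TokenStreamComponents(source);
          }
        };
        // values of a multi-valued field are separated by this many virtual positions
        analyzer.setPositionIncrementGap(100);
        System.out.println(analyzer.getPositionIncrementGap("body")); // 100
        analyzer.close();
      }
    }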

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/analysis/TokenizerChain.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/analysis/TokenizerChain.java b/solr/core/src/java/org/apache/solr/analysis/TokenizerChain.java
deleted file mode 100644
index af7e812..0000000
--- a/solr/core/src/java/org/apache/solr/analysis/TokenizerChain.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.analysis;
-
-import java.io.Reader;
-
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.util.CharFilterFactory;
-import org.apache.lucene.analysis.util.MultiTermAwareComponent;
-import org.apache.lucene.analysis.util.TokenFilterFactory;
-import org.apache.lucene.analysis.util.TokenizerFactory;
-
-/**
- * An analyzer that uses a tokenizer and a list of token filters to
- * create a TokenStream.
- */
-public final class TokenizerChain extends SolrAnalyzer {
-  private static final CharFilterFactory[] EMPTY_CHAR_FILTERS = new CharFilterFactory[0];
-  private static final TokenFilterFactory[] EMPTY_TOKEN_FILTERS = new TokenFilterFactory[0];
-  
-  final private CharFilterFactory[] charFilters;
-  final private TokenizerFactory tokenizer;
-  final private TokenFilterFactory[] filters;
-
-  /** 
-   * Creates a new TokenizerChain without any CharFilterFactories.
-   *
-   * @param tokenizer Factory for the Tokenizer to use, must not be null.
-   * @param filters Factories for the TokenFilters to use - if null, will be treated as if empty.
-   */
-  public TokenizerChain(TokenizerFactory tokenizer, TokenFilterFactory[] filters) {
-    this(null,tokenizer,filters);
-  }
-
-  /** 
-   * Creates a new TokenizerChain.
-   *
-   * @param charFilters Factories for the CharFilters to use, if any - if null, will be treated as if empty.
-   * @param tokenizer Factory for the Tokenizer to use, must not be null.
-   * @param filters Factories for the TokenFilters to use, if any - if null, will be treated as if empty.
-   */
-  public TokenizerChain(CharFilterFactory[] charFilters, TokenizerFactory tokenizer, TokenFilterFactory[] filters) {
-    charFilters = null == charFilters ? EMPTY_CHAR_FILTERS : charFilters;
-    filters = null == filters ? EMPTY_TOKEN_FILTERS : filters;
-    if (null == tokenizer) {
-      throw new NullPointerException("TokenizerFactory must not be null");
-    }
-    
-    this.charFilters = charFilters;
-    this.tokenizer = tokenizer;
-    this.filters = filters;
-  }
-
-  /** @return array of CharFilterFactories, may be empty but never null */
-  public CharFilterFactory[] getCharFilterFactories() { return charFilters; }
-  /** @return the TokenizerFactory in use, will never be null */
-  public TokenizerFactory getTokenizerFactory() { return tokenizer; }
-  /** @return array of TokenFilterFactories, may be empty but never null */
-  public TokenFilterFactory[] getTokenFilterFactories() { return filters; }
-
-  @Override
-  public Reader initReader(String fieldName, Reader reader) {
-    if (charFilters != null && charFilters.length > 0) {
-      Reader cs = reader;
-      for (CharFilterFactory charFilter : charFilters) {
-        cs = charFilter.create(cs);
-      }
-      reader = cs;
-    }
-    return reader;
-  }
-
-  @Override
-  protected Reader initReaderForNormalization(String fieldName, Reader reader) {
-    if (charFilters != null && charFilters.length > 0) {
-      for (CharFilterFactory charFilter : charFilters) {
-        if (charFilter instanceof MultiTermAwareComponent) {
-          charFilter = (CharFilterFactory) ((MultiTermAwareComponent) charFilter).getMultiTermComponent();
-          reader = charFilter.create(reader);
-        }
-      }
-    }
-    return reader;
-  }
-
-  @Override
-  protected TokenStreamComponents createComponents(String fieldName) {
-    Tokenizer tk = tokenizer.create(attributeFactory(fieldName));
-    TokenStream ts = tk;
-    for (TokenFilterFactory filter : filters) {
-      ts = filter.create(ts);
-    }
-    return new TokenStreamComponents(tk, ts);
-  }
-
-  @Override
-  protected TokenStream normalize(String fieldName, TokenStream in) {
-    TokenStream result = in;
-    for (TokenFilterFactory filter : filters) {
-      if (filter instanceof MultiTermAwareComponent) {
-        filter = (TokenFilterFactory) ((MultiTermAwareComponent) filter).getMultiTermComponent();
-        result = filter.create(result);
-      }
-    }
-    return result;
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("TokenizerChain(");
-    for (CharFilterFactory filter: charFilters) {
-      sb.append(filter);
-      sb.append(", ");
-    }
-    sb.append(tokenizer);
-    for (TokenFilterFactory filter: filters) {
-      sb.append(", ");
-      sb.append(filter);
-    }
-    sb.append(')');
-    return sb.toString();
-  }
-
-}
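
A minimal usage sketch, assuming Lucene's analyzers-common factories are available (the field name and sample text are made up):

    import java.io.IOException;
    import java.util.HashMap;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.LowerCaseFilterFactory;
    import org.apache.lucene.analysis.core.WhitespaceTokenizerFactory;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.analysis.util.TokenFilterFactory;
    import org.apache.solr.analysis.TokenizerChain;

    public class TokenizerChainDemo {
      public static void main(String[] args) throws IOException {
        TokenizerChain analyzer = new TokenizerChain(
            new WhitespaceTokenizerFactory(new HashMap<>()),
            new TokenFilterFactory[] { new LowerCaseFilterFactory(new HashMap<>()) });

        try (TokenStream ts = analyzer.tokenStream("body", "Hello Solr World")) {
          CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
          ts.reset();
          while (ts.incrementToken()) {
            System.out.println(term.toString()); // hello / solr / world
          }
          ts.end();
        }
      }
    }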

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/analysis/package-info.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/analysis/package-info.java b/solr/core/src/java/org/apache/solr/analysis/package-info.java
deleted file mode 100644
index e8a48bf..0000000
--- a/solr/core/src/java/org/apache/solr/analysis/package-info.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- 
-/** 
- * Factories and classes specific to text analysis and the creation of {@link org.apache.lucene.analysis.TokenStream}s
- * <p>
- * See {@link org.apache.lucene.analysis} for additional details.
- */
-package org.apache.solr.analysis;
-
-
-

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/api/Api.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/api/Api.java b/solr/core/src/java/org/apache/solr/api/Api.java
deleted file mode 100644
index d2c468c..0000000
--- a/solr/core/src/java/org/apache/solr/api/Api.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.api;
-
-import java.util.Map;
-
-import com.google.common.collect.ImmutableMap;
-import org.apache.solr.common.SpecProvider;
-import org.apache.solr.common.util.ValidatingJsonMap;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.common.util.JsonSchemaValidator;
-
-/** Every version 2 API must extend this class. It is mostly like a request handler,
- * but it has extra methods to provide the JSON schema of the endpoint.
- */
-public abstract class Api implements SpecProvider {
-  protected SpecProvider spec;
-  protected volatile Map<String, JsonSchemaValidator> commandSchema;
-
-  protected Api(SpecProvider spec) {
-    this.spec = spec;
-  }
-
-  /** Caches and returns the schema validators for this API's commands.
-   */
-  public Map<String, JsonSchemaValidator> getCommandSchema() {
-    if (commandSchema == null) {
-      synchronized (this) {
-        if(commandSchema == null) {
-          ValidatingJsonMap commands = getSpec().getMap("commands", null);
-          commandSchema = commands != null ?
-              ImmutableMap.copyOf(ApiBag.getParsedSchema(commands)) :
-              ImmutableMap.of();
-        }
-      }
-    }
-    return commandSchema;
-  }
-
-  /** The method that gets called for each request
-   */
-  public abstract void call(SolrQueryRequest req , SolrQueryResponse rsp);
-
-  /**Get the specification of the API as a Map
-   */
-  @Override
-  public ValidatingJsonMap getSpec() {
-    return spec.getSpec();
-  }
-
-}
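
To illustrate the contract, a hedged sketch of a hand-built endpoint (the "echo" behavior and the "msg" parameter are invented; ApiBag.EMPTY_SPEC is defined in ApiBag below):

    // a hypothetical endpoint that echoes a request parameter back
    Api echo = new Api(ApiBag.EMPTY_SPEC) {
      @Override
      public void call(SolrQueryRequest req, SolrQueryResponse rsp) {
        rsp.add("echo", req.getParams().get("msg", "hello"));
      }
    };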

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/api/ApiBag.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/api/ApiBag.java b/solr/core/src/java/org/apache/solr/api/ApiBag.java
deleted file mode 100644
index 8a3f972..0000000
--- a/solr/core/src/java/org/apache/solr/api/ApiBag.java
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.api;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.stream.Collectors;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SpecProvider;
-import org.apache.solr.common.util.CommandOperation;
-import org.apache.solr.common.util.ContentStream;
-import org.apache.solr.common.util.JsonSchemaValidator;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.PathTrie;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.common.util.ValidatingJsonMap;
-import org.apache.solr.core.PluginBag;
-import org.apache.solr.core.PluginInfo;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.security.AuthorizationContext;
-import org.apache.solr.security.PermissionNameProvider;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.client.solrj.SolrRequest.SUPPORTED_METHODS;
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.common.util.StrUtils.formatString;
-import static org.apache.solr.common.util.ValidatingJsonMap.ENUM_OF;
-import static org.apache.solr.common.util.ValidatingJsonMap.NOT_NULL;
-
-public class ApiBag {
-  private final boolean isCoreSpecific;
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final Map<String, PathTrie<Api>> apis = new ConcurrentHashMap<>();
-
-  public ApiBag(boolean isCoreSpecific) {
-    this.isCoreSpecific = isCoreSpecific;
-  }
-
-  public synchronized void register(Api api, Map<String, String> nameSubstitutes) {
-    try {
-      validateAndRegister(api, nameSubstitutes);
-    } catch (Exception e) {
-      log.error("Unable to register plugin:" + api.getClass().getName() + "with spec :" + Utils.toJSONString(api.getSpec()), e);
-      if (e instanceof RuntimeException) {
-        throw (RuntimeException) e;
-      } else {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
-      }
-
-    }
-  }
-
-  private void validateAndRegister(Api api, Map<String, String> nameSubstitutes) {
-    ValidatingJsonMap spec = api.getSpec();
-    Api introspect = new IntrospectApi(api, isCoreSpecific);
-    List<String> methods = spec.getList("methods", ENUM_OF, SUPPORTED_METHODS);
-    for (String method : methods) {
-      PathTrie<Api> registry = apis.get(method);
-
-      if (registry == null) apis.put(method, registry = new PathTrie<>(ImmutableSet.of("_introspect")));
-      ValidatingJsonMap url = spec.getMap("url", NOT_NULL);
-      ValidatingJsonMap params = url.getMap("params", null);
-      if (params != null) {
-        for (Object o : params.keySet()) {
-          ValidatingJsonMap param = params.getMap(o.toString(), NOT_NULL);
-          param.get("type", ENUM_OF, KNOWN_TYPES);
-        }
-      }
-      List<String> paths = url.getList("paths", NOT_NULL);
-      ValidatingJsonMap parts = url.getMap("parts", null);
-      if (parts != null) {
-        Set<String> wildCardNames = getWildCardNames(paths);
-        for (Object o : parts.keySet()) {
-          if (!wildCardNames.contains(o.toString()))
-            throw new RuntimeException("" + o + " is not a valid part name");
-          ValidatingJsonMap pathMeta = parts.getMap(o.toString(), NOT_NULL);
-          pathMeta.get("type", ENUM_OF, ImmutableSet.of("enum", "string", "int", "number", "boolean"));
-        }
-      }
-      verifyCommands(api.getSpec());
-      for (String path : paths) {
-        registry.insert(path, nameSubstitutes, api);
-        registerIntrospect(nameSubstitutes, registry, path, introspect);
-      }
-    }
-  }
-
-  public static void registerIntrospect(Map<String, String> nameSubstitutes, PathTrie<Api> registry, String path, Api introspect) {
-    List<String> l = PathTrie.getPathSegments(path);
-    registerIntrospect(l, registry, nameSubstitutes, introspect);
-    int lastIdx = l.size() - 1;
-    for (int i = lastIdx; i >= 0; i--) {
-      String itemAt = l.get(i);
-      if (PathTrie.templateName(itemAt) == null) break;
-      l.remove(i);
-      if (registry.lookup(l, new HashMap<>()) != null) break;
-      registerIntrospect(l, registry, nameSubstitutes, introspect);
-    }
-  }
-
-  static void registerIntrospect(List<String> l, PathTrie<Api> registry, Map<String, String> substitutes, Api introspect) {
-    ArrayList<String> copy = new ArrayList<>(l);
-    copy.add("_introspect");
-    registry.insert(copy, substitutes, introspect);
-  }
-
-  public static class IntrospectApi extends Api {
-    Api baseApi;
-    final boolean isCoreSpecific;
-
-    public IntrospectApi(Api base, boolean isCoreSpecific) {
-      super(EMPTY_SPEC);
-      this.baseApi = base;
-      this.isCoreSpecific = isCoreSpecific;
-    }
-
-    public void call(SolrQueryRequest req, SolrQueryResponse rsp) {
-
-      String cmd = req.getParams().get("command");
-      ValidatingJsonMap result = null;
-      if (cmd == null) {
-        result = isCoreSpecific ? ValidatingJsonMap.getDeepCopy(baseApi.getSpec(), 5, true) : baseApi.getSpec();
-      } else {
-        ValidatingJsonMap specCopy = ValidatingJsonMap.getDeepCopy(baseApi.getSpec(), 5, true);
-        ValidatingJsonMap commands = specCopy.getMap("commands", null);
-        if (commands != null) {
-          ValidatingJsonMap m = commands.getMap(cmd, null);
-          if (m == null) {
-            specCopy.put("commands", Collections.singletonMap(cmd, "Command not found!"));
-          } else {
-            specCopy.put("commands", Collections.singletonMap(cmd, m));
-          }
-
-        }
-        result = specCopy;
-      }
-      if (isCoreSpecific) {
-        List<String> pieces = req.getHttpSolrCall() == null ? null : ((V2HttpCall) req.getHttpSolrCall()).pieces;
-        if (pieces != null) {
-          String prefix = "/" + pieces.get(0) + "/" + pieces.get(1);
-          List<String> paths = result.getMap("url", NOT_NULL).getList("paths", NOT_NULL);
-          result.getMap("url", NOT_NULL).put("paths",
-              paths.stream()
-                  .map(s -> prefix + s)
-                  .collect(Collectors.toList()));
-        }
-      }
-      List l = (List) rsp.getValues().get("spec");
-      if (l == null) rsp.getValues().add("spec", l = new ArrayList());
-      l.add(result);
-    }
-  }
-
-  public static Map<String, JsonSchemaValidator> getParsedSchema(ValidatingJsonMap commands) {
-    Map<String, JsonSchemaValidator> validators = new HashMap<>();
-    for (Object o : commands.entrySet()) {
-      Map.Entry cmd = (Map.Entry) o;
-      try {
-        validators.put((String) cmd.getKey(), new JsonSchemaValidator((Map) cmd.getValue()));
-      } catch (Exception e) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error in api spec", e);
-      }
-    }
-    return validators;
-  }
-
-
-  private void verifyCommands(ValidatingJsonMap spec) {
-    ValidatingJsonMap commands = spec.getMap("commands", null);
-    if (commands == null) return;
-    getParsedSchema(commands);
-
-  }
-
-  private Set<String> getWildCardNames(List<String> paths) {
-    Set<String> wildCardNames = new HashSet<>();
-    for (String path : paths) {
-      List<String> p = PathTrie.getPathSegments(path);
-      for (String s : p) {
-        String wildCard = PathTrie.templateName(s);
-        if (wildCard != null) wildCardNames.add(wildCard);
-      }
-    }
-    return wildCardNames;
-  }
-
-
-  public Api lookup(String path, String httpMethod, Map<String, String> parts) {
-    if (httpMethod == null) {
-      for (PathTrie<Api> trie : apis.values()) {
-        Api api = trie.lookup(path, parts);
-        if (api != null) return api;
-      }
-      return null;
-    } else {
-      PathTrie<Api> registry = apis.get(httpMethod);
-      if (registry == null) return null;
-      return registry.lookup(path, parts);
-    }
-  }
-
-  public static class ReqHandlerToApi extends Api implements PermissionNameProvider {
-    SolrRequestHandler rh;
-
-    public ReqHandlerToApi(SolrRequestHandler rh, SpecProvider spec) {
-      super(spec);
-      this.rh = rh;
-    }
-
-    @Override
-    public void call(SolrQueryRequest req, SolrQueryResponse rsp) {
-      rh.handleRequest(req, rsp);
-    }
-
-    @Override
-    public Name getPermissionName(AuthorizationContext ctx) {
-      if (rh instanceof PermissionNameProvider) {
-        return ((PermissionNameProvider) rh).getPermissionName(ctx);
-      }
-      return null;
-    }
-  }
-
-  public static List<Api> wrapRequestHandlers(final SolrRequestHandler rh, String... specs) {
-    ImmutableList.Builder<Api> b = ImmutableList.builder();
-    for (String spec : specs) b.add(new ReqHandlerToApi(rh, Utils.getSpec(spec)));
-    return b.build();
-  }
-
-
-  public static final SpecProvider EMPTY_SPEC = () -> ValidatingJsonMap.EMPTY;
-  public static final String HANDLER_NAME = "handlerName";
-  public static final Set<String> KNOWN_TYPES = ImmutableSet.of("string", "boolean", "list", "int", "double", "object");
-
-  public PathTrie<Api> getRegistry(String method) {
-    return apis.get(method);
-  }
-
-  public void registerLazy(PluginBag.PluginHolder<SolrRequestHandler> holder, PluginInfo info) {
-    String specName = info.attributes.get("spec");
-    if (specName == null) specName = "emptySpec";
-    register(new LazyLoadedApi(Utils.getSpec(specName), holder), Collections.singletonMap(HANDLER_NAME, info.attributes.get(NAME)));
-  }
-
-  public static SpecProvider constructSpec(PluginInfo info) {
-    Object specObj = info == null ? null : info.attributes.get("spec");
-    if (specObj == null) specObj = "emptySpec";
-    if (specObj instanceof Map) {
-      Map map = (Map) specObj;
-      return () -> ValidatingJsonMap.getDeepCopy(map, 4, false);
-    } else {
-      return Utils.getSpec((String) specObj);
-    }
-  }
-
-  public static List<CommandOperation> getCommandOperations(ContentStream stream, Map<String, JsonSchemaValidator> validators, boolean validate) {
-    List<CommandOperation> parsedCommands = null;
-    try {
-      parsedCommands = CommandOperation.readCommands(Collections.singleton(stream), new NamedList());
-    } catch (IOException e) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unable to parse commands",e);
-    }
-
-    if (validators == null || !validate) {    // no validation possible because we do not have a spec
-      return parsedCommands;
-    }
-
-    List<CommandOperation> commandsCopy = CommandOperation.clone(parsedCommands);
-
-    for (CommandOperation cmd : commandsCopy) {
-      JsonSchemaValidator validator = validators.get(cmd.name);
-      if (validator == null) {
-        cmd.addError(formatString("Unknown operation ''{0}''; available ops are ''{1}''", cmd.name,
-            validators.keySet()));
-        continue;
-      } else {
-        List<String> errs = validator.validateJson(cmd.getCommandData());
-        if (errs != null){
-          // otherwise swallowed in solrj tests, and just get "Error in command payload" in test log
-          // which is quite unhelpful.
-          log.error("Command errors for {}:{}", cmd.name, errs );
-          for (String err : errs) cmd.addError(err);
-        }
-      }
-
-    }
-    List<Map> errs = CommandOperation.captureErrors(commandsCopy);
-    if (!errs.isEmpty()) {
-      throw new ExceptionWithErrObject(SolrException.ErrorCode.BAD_REQUEST, "Error in command payload", errs);
-    }
-    return commandsCopy;
-  }
-
-  public static class ExceptionWithErrObject extends SolrException {
-    private List<Map> errs;
-
-    public ExceptionWithErrObject(ErrorCode code, String msg, List<Map> errs) {
-      super(code, msg);
-      this.errs = errs;
-    }
-
-    public List<Map> getErrs() {
-      return errs;
-    }
-
-    public String toString() {
-      return super.toString() + ", errors: " + getErrs() + ", ";
-    }
-  }
-
-  public static class LazyLoadedApi extends Api {
-
-    private final PluginBag.PluginHolder<SolrRequestHandler> holder;
-    private Api delegate;
-
-    protected LazyLoadedApi(SpecProvider specProvider, PluginBag.PluginHolder<SolrRequestHandler> lazyPluginHolder) {
-      super(specProvider);
-      this.holder = lazyPluginHolder;
-    }
-
-    @Override
-    public void call(SolrQueryRequest req, SolrQueryResponse rsp) {
-      if (!holder.isLoaded()) {
-        delegate = new ReqHandlerToApi(holder.get(), ApiBag.EMPTY_SPEC);
-      }
-      delegate.call(req, rsp);
-    }
-  }
-
-}
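
The registration and lookup logic above is driven by PathTrie. A standalone sketch of the trie behavior ApiBag relies on (the path and the value are made up):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.solr.common.util.PathTrie;

    public class PathTrieDemo {
      public static void main(String[] args) {
        PathTrie<String> trie = new PathTrie<>();
        // "{collection}" is a template segment, like the wildcard parts validated above
        trie.insert("/collections/{collection}/shards", Collections.emptyMap(), "shards-api");

        Map<String, String> parts = new HashMap<>();
        System.out.println(trie.lookup("/collections/gettingstarted/shards", parts)); // shards-api
        System.out.println(parts.get("collection"));                                  // gettingstarted
      }
    }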

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/api/ApiSupport.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/api/ApiSupport.java b/solr/core/src/java/org/apache/solr/api/ApiSupport.java
deleted file mode 100644
index ca1e866..0000000
--- a/solr/core/src/java/org/apache/solr/api/ApiSupport.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.api;
-
-import java.util.Collection;
-
-/** The interface implemented by a request handler to support the V2 end points.
- */
-public interface ApiSupport {
-
-  /** A single request handler may support multiple v2 APIs.
-   *
-   * @return the list of v2 api implementations
-   */
-  Collection<Api> getApis();
-
-  /** Whether this handler should also be made available at the regular legacy path.
-   */
-  default Boolean registerV1() {
-    return Boolean.TRUE;
-  }
-
-  /** Whether this request handler must be made available at the /v2/ path.
-   */
-  default Boolean registerV2() {
-    return Boolean.FALSE;
-  }
-
-
-}
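
A hedged sketch of a component opting in to v2 only (the class and its behavior are hypothetical; it is assumed to live in org.apache.solr.api so Api and ApiBag resolve without imports):

    import java.util.Collection;
    import java.util.Collections;
    import org.apache.solr.request.SolrQueryRequest;
    import org.apache.solr.response.SolrQueryResponse;

    class PingApiSupport implements ApiSupport {
      @Override
      public Collection<Api> getApis() {
        return Collections.singletonList(new Api(ApiBag.EMPTY_SPEC) {
          @Override
          public void call(SolrQueryRequest req, SolrQueryResponse rsp) {
            rsp.add("status", "OK");
          }
        });
      }

      @Override
      public Boolean registerV1() {
        return Boolean.FALSE; // skip the legacy path
      }

      @Override
      public Boolean registerV2() {
        return Boolean.TRUE; // expose only under /v2/
      }
    }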

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/api/V2HttpCall.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/api/V2HttpCall.java b/solr/core/src/java/org/apache/solr/api/V2HttpCall.java
deleted file mode 100644
index d2b891e..0000000
--- a/solr/core/src/java/org/apache/solr/api/V2HttpCall.java
+++ /dev/null
@@ -1,384 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.api;
-
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.function.Supplier;
-
-import com.google.common.collect.ImmutableSet;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.util.JsonSchemaValidator;
-import org.apache.solr.common.util.PathTrie;
-import org.apache.solr.common.util.ValidatingJsonMap;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.PluginBag;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.handler.RequestHandlerUtils;
-import org.apache.solr.logging.MDCLoggingContext;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.response.QueryResponseWriter;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.security.AuthorizationContext;
-import org.apache.solr.servlet.HttpSolrCall;
-import org.apache.solr.servlet.SolrDispatchFilter;
-import org.apache.solr.servlet.SolrRequestParsers;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.params.CommonParams.JSON;
-import static org.apache.solr.common.params.CommonParams.WT;
-import static org.apache.solr.common.util.PathTrie.getPathSegments;
-import static org.apache.solr.servlet.SolrDispatchFilter.Action.ADMIN;
-import static org.apache.solr.servlet.SolrDispatchFilter.Action.PROCESS;
-import static org.apache.solr.servlet.SolrDispatchFilter.Action.REMOTEQUERY;
-
-// Handles requests arriving at the '/v2' path.
-public class V2HttpCall extends HttpSolrCall {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private Api api;
-  List<String> pieces;
-  private String prefix;
-  HashMap<String, String> parts = new HashMap<>();
-  static final Set<String> knownPrefixes = ImmutableSet.of("cluster", "node", "collections", "cores", "c");
-
-  public V2HttpCall(SolrDispatchFilter solrDispatchFilter, CoreContainer cc,
-                    HttpServletRequest request, HttpServletResponse response, boolean retry) {
-    super(solrDispatchFilter, cc, request, response, retry);
-  }
-
-  protected void init() throws Exception {
-    String path = this.path;
-    final String fullPath = path = path.substring(7);//strip off '/____v2'
-    try {
-      pieces = getPathSegments(path);
-      if (pieces.size() == 0 || (pieces.size() == 1 && path.endsWith(CommonParams.INTROSPECT))) {
-        api = new Api(null) {
-          @Override
-          public void call(SolrQueryRequest req, SolrQueryResponse rsp) {
-            rsp.add("documentation", "https://lucene.apache.org/solr/guide/v2-api.html");
-            rsp.add("description", "V2 API root path");
-          }
-        };
-        initAdminRequest(path);
-        return;
-      } else {
-        prefix = pieces.get(0);
-      }
-
-      boolean isCompositeApi = false;
-      if (knownPrefixes.contains(prefix)) {
-        api = getApiInfo(cores.getRequestHandlers(), path, req.getMethod(), fullPath, parts);
-        if (api != null) {
-          isCompositeApi = api instanceof CompositeApi;
-          if (!isCompositeApi) {
-            initAdminRequest(path);
-            return;
-          }
-        }
-      }
-
-      if ("c".equals(prefix) || "collections".equals(prefix)) {
-        origCorename = pieces.get(1);
-
-        DocCollection collection = resolveDocCollection(queryParams.get(COLLECTION_PROP, origCorename));
-
-        if (collection == null) {
-          if ( ! path.endsWith(CommonParams.INTROSPECT)) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "no such collection or alias");
-          }
-        } else {
-          boolean isPreferLeader = (path.endsWith("/update") || path.contains("/update/"));
-          core = getCoreByCollection(collection.getName(), isPreferLeader);
-          if (core == null) {
-            // this collection exists, but this node does not have a replica for it
-            extractRemotePath(collection.getName(), origCorename);
-            if (action == REMOTEQUERY) {
-              this.path = path = path.substring(prefix.length() + origCorename.length() + 2);
-              return;
-            }
-          }
-        }
-      } else if ("cores".equals(prefix)) {
-        origCorename = pieces.get(1);
-        core = cores.getCore(origCorename);
-      }
-      if (core == null) {
-        log.error(">> path: '" + path + "'");
-        if (path.endsWith(CommonParams.INTROSPECT)) {
-          initAdminRequest(path);
-          return;
-        } else {
-          throw new SolrException(SolrException.ErrorCode.NOT_FOUND, "no core retrieved for " + origCorename);
-        }
-      }
-
-      this.path = path = path.substring(prefix.length() + pieces.get(1).length() + 2);
-      Api apiInfo = getApiInfo(core.getRequestHandlers(), path, req.getMethod(), fullPath, parts);
-      if (isCompositeApi && apiInfo instanceof CompositeApi) {
-        ((CompositeApi) this.api).add(apiInfo);
-      } else {
-        api = apiInfo == null ? api : apiInfo;
-      }
-      MDCLoggingContext.setCore(core);
-      parseRequest();
-
-      addCollectionParamIfNeeded(getCollectionsList());
-
-      action = PROCESS;
-      // we are done with a valid handler
-    } catch (RuntimeException rte) {
-      log.error("Error in init()", rte);
-      throw rte;
-    } finally {
-      if (action == null && api == null) action = PROCESS;
-      if (solrReq != null) solrReq.getContext().put(CommonParams.PATH, path);
-    }
-  }
-
-  private void initAdminRequest(String path) throws Exception {
-    solrReq = SolrRequestParsers.DEFAULT.parse(null, path, req);
-    solrReq.getContext().put(CoreContainer.class.getName(), cores);
-    requestType = AuthorizationContext.RequestType.ADMIN;
-    action = ADMIN;
-  }
-
-  protected void parseRequest() throws Exception {
-    config = core.getSolrConfig();
-    // get or create/cache the parser for the core
-    SolrRequestParsers parser = config.getRequestParsers();
-
-    // With a valid handler and a valid core...
-
-    if (solrReq == null) solrReq = parser.parse(core, path, req);
-  }
-
-  /**
-   * Looks up the collection from the collection string (which may be comma delimited).
-   * Also sets {@link #collectionsList} as a side effect.
-   * If the collection is not found on the first attempt, the alias and collection
-   * info is synced from ZK and the lookup is retried once.
-   */
-  protected DocCollection resolveDocCollection(String collectionStr) {
-    if (!cores.isZooKeeperAware()) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Solr not running in cloud mode ");
-    }
-    ZkStateReader zkStateReader = cores.getZkController().getZkStateReader();
-
-    Supplier<DocCollection> logic = () -> {
-      this.collectionsList = resolveCollectionListOrAlias(collectionStr); // side-effect
-      String collectionName = collectionsList.get(0); // first
-      //TODO an option to choose another collection in the list if can't find a local replica of the first?
-
-      return zkStateReader.getClusterState().getCollectionOrNull(collectionName);
-    };
-
-    DocCollection docCollection = logic.get();
-    if (docCollection != null) {
-      return docCollection;
-    }
-    // ensure our view is up to date before trying again
-    try {
-      zkStateReader.aliasesManager.update();
-      zkStateReader.forceUpdateCollection(collectionsList.get(0));
-    } catch (Exception e) {
-      log.error("Error trying to update state while resolving collection.", e);
-      //don't propagate exception on purpose
-    }
-    return logic.get();
-  }
-
-  public static Api getApiInfo(PluginBag<SolrRequestHandler> requestHandlers,
-                               String path, String method,
-                               String fullPath,
-                               Map<String, String> parts) {
-    fullPath = fullPath == null ? path : fullPath;
-    Api api = requestHandlers.v2lookup(path, method, parts);
-    if (api == null && path.endsWith(CommonParams.INTROSPECT)) {
-      // the particular HTTP method has no API registered at this path,
-      // so check whether any other method has one
-      api = requestHandlers.v2lookup(path, null, parts);
-    }
-
-    if (api == null) {
-      return getSubPathApi(requestHandlers, path, fullPath, new CompositeApi(null));
-    }
-
-    if (api instanceof ApiBag.IntrospectApi) {
-      final Map<String, Api> apis = new LinkedHashMap<>();
-      for (String m : SolrRequest.SUPPORTED_METHODS) {
-        Api x = requestHandlers.v2lookup(path, m, parts);
-        if (x != null) apis.put(m, x);
-      }
-      api = new CompositeApi(new Api(ApiBag.EMPTY_SPEC) {
-        @Override
-        public void call(SolrQueryRequest req, SolrQueryResponse rsp) {
-          String method = req.getParams().get("method");
-          Set<Api> added = new HashSet<>();
-          for (Map.Entry<String, Api> e : apis.entrySet()) {
-            if (method == null || e.getKey().equals(method)) {
-              if (!added.contains(e.getValue())) {
-                e.getValue().call(req, rsp);
-                added.add(e.getValue());
-              }
-            }
-          }
-          RequestHandlerUtils.addExperimentalFormatWarning(rsp);
-        }
-      });
-      getSubPathApi(requestHandlers,path, fullPath, (CompositeApi) api);
-    }
-
-
-    return api;
-  }
-
-  private static CompositeApi getSubPathApi(PluginBag<SolrRequestHandler> requestHandlers, String path, String fullPath, CompositeApi compositeApi) {
-
-    String newPath = path.endsWith(CommonParams.INTROSPECT) ? path.substring(0, path.length() - CommonParams.INTROSPECT.length()) : path;
-    Map<String, Set<String>> subpaths = new LinkedHashMap<>();
-
-    getSubPaths(newPath, requestHandlers.getApiBag(), subpaths);
-    final Map<String, Set<String>> subPaths = subpaths;
-    if (subPaths.isEmpty()) return null;
-    return compositeApi.add(new Api(() -> ValidatingJsonMap.EMPTY) {
-      @Override
-      public void call(SolrQueryRequest req1, SolrQueryResponse rsp) {
-        String prefix = fullPath.endsWith(CommonParams.INTROSPECT) ?
-            fullPath.substring(0, fullPath.length() - CommonParams.INTROSPECT.length()) :
-            fullPath;
-        LinkedHashMap<String, Set<String>> result = new LinkedHashMap<>(subPaths.size());
-        for (Map.Entry<String, Set<String>> e : subPaths.entrySet()) {
-          if (e.getKey().endsWith(CommonParams.INTROSPECT)) continue;
-          result.put(prefix + e.getKey(), e.getValue());
-        }
-
-        Map m = (Map) rsp.getValues().get("availableSubPaths");
-        if(m != null){
-          m.putAll(result);
-        } else {
-          rsp.add("availableSubPaths", result);
-        }
-      }
-    });
-  }
-
-  private static void getSubPaths(String path, ApiBag bag, Map<String, Set<String>> pathsVsMethod) {
-    for (SolrRequest.METHOD m : SolrRequest.METHOD.values()) {
-      PathTrie<Api> registry = bag.getRegistry(m.toString());
-      if (registry != null) {
-        HashSet<String> subPaths = new HashSet<>();
-        registry.lookup(path, new HashMap<>(), subPaths);
-        for (String subPath : subPaths) {
-          Set<String> supportedMethods = pathsVsMethod.get(subPath);
-          if (supportedMethods == null) pathsVsMethod.put(subPath, supportedMethods = new HashSet<>());
-          supportedMethods.add(m.toString());
-        }
-      }
-    }
-  }
-
-  public static class CompositeApi extends Api {
-    private LinkedList<Api> apis = new LinkedList<>();
-
-    public CompositeApi(Api api) {
-      super(ApiBag.EMPTY_SPEC);
-      if (api != null) apis.add(api);
-    }
-
-    @Override
-    public void call(SolrQueryRequest req, SolrQueryResponse rsp) {
-      for (Api api : apis) {
-        api.call(req, rsp);
-      }
-
-    }
-
-    public CompositeApi add(Api api) {
-      apis.add(api);
-      return this;
-    }
-  }
-
-  @Override
-  protected void handleAdmin(SolrQueryResponse solrResp) {
-    try {
-      api.call(this.solrReq, solrResp);
-    } catch (Exception e) {
-      solrResp.setException(e);
-    }
-  }
-
-  @Override
-  protected void execute(SolrQueryResponse rsp) {
-    SolrCore.preDecorateResponse(solrReq, rsp);
-    if (api == null) {
-      rsp.setException(new SolrException(SolrException.ErrorCode.NOT_FOUND,
-          "Cannot find correspond api for the path : " + solrReq.getContext().get(CommonParams.PATH)));
-    } else {
-      try {
-        api.call(solrReq, rsp);
-      } catch (Exception e) {
-        rsp.setException(e);
-      }
-    }
-
-    SolrCore.postDecorateResponse(handler, solrReq, rsp);
-  }
-
-  @Override
-  protected Object _getHandler() {
-    return api;
-  }
-
-  public Map<String,String> getUrlParts(){
-    return parts;
-  }
-
-  @Override
-  protected QueryResponseWriter getResponseWriter() {
-    String wt = solrReq.getParams().get(WT, JSON);
-    if (core != null) return core.getResponseWriters().get(wt);
-    return SolrCore.DEFAULT_RESPONSE_WRITERS.get(wt);
-  }
-
-  @Override
-  protected ValidatingJsonMap getSpec() {
-    return api == null ? null : api.getSpec();
-  }
-
-  @Override
-  protected Map<String, JsonSchemaValidator> getValidators() {
-    return api == null ? null : api.getCommandSchema();
-  }
-}
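
From the client side, the paths handled above can be exercised with SolrJ's V2Request; a hedged sketch (the base URL and collection name are made up):

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.SolrRequest;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.V2Request;

    public class V2ClientDemo {
      public static void main(String[] args) throws Exception {
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
          V2Request req = new V2Request.Builder("/c/techproducts/config")
              .withMethod(SolrRequest.METHOD.GET)
              .build();
          System.out.println(client.request(req)); // NamedList response from the v2 endpoint
        }
      }
    }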

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/api/package-info.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/api/package-info.java b/solr/core/src/java/org/apache/solr/api/package-info.java
deleted file mode 100644
index c3574c7..0000000
--- a/solr/core/src/java/org/apache/solr/api/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Commonly used classes for Solr V2 API.
- */
-package org.apache.solr.api;
-


[52/52] [abbrv] lucene-solr:jira/gradle: Adding more dependencies for solrj to make the tests pass

Posted by da...@apache.org.
Adding more dependencies for solrj to make the tests pass


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/c9cb4fe9
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/c9cb4fe9
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/c9cb4fe9

Branch: refs/heads/jira/gradle
Commit: c9cb4fe961f7472cd9081839fd3510f6d166a0d2
Parents: 0ae21ad
Author: Cao Manh Dat <da...@apache.org>
Authored: Tue Oct 23 07:03:43 2018 +0700
Committer: Cao Manh Dat <da...@apache.org>
Committed: Tue Oct 23 07:03:43 2018 +0700

----------------------------------------------------------------------
 build.gradle                             | 12 +++++----
 settings.gradle                          |  3 ++-
 solr/core/build.gradle                   | 10 +++----
 solr/example/example-DIH/build.gradle    |  6 +++++
 solr/solrj/build.gradle                  |  1 +
 solr/solrj/src/test/resources/log4j2.xml | 39 +++++++++++++++++++++++++++
 6 files changed, 58 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c9cb4fe9/build.gradle
----------------------------------------------------------------------
diff --git a/build.gradle b/build.gradle
index 7c5c062..df21ce8 100644
--- a/build.gradle
+++ b/build.gradle
@@ -22,13 +22,15 @@ allprojects {
         }
         jcenter() 
     }
-    configurations {
-	    all {
-	        // exclude group: 'com.google.guava', module: 'guava'
-	    }
-	}
 }
 
+subprojects {
+	plugins.withType(JavaPlugin) {
+		test {
+			systemProperty 'java.security.egd', 'file:/dev/./urandom'
+		}
+	}
+}
 
 // These versions are defined here because they represent
 // a dependency version which should match across multiple

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c9cb4fe9/settings.gradle
----------------------------------------------------------------------
diff --git a/settings.gradle b/settings.gradle
index 5016b7a..56554a7 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -44,4 +44,5 @@ include 'lucene:test-framework'
 include 'solr:core'
 include 'solr:server'
 include 'solr:solrj'
-include 'solr:test-framework'
\ No newline at end of file
+include 'solr:test-framework'
+include 'solr:example:example-DIH'
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c9cb4fe9/solr/core/build.gradle
----------------------------------------------------------------------
diff --git a/solr/core/build.gradle b/solr/core/build.gradle
index 8be91c3..e8cc58e 100644
--- a/solr/core/build.gradle
+++ b/solr/core/build.gradle
@@ -1,16 +1,12 @@
 apply plugin: 'java'
 
-
-task printClasspath {
-    doLast {
-        configurations.testRuntime.each { println it }
-    }
-}
-
 dependencies {
 	compile project(':solr:server')
 	compile project(':solr:solrj')
 	compile project(':lucene:analysis:common')
+	compile project(':lucene:analysis:phonetic')
+	compile project(':lucene:analysis:kuromoji')
+	compile project(':lucene:analysis:nori')
 	compile project(':lucene:suggest')
 	compile project(':lucene:queries')
 	compile project(':lucene:highlighter')

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c9cb4fe9/solr/example/example-DIH/build.gradle
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/build.gradle b/solr/example/example-DIH/build.gradle
new file mode 100644
index 0000000..962acf8
--- /dev/null
+++ b/solr/example/example-DIH/build.gradle
@@ -0,0 +1,6 @@
+apply plugin: 'java'
+
+dependencies {
+	compile library.hsqldb
+	compile library.derby
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c9cb4fe9/solr/solrj/build.gradle
----------------------------------------------------------------------
diff --git a/solr/solrj/build.gradle b/solr/solrj/build.gradle
index 86c1f59..5932045 100644
--- a/solr/solrj/build.gradle
+++ b/solr/solrj/build.gradle
@@ -18,4 +18,5 @@ dependencies {
 	testCompile library.byte_buddy
 	testCompile library.objenesis
 	testCompile project(':solr:test-framework')
+	testCompile project(':solr:example:example-DIH')
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c9cb4fe9/solr/solrj/src/test/resources/log4j2.xml
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/resources/log4j2.xml b/solr/solrj/src/test/resources/log4j2.xml
new file mode 100644
index 0000000..7d0ebf7
--- /dev/null
+++ b/solr/solrj/src/test/resources/log4j2.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<Configuration>
+  <Appenders>
+    <Console name="STDERR" target="SYSTEM_ERR">
+      <PatternLayout>
+        <Pattern>
+          %-4r %-5p (%t) [%X{node_name} %X{collection} %X{shard} %X{replica} %X{core}] %c{1.} %m%n
+        </Pattern>
+      </PatternLayout>
+    </Console>
+  </Appenders>
+  <Loggers>
+    <Logger name="org.apache.zookeeper" level="WARN"/>
+    <Logger name="org.apache.hadoop" level="WARN"/>
+    <Logger name="org.apache.directory" level="WARN"/>
+    <Logger name="org.apache.solr.hadoop" level="INFO"/>
+
+    <Root level="INFO">
+      <AppenderRef ref="STDERR"/>
+    </Root>
+  </Loggers>
+</Configuration>


[18/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
deleted file mode 100644
index 654b166..0000000
--- a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
+++ /dev/null
@@ -1,1826 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.NoSuchFileException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Properties;
-import java.util.Random;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import java.util.zip.Adler32;
-import java.util.zip.Checksum;
-import java.util.zip.DeflaterOutputStream;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.io.output.CloseShieldOutputStream;
-import org.apache.lucene.codecs.CodecUtil;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexCommit;
-import org.apache.lucene.index.IndexDeletionPolicy;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.SegmentCommitInfo;
-import org.apache.lucene.index.SegmentInfos;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.RateLimiter;
-import org.apache.lucene.util.Version;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.FastOutputStream;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.SuppressForbidden;
-import org.apache.solr.core.CloseHook;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.DirectoryFactory.DirContext;
-import org.apache.solr.core.IndexDeletionPolicyWrapper;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.SolrDeletionPolicy;
-import org.apache.solr.core.SolrEventListener;
-import org.apache.solr.core.backup.repository.BackupRepository;
-import org.apache.solr.core.backup.repository.LocalFileSystemRepository;
-import org.apache.solr.handler.IndexFetcher.IndexFetchResult;
-import org.apache.solr.metrics.MetricsMap;
-import org.apache.solr.metrics.SolrMetricManager;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.update.CdcrUpdateLog;
-import org.apache.solr.update.SolrIndexWriter;
-import org.apache.solr.update.VersionInfo;
-import org.apache.solr.util.DefaultSolrThreadFactory;
-import org.apache.solr.util.NumberUtils;
-import org.apache.solr.util.PropertiesInputStream;
-import org.apache.solr.util.RefCounted;
-import org.apache.solr.util.plugin.SolrCoreAware;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.core.Config.assertWarnOrFail;
-
-/**
- * <p> A Handler which provides a REST API for replication and serves replication requests from slaves. </p>
- * <p>When running on the master, it provides the following commands:</p>
- * <ol>
- * <li>Get the current replicable index version (command=indexversion)</li>
- * <li>Get the list of files for a given index generation (command=filelist&amp;generation=&lt;GENERATION&gt;)</li>
- * <li>Get full or a part (chunk) of a given index or a config file (command=filecontent&amp;file=&lt;FILE_NAME&gt;).
- * You can optionally specify an offset and length to get that chunk of the file.
- * You can request a configuration file by using the "cf" parameter instead of the "file" parameter.</li>
- * <li>Get status/statistics (command=details)</li>
- * </ol>
- * <p>When running on the slave, it provides the following commands:</p>
- * <ol>
- * <li>Perform an index fetch now (command=fetchindex)</li>
- * <li>Get status/statistics (command=details)</li>
- * <li>Abort an index fetch (command=abortfetch)</li>
- * <li>Enable/Disable polling the master for new versions (command=enablepoll or command=disablepoll)</li>
- * </ol>
- *
- * @since solr 1.4
- */
-public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAware {
-
-  public static final String PATH = "/replication";
-
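-  // Illustrative request URLs for the commands documented above, assuming the handler is
-  // registered at the default /replication path (host, port and core name are hypothetical):
-  //
-  //   http://master:8983/solr/core1/replication?command=indexversion
-  //   http://master:8983/solr/core1/replication?command=filelist&generation=5
-  //   http://slave:8983/solr/core1/replication?command=fetchindex&wait=true
-  //   http://slave:8983/solr/core1/replication?command=details
-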
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  SolrCore core;
-  
-  private volatile boolean closed = false;
-
-  private static final class CommitVersionInfo {
-    public final long version;
-    public final long generation;
-    private CommitVersionInfo(long g, long v) {
-      generation = g;
-      version = v;
-    }
-    /**
-     * Builds a CommitVersionInfo for the specified IndexCommit.
-     * The result will never be null, but version and generation may be zero if
-     * there are problems extracting them from the commit data.
-     */
-    public static CommitVersionInfo build(IndexCommit commit) {
-      long generation = commit.getGeneration();
-      long version = 0;
-      try {
-        final Map<String,String> commitData = commit.getUserData();
-        String commitTime = commitData.get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
-        if (commitTime != null) {
-          try {
-            version = Long.parseLong(commitTime);
-          } catch (NumberFormatException e) {
-            log.warn("Version in commitData was not formatted correctly: " + commitTime, e);
-          }
-        }
-      } catch (IOException e) {
-        log.warn("Unable to get version from commitData, commit: " + commit, e);
-      }
-      return new CommitVersionInfo(generation, version);
-    }
-
-    public String toString() {
-      return "generation=" + generation + ",version=" + version;
-    }
-  }
-
-  private IndexFetcher pollingIndexFetcher;
-
-  private ReentrantLock indexFetchLock = new ReentrantLock();
-
-  private ExecutorService restoreExecutor = ExecutorUtil.newMDCAwareSingleThreadExecutor(
-      new DefaultSolrThreadFactory("restoreExecutor"));
-
-  private volatile Future<Boolean> restoreFuture;
-
-  private volatile String currentRestoreName;
-
-  private String includeConfFiles;
-
-  private NamedList<String> confFileNameAlias = new NamedList<>();
-
-  private boolean isMaster = false;
-
-  private boolean isSlave = false;
-
-  private boolean replicateOnOptimize = false;
-
-  private boolean replicateOnCommit = false;
-
-  private boolean replicateOnStart = false;
-
-  private ScheduledExecutorService executorService;
-
-  private volatile long executorStartTime;
-
-  private int numberBackupsToKeep = 0; //zero: do not delete old backups
-
-  private int numTimesReplicated = 0;
-
-  private final Map<String, FileInfo> confFileInfoCache = new HashMap<>();
-
-  private Long reserveCommitDuration = readIntervalMs("00:00:10");
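-  // Interval strings here and for pollInterval use the "HH:mm:ss" form parsed by
-  // readIntervalMs/readIntervalNs further down in this class; e.g. "00:00:10" is ten
-  // seconds and "00:01:00" is one minute.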
-
-  volatile IndexCommit indexCommitPoint;
-
-  volatile NamedList<?> snapShootDetails;
-
-  private AtomicBoolean replicationEnabled = new AtomicBoolean(true);
-
-  private Long pollIntervalNs;
-  private String pollIntervalStr;
-
-  private PollListener pollListener;
-  public interface PollListener {
-    void onComplete(SolrCore solrCore, IndexFetchResult fetchResult) throws IOException;
-  }
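-
-  // Illustrative (hypothetical) usage, e.g. from a test, to observe each poll cycle's outcome:
-  //   replicationHandler.setPollListener((solrCore, fetchResult) ->
-  //       log.info("poll cycle finished for {}: {}", solrCore.getName(), fetchResult));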
-
-  /**
-   * Disable the timer task for polling
-   */
-  private AtomicBoolean pollDisabled = new AtomicBoolean(false);
-
-  String getPollInterval() {
-    return pollIntervalStr;
-  }
-
-  public void setPollListener(PollListener pollListener) {
-    this.pollListener = pollListener;
-  }
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    rsp.setHttpCaching(false);
-    final SolrParams solrParams = req.getParams();
-    String command = solrParams.get(COMMAND);
-    if (command == null) {
-      rsp.add(STATUS, OK_STATUS);
-      rsp.add("message", "No command");
-      return;
-    }
-    // This command does not give the current index version of the master
-    // It gives the current 'replicateable' index version
-    if (command.equals(CMD_INDEX_VERSION)) {
-      IndexCommit commitPoint = indexCommitPoint;  // make a copy so it won't change
-
-      if (commitPoint == null) {
-        // if this handler is 'lazy', we may not have tracked the last commit
-        // because our commit listener is registered on inform
-        commitPoint = core.getDeletionPolicy().getLatestCommit();
-      }
-
-      if (commitPoint != null && replicationEnabled.get()) {
-        //
-        // There is a race condition here.  The commit point may be changed / deleted by the time
-        // we get around to reserving it.  This is a very small window though, and should not result
-        // in a catastrophic failure, but will result in the client getting an empty file list for
-        // the CMD_GET_FILE_LIST command.
-        //
-        core.getDeletionPolicy().setReserveDuration(commitPoint.getGeneration(), reserveCommitDuration);
-        rsp.add(CMD_INDEX_VERSION, IndexDeletionPolicyWrapper.getCommitTimestamp(commitPoint));
-        rsp.add(GENERATION, commitPoint.getGeneration());
-      } else {
-        // This happens when replication is not configured to happen after startup and no commit/optimize
-        // has happened yet.
-        rsp.add(CMD_INDEX_VERSION, 0L);
-        rsp.add(GENERATION, 0L);
-      }
-    } else if (command.equals(CMD_GET_FILE)) {
-      getFileStream(solrParams, rsp);
-    } else if (command.equals(CMD_GET_FILE_LIST)) {
-      getFileList(solrParams, rsp);
-    } else if (command.equalsIgnoreCase(CMD_BACKUP)) {
-      doSnapShoot(new ModifiableSolrParams(solrParams), rsp, req);
-      rsp.add(STATUS, OK_STATUS);
-    } else if (command.equalsIgnoreCase(CMD_RESTORE)) {
-      restore(new ModifiableSolrParams(solrParams), rsp, req);
-      rsp.add(STATUS, OK_STATUS);
-    } else if (command.equalsIgnoreCase(CMD_RESTORE_STATUS)) {
-      rsp.add(CMD_RESTORE_STATUS, getRestoreStatus());
-    } else if (command.equalsIgnoreCase(CMD_DELETE_BACKUP)) {
-      deleteSnapshot(new ModifiableSolrParams(solrParams));
-      rsp.add(STATUS, OK_STATUS);
-    } else if (command.equalsIgnoreCase(CMD_FETCH_INDEX)) {
-      String masterUrl = solrParams.get(MASTER_URL);
-      if (!isSlave && masterUrl == null) {
-        rsp.add(STATUS,ERR_STATUS);
-        rsp.add("message","No slave configured or no 'masterUrl' Specified");
-        return;
-      }
-      final SolrParams paramsCopy = new ModifiableSolrParams(solrParams);
-      Thread fetchThread = new Thread(() -> doFetch(paramsCopy, false), "explicit-fetchindex-cmd") ;
-      fetchThread.setDaemon(false);
-      fetchThread.start();
-      if (solrParams.getBool(WAIT, false)) {
-        fetchThread.join();
-      }
-      rsp.add(STATUS, OK_STATUS);
-    } else if (command.equalsIgnoreCase(CMD_DISABLE_POLL)) {
-      if (pollingIndexFetcher != null){
-        disablePoll();
-        rsp.add(STATUS, OK_STATUS);
-      } else {
-        rsp.add(STATUS, ERR_STATUS);
-        rsp.add("message","No slave configured");
-      }
-    } else if (command.equalsIgnoreCase(CMD_ENABLE_POLL)) {
-      if (pollingIndexFetcher != null){
-        enablePoll();
-        rsp.add(STATUS, OK_STATUS);
-      }else {
-        rsp.add(STATUS,ERR_STATUS);
-        rsp.add("message","No slave configured");
-      }
-    } else if (command.equalsIgnoreCase(CMD_ABORT_FETCH)) {
-      if (abortFetch()){
-        rsp.add(STATUS, OK_STATUS);
-      } else {
-        rsp.add(STATUS,ERR_STATUS);
-        rsp.add("message","No slave configured");
-      }
-    } else if (command.equals(CMD_SHOW_COMMITS)) {
-      rsp.add(CMD_SHOW_COMMITS, getCommits());
-    } else if (command.equals(CMD_DETAILS)) {
-      rsp.add(CMD_DETAILS, getReplicationDetails(solrParams.getBool("slave", true)));
-    } else if (CMD_ENABLE_REPL.equalsIgnoreCase(command)) {
-      replicationEnabled.set(true);
-      rsp.add(STATUS, OK_STATUS);
-    } else if (CMD_DISABLE_REPL.equalsIgnoreCase(command)) {
-      replicationEnabled.set(false);
-      rsp.add(STATUS, OK_STATUS);
-    }
-  }
-
-  public boolean abortFetch() {
-    IndexFetcher fetcher = currentIndexFetcher;
-    if (fetcher != null){
-      fetcher.abortFetch();
-      return true;
-    } else {
-      return false;
-    }
-  }
-
-  private void deleteSnapshot(ModifiableSolrParams params) {
-    String name = params.get(NAME);
-    if(name == null) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Missing mandatory param: name");
-    }
-
-    SnapShooter snapShooter = new SnapShooter(core, params.get(CoreAdminParams.BACKUP_LOCATION), params.get(NAME));
-    snapShooter.validateDeleteSnapshot();
-    snapShooter.deleteSnapAsync(this);
-  }
-
-  private List<NamedList<Object>> getCommits() {
-    Map<Long, IndexCommit> commits = core.getDeletionPolicy().getCommits();
-    List<NamedList<Object>> l = new ArrayList<>();
-
-    for (IndexCommit c : commits.values()) {
-      try {
-        NamedList<Object> nl = new NamedList<>();
-        nl.add("indexVersion", IndexDeletionPolicyWrapper.getCommitTimestamp(c));
-        nl.add(GENERATION, c.getGeneration());
-        List<String> commitList = new ArrayList<>(c.getFileNames().size());
-        commitList.addAll(c.getFileNames());
-        Collections.sort(commitList);
-        nl.add(CMD_GET_FILE_LIST, commitList);
-        l.add(nl);
-      } catch (IOException e) {
-        log.warn("Exception while reading files for commit " + c, e);
-      }
-    }
-    return l;
-  }
-
-  static Long getCheckSum(Checksum checksum, File f) {
-    FileInputStream fis = null;
-    checksum.reset();
-    byte[] buffer = new byte[1024 * 1024];
-    int bytesRead;
-    try {
-      fis = new FileInputStream(f);
-      while ((bytesRead = fis.read(buffer)) >= 0)
-        checksum.update(buffer, 0, bytesRead);
-      return checksum.getValue();
-    } catch (Exception e) {
-      log.warn("Exception in finding checksum of " + f, e);
-    } finally {
-      IOUtils.closeQuietly(fis);
-    }
-    return null;
-  }
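-
-  // Illustrative call (confDir and the file name are hypothetical); one Checksum instance
-  // can be reused across calls because getCheckSum() resets it, as getConfFileInfoFromCache()
-  // below does:
-  //   Checksum adler = new Adler32();
-  //   Long sum = getCheckSum(adler, new File(confDir, "schema.xml"));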
-
-  private volatile IndexFetcher currentIndexFetcher;
-
-  public IndexFetchResult doFetch(SolrParams solrParams, boolean forceReplication) {
-    String masterUrl = solrParams == null ? null : solrParams.get(MASTER_URL);
-    if (!indexFetchLock.tryLock())
-      return IndexFetchResult.LOCK_OBTAIN_FAILED;
-    if (core.getCoreContainer().isShutDown()) {
-      log.warn("I was asked to replicate but CoreContainer is shutting down");
-      // this early return bypasses the try/finally below, so release the lock acquired above
-      indexFetchLock.unlock();
-      return IndexFetchResult.CONTAINER_IS_SHUTTING_DOWN;
-    }
-    try {
-      if (masterUrl != null) {
-        if (currentIndexFetcher != null && currentIndexFetcher != pollingIndexFetcher) {
-          currentIndexFetcher.destroy();
-        }
-        currentIndexFetcher = new IndexFetcher(solrParams.toNamedList(), this, core);
-      } else {
-        currentIndexFetcher = pollingIndexFetcher;
-      }
-      return currentIndexFetcher.fetchLatestIndex(forceReplication);
-    } catch (Exception e) {
-      SolrException.log(log, "Index fetch failed ", e);
-      if (currentIndexFetcher != pollingIndexFetcher) {
-        currentIndexFetcher.destroy();
-      }
-      return new IndexFetchResult(IndexFetchResult.FAILED_BY_EXCEPTION_MESSAGE, false, e);
-    } finally {
-      if (pollingIndexFetcher != null) {
-       if( currentIndexFetcher != pollingIndexFetcher) {
-         currentIndexFetcher.destroy();
-       }
-        currentIndexFetcher = pollingIndexFetcher;
-      }
-      indexFetchLock.unlock();
-    }
-  }
-
-  boolean isReplicating() {
-    return indexFetchLock.isLocked();
-  }
-
-  private void restore(SolrParams params, SolrQueryResponse rsp, SolrQueryRequest req) throws IOException {
-    if (restoreFuture != null && !restoreFuture.isDone()) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Restore in progress. Cannot run multiple restore operations" +
-          "for the same core");
-    }
-    String name = params.get(NAME);
-    String location = params.get(CoreAdminParams.BACKUP_LOCATION);
-
-    String repoName = params.get(CoreAdminParams.BACKUP_REPOSITORY);
-    CoreContainer cc = core.getCoreContainer();
-    BackupRepository repo = null;
-    if (repoName != null) {
-      repo = cc.newBackupRepository(Optional.of(repoName));
-      location = repo.getBackupLocation(location);
-      if (location == null) {
-        throw new IllegalArgumentException("location is required");
-      }
-    } else {
-      repo = new LocalFileSystemRepository();
-    }
-
-    //If location is not provided then assume that the restore index is present inside the data directory.
-    if (location == null) {
-      location = core.getDataDir();
-    }
-
-    URI locationUri = repo.createURI(location);
-
-    //If name is not provided then look for the latest unnamed snapshot folder (the ones with the
-    //snapshot.<timestamp> format), since we allow snapshots to be taken without providing a name.
-    //Pick the latest timestamp.
-    if (name == null) {
-      String[] filePaths = repo.listAll(locationUri);
-      List<OldBackupDirectory> dirs = new ArrayList<>();
-      for (String f : filePaths) {
-        OldBackupDirectory obd = new OldBackupDirectory(locationUri, f);
-        if (obd.getTimestamp().isPresent()) {
-          dirs.add(obd);
-        }
-      }
-      Collections.sort(dirs);
-      if (dirs.size() == 0) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "No backup name specified and none found in " + core.getDataDir());
-      }
-      name = dirs.get(0).getDirName();
-    } else {
-      //"snapshot." is prefixed by snapshooter
-      name = "snapshot." + name;
-    }
-
-    RestoreCore restoreCore = new RestoreCore(repo, core, locationUri, name);
-    try {
-      MDC.put("RestoreCore.core", core.getName());
-      MDC.put("RestoreCore.backupLocation", location);
-      MDC.put("RestoreCore.backupName", name);
-      restoreFuture = restoreExecutor.submit(restoreCore);
-      currentRestoreName = name;
-    } finally {
-      MDC.remove("RestoreCore.core");
-      MDC.remove("RestoreCore.backupLocation");
-      MDC.remove("RestoreCore.backupName");
-    }
-  }
-
-  private NamedList<Object> getRestoreStatus() {
-    NamedList<Object> status = new SimpleOrderedMap<>();
-
-    if (restoreFuture == null) {
-      status.add(STATUS, "No restore actions in progress");
-      return status;
-    }
-
-    status.add("snapshotName", currentRestoreName);
-    if (restoreFuture.isDone()) {
-      try {
-        boolean success = restoreFuture.get();
-        if (success) {
-          status.add(STATUS, SUCCESS);
-        } else {
-          status.add(STATUS, FAILED);
-        }
-      } catch (Exception e) {
-        status.add(STATUS, FAILED);
-        status.add(EXCEPTION, e.getMessage());
-      }
-    } else {
-      status.add(STATUS, "In Progress");
-    }
-    return status;
-  }
-
-  private void doSnapShoot(SolrParams params, SolrQueryResponse rsp, SolrQueryRequest req) {
-    try {
-      int numberToKeep = params.getInt(NUMBER_BACKUPS_TO_KEEP_REQUEST_PARAM, 0);
-      if (numberToKeep > 0 && numberBackupsToKeep > 0) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Cannot use " + NUMBER_BACKUPS_TO_KEEP_REQUEST_PARAM +
-            " if " + NUMBER_BACKUPS_TO_KEEP_INIT_PARAM + " was specified in the configuration.");
-      }
-      numberToKeep = Math.max(numberToKeep, numberBackupsToKeep);
-      if (numberToKeep < 1) {
-        numberToKeep = Integer.MAX_VALUE;
-      }
-
-      String location = params.get(CoreAdminParams.BACKUP_LOCATION);
-      String repoName = params.get(CoreAdminParams.BACKUP_REPOSITORY);
-      CoreContainer cc = core.getCoreContainer();
-      BackupRepository repo = null;
-      if (repoName != null) {
-        repo = cc.newBackupRepository(Optional.of(repoName));
-        location = repo.getBackupLocation(location);
-        if (location == null) {
-          throw new IllegalArgumentException("location is required");
-        }
-      } else {
-        repo = new LocalFileSystemRepository();
-        if (location == null) {
-          location = core.getDataDir();
-        } else {
-          location = core.getCoreDescriptor().getInstanceDir().resolve(location).normalize().toString();
-        }
-      }
-
-      // small race here before the commit point is saved
-      URI locationUri = repo.createURI(location);
-      String commitName = params.get(CoreAdminParams.COMMIT_NAME);
-      SnapShooter snapShooter = new SnapShooter(repo, core, locationUri, params.get(NAME), commitName);
-      snapShooter.validateCreateSnapshot();
-      snapShooter.createSnapAsync(numberToKeep, (nl) -> snapShootDetails = nl);
-    } catch (Exception e) {
-      log.error("Exception during creating a snapshot", e);
-      rsp.add("exception", e);
-    }
-  }
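-
-  // Illustrative backup/restore requests (names and paths hypothetical):
-  //   /replication?command=backup&name=nightly&location=/var/backups&numberToKeep=2
-  //   /replication?command=restore&name=nightly&location=/var/backups
-  //   /replication?command=restorestatus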
-
-  /**
-   * This method adds a FileStream object to the response. The FileStream implements a custom protocol which is
-   * understood by IndexFetcher.FileFetcher.
-   *
-   * @see IndexFetcher.LocalFsFileFetcher
-   * @see IndexFetcher.DirectoryFileFetcher
-   */
-  private void getFileStream(SolrParams solrParams, SolrQueryResponse rsp) {
-    ModifiableSolrParams rawParams = new ModifiableSolrParams(solrParams);
-    rawParams.set(CommonParams.WT, FILE_STREAM);
-
-    String cfileName = solrParams.get(CONF_FILE_SHORT);
-    String tlogFileName = solrParams.get(TLOG_FILE);
-    if (cfileName != null) {
-      rsp.add(FILE_STREAM, new LocalFsConfFileStream(solrParams));
-    } else if (tlogFileName != null) {
-      rsp.add(FILE_STREAM, new LocalFsTlogFileStream(solrParams));
-    } else {
-      rsp.add(FILE_STREAM, new DirectoryFileStream(solrParams));
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  private void getFileList(SolrParams solrParams, SolrQueryResponse rsp) {
-    String v = solrParams.get(GENERATION);
-    if (v == null) {
-      rsp.add("status", "no index generation specified");
-      return;
-    }
-    long gen = Long.parseLong(v);
-    IndexCommit commit = core.getDeletionPolicy().getCommitPoint(gen);
-
-    //System.out.println("ask for files for gen:" + commit.getGeneration() + core.getCoreDescriptor().getCoreContainer().getZkController().getNodeName());
-    if (commit == null) {
-      rsp.add("status", "invalid index generation");
-      return;
-    }
-
-    // reserve the index commit for some time
-    core.getDeletionPolicy().setReserveDuration(gen, reserveCommitDuration);
-    List<Map<String, Object>> result = new ArrayList<>();
-    Directory dir = null;
-    try {
-      dir = core.getDirectoryFactory().get(core.getNewIndexDir(), DirContext.DEFAULT, core.getSolrConfig().indexConfig.lockType);
-      SegmentInfos infos = SegmentInfos.readCommit(dir, commit.getSegmentsFileName());
-      for (SegmentCommitInfo commitInfo : infos) {
-        for (String file : commitInfo.files()) {
-          Map<String, Object> fileMeta = new HashMap<>();
-          fileMeta.put(NAME, file);
-          fileMeta.put(SIZE, dir.fileLength(file));
-
-          try (final IndexInput in = dir.openInput(file, IOContext.READONCE)) {
-            try {
-              long checksum = CodecUtil.retrieveChecksum(in);
-              fileMeta.put(CHECKSUM, checksum);
-            } catch (Exception e) {
-              log.warn("Could not read checksum from index file: " + file, e);
-            }
-          }
-
-          result.add(fileMeta);
-        }
-      }
-
-      // add the segments_N file
-
-      Map<String, Object> fileMeta = new HashMap<>();
-      fileMeta.put(NAME, infos.getSegmentsFileName());
-      fileMeta.put(SIZE, dir.fileLength(infos.getSegmentsFileName()));
-      if (infos.getId() != null) {
-        try (final IndexInput in = dir.openInput(infos.getSegmentsFileName(), IOContext.READONCE)) {
-          try {
-            fileMeta.put(CHECKSUM, CodecUtil.retrieveChecksum(in));
-          } catch (Exception e) {
-            log.warn("Could not read checksum from index file: " + infos.getSegmentsFileName(), e);
-          }
-        }
-      }
-      result.add(fileMeta);
-    } catch (IOException e) {
-      rsp.add("status", "unable to get file names for given index generation");
-      rsp.add(EXCEPTION, e);
-      log.error("Unable to get file names for indexCommit generation: " + gen, e);
-    } finally {
-      if (dir != null) {
-        try {
-          core.getDirectoryFactory().release(dir);
-        } catch (IOException e) {
-          SolrException.log(log, "Could not release directory after fetching file list", e);
-        }
-      }
-    }
-    rsp.add(CMD_GET_FILE_LIST, result);
-
-    if (solrParams.getBool(TLOG_FILES, false)) {
-      try {
-        List<Map<String, Object>> tlogfiles = getTlogFileList(commit);
-        log.info("Adding tlog files to list: " + tlogfiles);
-        rsp.add(TLOG_FILES, tlogfiles);
-      }
-      catch (IOException e) {
-        rsp.add("status", "unable to get tlog file names for given index generation");
-        rsp.add(EXCEPTION, e);
-        log.error("Unable to get tlog file names for indexCommit generation: " + gen, e);
-      }
-    }
-
-    if (confFileNameAlias.size() < 1 || core.getCoreContainer().isZooKeeperAware())
-      return;
-    log.debug("Adding config files to list: " + includeConfFiles);
-    //if configuration files need to be included get their details
-    rsp.add(CONF_FILES, getConfFileInfoFromCache(confFileNameAlias, confFileInfoCache));
-  }
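-
-  // Shape of a successful filelist response (values illustrative):
-  //   filelist  -> [ {name=_0.cfs, size=1234, checksum=...}, ..., {name=segments_5, ...} ]
-  //   tlogFiles -> present only when tlogFiles=true was requested
-  //   confFiles -> present only when conf files are configured and not running in SolrCloud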
-
-  /**
-   * Retrieves the list of tlog files associated to a commit point.
-   */
-  List<Map<String, Object>> getTlogFileList(IndexCommit commit) throws IOException {
-    long maxVersion = this.getMaxVersion(commit);
-    CdcrUpdateLog ulog = (CdcrUpdateLog) core.getUpdateHandler().getUpdateLog();
-    String[] logList = ulog.getLogList(new File(ulog.getLogDir()));
-    List<Map<String, Object>> tlogFiles = new ArrayList<>();
-    for (String fileName : logList) {
-      // filter out tlogs that are older than the current index commit generation, so that the list of tlog files is
-      // in sync with the latest index commit point
-      long startVersion = Math.abs(Long.parseLong(fileName.substring(fileName.lastIndexOf('.') + 1)));
-      if (startVersion < maxVersion) {
-        Map<String, Object> fileMeta = new HashMap<>();
-        fileMeta.put(NAME, fileName);
-        fileMeta.put(SIZE, new File(ulog.getLogDir(), fileName).length());
-        tlogFiles.add(fileMeta);
-      }
-    }
-    return tlogFiles;
-  }
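-
-  // The start version is parsed from the text after the last '.' in the tlog file name; for
-  // a hypothetical "tlog.0000000000000000007.1623456789" the startVersion is 1623456789.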
-
-  /**
-   * Retrieves the maximum version number from an index commit.
-   */
-  private long getMaxVersion(IndexCommit commit) throws IOException {
-    try (DirectoryReader reader = DirectoryReader.open(commit)) {
-      IndexSearcher searcher = new IndexSearcher(reader);
-      VersionInfo vinfo = core.getUpdateHandler().getUpdateLog().getVersionInfo();
-      return Math.abs(vinfo.getMaxVersionFromIndex(searcher));
-    }
-  }
-
-  /**
-   * For configuration files, a checksum of the file is included because, unlike index files, they may have the same
-   * content but different timestamps.
-   * <p/>
-   * The local conf file information is cached so that the checksum does not have to be computed every time. The cache
-   * is refreshed only if the lastModified timestamp of the file changes.
-   */
-  List<Map<String, Object>> getConfFileInfoFromCache(NamedList<String> nameAndAlias,
-                                                     final Map<String, FileInfo> confFileInfoCache) {
-    List<Map<String, Object>> confFiles = new ArrayList<>();
-    synchronized (confFileInfoCache) {
-      File confDir = new File(core.getResourceLoader().getConfigDir());
-      Checksum checksum = null;
-      for (int i = 0; i < nameAndAlias.size(); i++) {
-        String cf = nameAndAlias.getName(i);
-        File f = new File(confDir, cf);
-        if (!f.exists() || f.isDirectory()) continue; //must not happen
-        FileInfo info = confFileInfoCache.get(cf);
-        if (info == null || info.lastmodified != f.lastModified() || info.size != f.length()) {
-          if (checksum == null) checksum = new Adler32();
-          info = new FileInfo(f.lastModified(), cf, f.length(), getCheckSum(checksum, f));
-          confFileInfoCache.put(cf, info);
-        }
-        Map<String, Object> m = info.getAsMap();
-        if (nameAndAlias.getVal(i) != null) m.put(ALIAS, nameAndAlias.getVal(i));
-        confFiles.add(m);
-      }
-    }
-    return confFiles;
-  }
-
-  static class FileInfo {
-    long lastmodified;
-    String name;
-    long size;
-    long checksum;
-
-    public FileInfo(long lastmodified, String name, long size, long checksum) {
-      this.lastmodified = lastmodified;
-      this.name = name;
-      this.size = size;
-      this.checksum = checksum;
-    }
-
-    Map<String, Object> getAsMap() {
-      Map<String, Object> map = new HashMap<>();
-      map.put(NAME, name);
-      map.put(SIZE, size);
-      map.put(CHECKSUM, checksum);
-      return map;
-    }
-  }
-
-  void disablePoll() {
-    if (isSlave) {
-      pollDisabled.set(true);
-      log.info("inside disable poll, value of pollDisabled = " + pollDisabled);
-    }
-  }
-
-  void enablePoll() {
-    if (isSlave) {
-      pollDisabled.set(false);
-      log.info("inside enable poll, value of pollDisabled = " + pollDisabled);
-    }
-  }
-
-  boolean isPollingDisabled() {
-    return pollDisabled.get();
-  }
-
-  @SuppressForbidden(reason = "Need currentTimeMillis, to output next execution time in replication details")
-  private void markScheduledExecutionStart() {
-    executorStartTime = System.currentTimeMillis();
-  }
-
-  private Date getNextScheduledExecTime() {
-    Date nextTime = null;
-    if (executorStartTime > 0)
-      nextTime = new Date(executorStartTime + TimeUnit.MILLISECONDS.convert(pollIntervalNs, TimeUnit.NANOSECONDS));
-    return nextTime;
-  }
-
-  int getTimesReplicatedSinceStartup() {
-    return numTimesReplicated;
-  }
-
-  void setTimesReplicatedSinceStartup() {
-    numTimesReplicated++;
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.REPLICATION;
-  }
-
-  @Override
-  public String getDescription() {
-    return "ReplicationHandler provides replication of index and configuration files from Master to Slaves";
-  }
-
-  /**
-   * returns the CommitVersionInfo for the current searcher, or null on error.
-   */
-  private CommitVersionInfo getIndexVersion() {
-    try {
-      return core.withSearcher(searcher -> CommitVersionInfo.build(searcher.getIndexReader().getIndexCommit()));
-    } catch (IOException e) {
-      log.warn("Unable to get index commit: ", e);
-      return null;
-    }
-  }
-
-  @Override
-  public void initializeMetrics(SolrMetricManager manager, String registry, String tag, String scope) {
-    super.initializeMetrics(manager, registry, tag, scope);
-
-    manager.registerGauge(this, registry, () -> (core != null && !core.isClosed() ? NumberUtils.readableSize(core.getIndexSize()) : ""),
-        tag, true, "indexSize", getCategory().toString(), scope);
-    manager.registerGauge(this, registry, () -> (core != null && !core.isClosed() ? getIndexVersion().toString() : ""),
-        tag, true, "indexVersion", getCategory().toString(), scope);
-    manager.registerGauge(this, registry, () -> (core != null && !core.isClosed() ? getIndexVersion().generation : 0),
-        tag, true, GENERATION, getCategory().toString(), scope);
-    manager.registerGauge(this, registry, () -> (core != null && !core.isClosed() ? core.getIndexDir() : ""),
-        tag, true, "indexPath", getCategory().toString(), scope);
-    manager.registerGauge(this, registry, () -> isMaster,
-        tag, true, "isMaster", getCategory().toString(), scope);
-    manager.registerGauge(this, registry, () -> isSlave,
-        tag, true, "isSlave", getCategory().toString(), scope);
-    final MetricsMap fetcherMap = new MetricsMap((detailed, map) -> {
-      IndexFetcher fetcher = currentIndexFetcher;
-      if (fetcher != null) {
-        map.put(MASTER_URL, fetcher.getMasterUrl());
-        if (getPollInterval() != null) {
-          map.put(POLL_INTERVAL, getPollInterval());
-        }
-        map.put("isPollingDisabled", isPollingDisabled());
-        map.put("isReplicating", isReplicating());
-        long elapsed = fetcher.getReplicationTimeElapsed();
-        long val = fetcher.getTotalBytesDownloaded();
-        if (elapsed > 0) {
-          map.put("timeElapsed", elapsed);
-          map.put("bytesDownloaded", val);
-          map.put("downloadSpeed", val / elapsed);
-        }
-        Properties props = loadReplicationProperties();
-        addVal(map, IndexFetcher.PREVIOUS_CYCLE_TIME_TAKEN, props, Long.class);
-        addVal(map, IndexFetcher.INDEX_REPLICATED_AT, props, Date.class);
-        addVal(map, IndexFetcher.CONF_FILES_REPLICATED_AT, props, Date.class);
-        addVal(map, IndexFetcher.REPLICATION_FAILED_AT, props, Date.class);
-        addVal(map, IndexFetcher.TIMES_FAILED, props, Integer.class);
-        addVal(map, IndexFetcher.TIMES_INDEX_REPLICATED, props, Integer.class);
-        addVal(map, IndexFetcher.LAST_CYCLE_BYTES_DOWNLOADED, props, Long.class);
-        addVal(map, IndexFetcher.TIMES_CONFIG_REPLICATED, props, Integer.class);
-        addVal(map, IndexFetcher.CONF_FILES_REPLICATED, props, String.class);
-      }
-    });
-    manager.registerGauge(this, registry, fetcherMap, tag, true, "fetcher", getCategory().toString(), scope);
-    manager.registerGauge(this, registry, () -> isMaster && includeConfFiles != null ? includeConfFiles : "",
-        tag, true, "confFilesToReplicate", getCategory().toString(), scope);
-    manager.registerGauge(this, registry, () -> isMaster ? getReplicateAfterStrings() : Collections.<String>emptyList(),
-        tag, true, REPLICATE_AFTER, getCategory().toString(), scope);
-    manager.registerGauge(this, registry, () -> isMaster && replicationEnabled.get(),
-        tag, true, "replicationEnabled", getCategory().toString(), scope);
-  }
-
-  /**
-   * Used for showing statistics and progress information.
-   */
-  private NamedList<Object> getReplicationDetails(boolean showSlaveDetails) {
-    NamedList<Object> details = new SimpleOrderedMap<>();
-    NamedList<Object> master = new SimpleOrderedMap<>();
-    NamedList<Object> slave = new SimpleOrderedMap<>();
-
-    details.add("indexSize", NumberUtils.readableSize(core.getIndexSize()));
-    details.add("indexPath", core.getIndexDir());
-    details.add(CMD_SHOW_COMMITS, getCommits());
-    details.add("isMaster", String.valueOf(isMaster));
-    details.add("isSlave", String.valueOf(isSlave));
-    CommitVersionInfo vInfo = getIndexVersion();
-    details.add("indexVersion", null == vInfo ? 0 : vInfo.version);
-    details.add(GENERATION, null == vInfo ? 0 : vInfo.generation);
-
-    IndexCommit commit = indexCommitPoint;  // make a copy so it won't change
-
-    if (isMaster) {
-      if (includeConfFiles != null) master.add(CONF_FILES, includeConfFiles);
-      master.add(REPLICATE_AFTER, getReplicateAfterStrings());
-      master.add("replicationEnabled", String.valueOf(replicationEnabled.get()));
-    }
-
-    if (isMaster && commit != null) {
-      CommitVersionInfo repCommitInfo = CommitVersionInfo.build(commit);
-      master.add("replicableVersion", repCommitInfo.version);
-      master.add("replicableGeneration", repCommitInfo.generation);
-    }
-
-    IndexFetcher fetcher = currentIndexFetcher;
-    if (fetcher != null) {
-      Properties props = loadReplicationProperties();
-      if (showSlaveDetails) {
-        try {
-          NamedList nl = fetcher.getDetails();
-          slave.add("masterDetails", nl.get(CMD_DETAILS));
-        } catch (Exception e) {
-          log.warn(
-              "Exception while invoking 'details' method for replication on master ",
-              e);
-          slave.add(ERR_STATUS, "invalid_master");
-        }
-      }
-      slave.add(MASTER_URL, fetcher.getMasterUrl());
-      if (getPollInterval() != null) {
-        slave.add(POLL_INTERVAL, getPollInterval());
-      }
-      Date nextScheduled = getNextScheduledExecTime();
-      if (nextScheduled != null && !isPollingDisabled()) {
-        slave.add(NEXT_EXECUTION_AT, nextScheduled.toString());
-      } else if (isPollingDisabled()) {
-        slave.add(NEXT_EXECUTION_AT, "Polling disabled");
-      }
-      addVal(slave, IndexFetcher.INDEX_REPLICATED_AT, props, Date.class);
-      addVal(slave, IndexFetcher.INDEX_REPLICATED_AT_LIST, props, List.class);
-      addVal(slave, IndexFetcher.REPLICATION_FAILED_AT_LIST, props, List.class);
-      addVal(slave, IndexFetcher.TIMES_INDEX_REPLICATED, props, Integer.class);
-      addVal(slave, IndexFetcher.CONF_FILES_REPLICATED, props, Integer.class);
-      addVal(slave, IndexFetcher.TIMES_CONFIG_REPLICATED, props, Integer.class);
-      addVal(slave, IndexFetcher.CONF_FILES_REPLICATED_AT, props, Integer.class);
-      addVal(slave, IndexFetcher.LAST_CYCLE_BYTES_DOWNLOADED, props, Long.class);
-      addVal(slave, IndexFetcher.TIMES_FAILED, props, Integer.class);
-      addVal(slave, IndexFetcher.REPLICATION_FAILED_AT, props, Date.class);
-      addVal(slave, IndexFetcher.PREVIOUS_CYCLE_TIME_TAKEN, props, Long.class);
-
-      slave.add("currentDate", new Date().toString());
-      slave.add("isPollingDisabled", String.valueOf(isPollingDisabled()));
-      boolean isReplicating = isReplicating();
-      slave.add("isReplicating", String.valueOf(isReplicating));
-      if (isReplicating) {
-        try {
-          long bytesToDownload = 0;
-          List<String> filesToDownload = new ArrayList<>();
-          for (Map<String, Object> file : fetcher.getFilesToDownload()) {
-            filesToDownload.add((String) file.get(NAME));
-            bytesToDownload += (Long) file.get(SIZE);
-          }
-
-          //get list of conf files to download
-          for (Map<String, Object> file : fetcher.getConfFilesToDownload()) {
-            filesToDownload.add((String) file.get(NAME));
-            bytesToDownload += (Long) file.get(SIZE);
-          }
-
-          slave.add("filesToDownload", filesToDownload);
-          slave.add("numFilesToDownload", String.valueOf(filesToDownload.size()));
-          slave.add("bytesToDownload", NumberUtils.readableSize(bytesToDownload));
-
-          long bytesDownloaded = 0;
-          List<String> filesDownloaded = new ArrayList<>();
-          for (Map<String, Object> file : fetcher.getFilesDownloaded()) {
-            filesDownloaded.add((String) file.get(NAME));
-            bytesDownloaded += (Long) file.get(SIZE);
-          }
-
-          //get list of conf files downloaded
-          for (Map<String, Object> file : fetcher.getConfFilesDownloaded()) {
-            filesDownloaded.add((String) file.get(NAME));
-            bytesDownloaded += (Long) file.get(SIZE);
-          }
-
-          Map<String, Object> currentFile = fetcher.getCurrentFile();
-          String currFile = null;
-          long currFileSize = 0, currFileSizeDownloaded = 0;
-          float percentDownloaded = 0;
-          if (currentFile != null) {
-            currFile = (String) currentFile.get(NAME);
-            currFileSize = (Long) currentFile.get(SIZE);
-            if (currentFile.containsKey("bytesDownloaded")) {
-              currFileSizeDownloaded = (Long) currentFile.get("bytesDownloaded");
-              bytesDownloaded += currFileSizeDownloaded;
-              if (currFileSize > 0)
-                percentDownloaded = (currFileSizeDownloaded * 100) / currFileSize;
-            }
-          }
-          slave.add("filesDownloaded", filesDownloaded);
-          slave.add("numFilesDownloaded", String.valueOf(filesDownloaded.size()));
-
-          long estimatedTimeRemaining = 0;
-
-          Date replicationStartTimeStamp = fetcher.getReplicationStartTimeStamp();
-          if (replicationStartTimeStamp != null) {
-            slave.add("replicationStartTime", replicationStartTimeStamp.toString());
-          }
-          long elapsed = fetcher.getReplicationTimeElapsed();
-          slave.add("timeElapsed", String.valueOf(elapsed) + "s");
-
-          if (bytesDownloaded > 0)
-            estimatedTimeRemaining = ((bytesToDownload - bytesDownloaded) * elapsed) / bytesDownloaded;
-          float totalPercent = 0;
-          long downloadSpeed = 0;
-          if (bytesToDownload > 0)
-            totalPercent = (bytesDownloaded * 100) / bytesToDownload;
-          if (elapsed > 0)
-            downloadSpeed = (bytesDownloaded / elapsed);
-          if (currFile != null)
-            slave.add("currentFile", currFile);
-          slave.add("currentFileSize", NumberUtils.readableSize(currFileSize));
-          slave.add("currentFileSizeDownloaded", NumberUtils.readableSize(currFileSizeDownloaded));
-          slave.add("currentFileSizePercent", String.valueOf(percentDownloaded));
-          slave.add("bytesDownloaded", NumberUtils.readableSize(bytesDownloaded));
-          slave.add("totalPercent", String.valueOf(totalPercent));
-          slave.add("timeRemaining", String.valueOf(estimatedTimeRemaining) + "s");
-          slave.add("downloadSpeed", NumberUtils.readableSize(downloadSpeed));
-        } catch (Exception e) {
-          log.error("Exception while writing replication details: ", e);
-        }
-      }
-    }
-
-    if (isMaster)
-      details.add("master", master);
-    if (slave.size() > 0)
-      details.add("slave", slave);
-
-    NamedList snapshotStats = snapShootDetails;
-    if (snapshotStats != null)
-      details.add(CMD_BACKUP, snapshotStats);
-
-    return details;
-  }
-
-  private void addVal(NamedList<Object> nl, String key, Properties props, Class clzz) {
-    Object val = formatVal(key, props, clzz);
-    if (val != null) {
-      nl.add(key, val);
-    }
-  }
-
-  private void addVal(Map<String, Object> map, String key, Properties props, Class clzz) {
-    Object val = formatVal(key, props, clzz);
-    if (val != null) {
-      map.put(key, val);
-    }
-  }
-
-  private Object formatVal(String key, Properties props, Class clzz) {
-    String s = props.getProperty(key);
-    if (s == null || s.trim().length() == 0) return null;
-    if (clzz == Date.class) {
-      try {
-        Long l = Long.parseLong(s);
-        return new Date(l).toString();
-      } catch (NumberFormatException e) {
-        return null;
-      }
-    } else if (clzz == List.class) {
-      String[] ss = s.split(",");
-      List<String> l = new ArrayList<>();
-      for (String s1 : ss) {
-        l.add(new Date(Long.parseLong(s1)).toString());
-      }
-      return l;
-    } else {
-      return s;
-    }
-  }
-
-  private List<String> getReplicateAfterStrings() {
-    List<String> replicateAfter = new ArrayList<>();
-    if (replicateOnCommit)
-      replicateAfter.add("commit");
-    if (replicateOnOptimize)
-      replicateAfter.add("optimize");
-    if (replicateOnStart)
-      replicateAfter.add("startup");
-    return replicateAfter;
-  }
-
-  Properties loadReplicationProperties() {
-    Directory dir = null;
-    try {
-      try {
-        dir = core.getDirectoryFactory().get(core.getDataDir(),
-            DirContext.META_DATA, core.getSolrConfig().indexConfig.lockType);
-        IndexInput input;
-        try {
-          input = dir.openInput(
-            IndexFetcher.REPLICATION_PROPERTIES, IOContext.DEFAULT);
-        } catch (FileNotFoundException | NoSuchFileException e) {
-          return new Properties();
-        }
-
-        try {
-          final InputStream is = new PropertiesInputStream(input);
-          Properties props = new Properties();
-          props.load(new InputStreamReader(is, StandardCharsets.UTF_8));
-          return props;
-        } finally {
-          input.close();
-        }
-      } finally {
-        if (dir != null) {
-          core.getDirectoryFactory().release(dir);
-        }
-      }
-    } catch (IOException e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, e);
-    }
-  }
-
-
-//  void refreshCommitpoint() {
-//    IndexCommit commitPoint = core.getDeletionPolicy().getLatestCommit();
-//    if(replicateOnCommit || (replicateOnOptimize && commitPoint.getSegmentCount() == 1)) {
-//      indexCommitPoint = commitPoint;
-//    }
-//  }
-
-  private void setupPolling(String intervalStr) {
-    pollIntervalStr = intervalStr;
-    pollIntervalNs = readIntervalNs(pollIntervalStr);
-    if (pollIntervalNs == null || pollIntervalNs <= 0) {
-      log.info(" No value set for 'pollInterval'. Timer Task not started.");
-      return;
-    }
-
-    Runnable task = () -> {
-      if (pollDisabled.get()) {
-        log.info("Poll disabled");
-        return;
-      }
-      try {
-        log.debug("Polling for index modifications");
-        markScheduledExecutionStart();
-        IndexFetchResult fetchResult = doFetch(null, false);
-        if (pollListener != null) pollListener.onComplete(core, fetchResult);
-      } catch (Exception e) {
-        log.error("Exception in fetching index", e);
-      }
-    };
-    executorService = Executors.newSingleThreadScheduledExecutor(
-        new DefaultSolrThreadFactory("indexFetcher"));
-    // Randomize initial delay, with a minimum of 1ms
-    long initialDelayNs = new Random().nextLong() % pollIntervalNs
-        + TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS);
-    executorService.scheduleAtFixedRate(task, initialDelayNs, pollIntervalNs, TimeUnit.NANOSECONDS);
-    log.info("Poll scheduled at an interval of {}ms",
-        TimeUnit.MILLISECONDS.convert(pollIntervalNs, TimeUnit.NANOSECONDS));
-  }
-
-  @Override
-  @SuppressWarnings({"unchecked", "resource"})
-  public void inform(SolrCore core) {
-    this.core = core;
-    registerCloseHook();
-    Long deprecatedReserveCommitDuration = null;
-    Object nbtk = initArgs.get(NUMBER_BACKUPS_TO_KEEP_INIT_PARAM);
-    if(nbtk!=null) {
-      numberBackupsToKeep = Integer.parseInt(nbtk.toString());
-    } else {
-      numberBackupsToKeep = 0;
-    }
-    NamedList slave = (NamedList) initArgs.get("slave");
-    boolean enableSlave = isEnabled( slave );
-    if (enableSlave) {
-      currentIndexFetcher = pollingIndexFetcher = new IndexFetcher(slave, this, core);
-      setupPolling((String) slave.get(POLL_INTERVAL));
-      isSlave = true;
-    }
-    NamedList master = (NamedList) initArgs.get("master");
-    boolean enableMaster = isEnabled( master );
-
-    if (enableMaster || (enableSlave && !currentIndexFetcher.fetchFromLeader)) {
-      if (core.getCoreContainer().getZkController() != null) {
-        log.warn("SolrCloud is enabled for core " + core.getName() + " but so is old-style replication. Make sure you" +
-            " intend this behavior, it usually indicates a mis-configuration. Master setting is " +
-            Boolean.toString(enableMaster) + " and slave setting is " + Boolean.toString(enableSlave));
-      }
-    }
-
-    if (!enableSlave && !enableMaster) {
-      enableMaster = true;
-      master = new NamedList<>();
-    }
-
-    if (enableMaster) {
-      includeConfFiles = (String) master.get(CONF_FILES);
-      if (includeConfFiles != null && includeConfFiles.trim().length() > 0) {
-        List<String> files = Arrays.asList(includeConfFiles.split(","));
-        for (String file : files) {
-          if (file.trim().length() == 0) continue;
-          String[] strs = file.trim().split(":");
-          // add the alias if one was given, otherwise null
-          confFileNameAlias.add(strs[0], strs.length > 1 ? strs[1] : null);
-        }
-        log.info("Replication enabled for following config files: " + includeConfFiles);
-      }
-      List backup = master.getAll("backupAfter");
-      boolean backupOnCommit = backup.contains("commit");
-      boolean backupOnOptimize = !backupOnCommit && backup.contains("optimize");
-      List replicateAfter = master.getAll(REPLICATE_AFTER);
-      replicateOnCommit = replicateAfter.contains("commit");
-      replicateOnOptimize = !replicateOnCommit && replicateAfter.contains("optimize");
-
-      if (!replicateOnCommit && ! replicateOnOptimize) {
-        replicateOnCommit = true;
-      }
-
-      // if we only want to replicate on optimize, we need the deletion policy to
-      // save the last optimized commit point.
-      if (replicateOnOptimize) {
-        IndexDeletionPolicyWrapper wrapper = core.getDeletionPolicy();
-        IndexDeletionPolicy policy = wrapper == null ? null : wrapper.getWrappedDeletionPolicy();
-        if (policy instanceof SolrDeletionPolicy) {
-          SolrDeletionPolicy solrPolicy = (SolrDeletionPolicy)policy;
-          if (solrPolicy.getMaxOptimizedCommitsToKeep() < 1) {
-            solrPolicy.setMaxOptimizedCommitsToKeep(1);
-          }
-        } else {
-          log.warn("Replication can't call setMaxOptimizedCommitsToKeep on " + policy);
-        }
-      }
-
-      if (replicateOnOptimize || backupOnOptimize) {
-        core.getUpdateHandler().registerOptimizeCallback(getEventListener(backupOnOptimize, replicateOnOptimize));
-      }
-      if (replicateOnCommit || backupOnCommit) {
-        replicateOnCommit = true;
-        core.getUpdateHandler().registerCommitCallback(getEventListener(backupOnCommit, replicateOnCommit));
-      }
-      if (replicateAfter.contains("startup")) {
-        replicateOnStart = true;
-        RefCounted<SolrIndexSearcher> s = core.getNewestSearcher(false);
-        try {
-          DirectoryReader reader = (s == null) ? null : s.get().getIndexReader();
-          if (reader!=null && reader.getIndexCommit() != null && reader.getIndexCommit().getGeneration() != 1L) {
-            try {
-              if(replicateOnOptimize){
-                Collection<IndexCommit> commits = DirectoryReader.listCommits(reader.directory());
-                for (IndexCommit ic : commits) {
-                  if(ic.getSegmentCount() == 1){
-                    if(indexCommitPoint == null || indexCommitPoint.getGeneration() < ic.getGeneration()) indexCommitPoint = ic;
-                  }
-                }
-              } else{
-                indexCommitPoint = reader.getIndexCommit();
-              }
-            } finally {
-              // We don't need to save commit points for replication, the SolrDeletionPolicy
-              // always saves the last commit point (and the last optimized commit point, if needed)
-              /***
-              if(indexCommitPoint != null){
-                core.getDeletionPolicy().saveCommitPoint(indexCommitPoint.getGeneration());
-              }
-              ***/
-            }
-          }
-
-          // ensure the writer is init'd so that we have a list of commit points
-          RefCounted<IndexWriter> iw = core.getUpdateHandler().getSolrCoreState().getIndexWriter(core);
-          iw.decref();
-
-        } catch (IOException e) {
-          log.warn("Unable to get IndexCommit on startup", e);
-        } finally {
-          if (s!=null) s.decref();
-        }
-      }
-      String reserve = (String) master.get(RESERVE);
-      if (reserve != null && !reserve.trim().equals("")) {
-        reserveCommitDuration = readIntervalMs(reserve);
-        deprecatedReserveCommitDuration = reserveCommitDuration;
-        // remove this error check & backcompat logic when Version.LUCENE_7_1_0 is removed
-        assertWarnOrFail(
-          "Beginning with Solr 7.1, master."+RESERVE + " is deprecated and should now be configured directly on the ReplicationHandler.",
-          (null == reserve),
-          core.getSolrConfig().luceneMatchVersion.onOrAfter(Version.LUCENE_7_1_0));
-      }
-      isMaster = true;
-    }
-
-    {
-      final String reserve = (String) initArgs.get(RESERVE);
-      if (reserve != null && !reserve.trim().equals("")) {
-        reserveCommitDuration = readIntervalMs(reserve);
-        if (deprecatedReserveCommitDuration != null) {
-          throw new IllegalArgumentException("'master."+RESERVE+"' and '"+RESERVE+"' are mutually exclusive.");
-        }
-      }
-    }
-    log.info("Commits will be reserved for " + reserveCommitDuration + "ms.");
-  }
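-
-  // A minimal sketch of the solrconfig.xml init args this method consumes (values are
-  // illustrative; the master and slave sections may be used alone or together):
-  //
-  //   <requestHandler name="/replication" class="solr.ReplicationHandler">
-  //     <lst name="master">
-  //       <str name="replicateAfter">commit</str>
-  //       <str name="confFiles">schema.xml,stopwords.txt</str>
-  //     </lst>
-  //     <lst name="slave">
-  //       <str name="masterUrl">http://master:8983/solr/core1/replication</str>
-  //       <str name="pollInterval">00:00:60</str>
-  //     </lst>
-  //   </requestHandler>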
-
-  // check master or slave is enabled
-  private boolean isEnabled( NamedList params ){
-    if( params == null ) return false;
-    Object enable = params.get( "enable" );
-    if( enable == null ) return true;
-    if( enable instanceof String )
-      return StrUtils.parseBool( (String)enable );
-    return Boolean.TRUE.equals( enable );
-  }
-
-  /**
-   * Register a close hook.
-   */
-  private void registerCloseHook() {
-    core.addCloseHook(new CloseHook() {
-      @Override
-      public void preClose(SolrCore core) {
-        if (executorService != null) executorService.shutdown(); // we don't wait for shutdown - this can deadlock core reload
-      }
-
-      @Override
-      public void postClose(SolrCore core) {
-        if (pollingIndexFetcher != null) {
-          pollingIndexFetcher.destroy();
-        }
-        if (currentIndexFetcher != null && currentIndexFetcher != pollingIndexFetcher) {
-          currentIndexFetcher.destroy();
-        }
-      }
-    });
-
-    core.addCloseHook(new CloseHook() {
-      @Override
-      public void preClose(SolrCore core) {
-        ExecutorUtil.shutdownAndAwaitTermination(restoreExecutor);
-        if (restoreFuture != null) {
-          restoreFuture.cancel(false);
-        }
-      }
-
-      @Override
-      public void postClose(SolrCore core) {}
-    });
-  }
-
-  public void close() {
-    if (executorService != null) executorService.shutdown();
-    if (pollingIndexFetcher != null) {
-      pollingIndexFetcher.destroy();
-    }
-    if (currentIndexFetcher != null && currentIndexFetcher != pollingIndexFetcher) {
-      currentIndexFetcher.destroy();
-    }
-    ExecutorUtil.shutdownAndAwaitTermination(restoreExecutor);
-    if (restoreFuture != null) {
-      restoreFuture.cancel(false);
-    }
-  }
-
-  /**
-   * Register a listener for postcommit/optimize
-   *
-   * @param snapshoot whether to take a snapshot
-   * @param getCommit whether to record the commit point as well
-   *
-   * @return an instance of the event listener
-   */
-  private SolrEventListener getEventListener(final boolean snapshoot, final boolean getCommit) {
-    return new SolrEventListener() {
-      @Override
-      public void init(NamedList args) {/*no op*/ }
-
-      /**
-       * This refreshes the latest replicable index commit and optionally creates snapshots as well.
-       */
-      @Override
-      public void postCommit() {
-        IndexCommit currentCommitPoint = core.getDeletionPolicy().getLatestCommit();
-
-        if (getCommit) {
-          // IndexCommit oldCommitPoint = indexCommitPoint;
-          indexCommitPoint = currentCommitPoint;
-
-          // We don't need to save commit points for replication, the SolrDeletionPolicy
-          // always saves the last commit point (and the last optimized commit point, if needed)
-          /***
-          if (indexCommitPoint != null) {
-            core.getDeletionPolicy().saveCommitPoint(indexCommitPoint.getGeneration());
-          }
-          if(oldCommitPoint != null){
-            core.getDeletionPolicy().releaseCommitPointAndExtendReserve(oldCommitPoint.getGeneration());
-          }
-          ***/
-        }
-        if (snapshoot) {
-          try {
-            int numberToKeep = numberBackupsToKeep;
-            if (numberToKeep < 1) {
-              numberToKeep = Integer.MAX_VALUE;
-            }
-            SnapShooter snapShooter = new SnapShooter(core, null, null);
-            snapShooter.validateCreateSnapshot();
-            snapShooter.createSnapAsync(numberToKeep, (nl) -> snapShootDetails = nl);
-          } catch (Exception e) {
-            log.error("Exception while snapshooting", e);
-          }
-        }
-      }
-
-      @Override
-      public void newSearcher(SolrIndexSearcher newSearcher, SolrIndexSearcher currentSearcher) { /*no op*/}
-
-      @Override
-      public void postSoftCommit() {
-
-      }
-    };
-  }
-
-  /**
-   * This class is used to read and send files in the Lucene index.
-   */
-  private class DirectoryFileStream implements SolrCore.RawWriter {
-    protected SolrParams params;
-
-    protected FastOutputStream fos;
-
-    protected Long indexGen;
-    protected IndexDeletionPolicyWrapper delPolicy;
-
-    protected String fileName;
-    protected String cfileName;
-    protected String tlogFileName;
-    protected String sOffset;
-    protected String sLen;
-    protected String compress;
-    protected boolean useChecksum;
-
-    protected long offset = -1;
-    protected int len = -1;
-
-    protected Checksum checksum;
-
-    private RateLimiter rateLimiter;
-
-    byte[] buf;
-
-    public DirectoryFileStream(SolrParams solrParams) {
-      params = solrParams;
-      delPolicy = core.getDeletionPolicy();
-
-      fileName = validateFilenameOrError(params.get(FILE));
-      cfileName = validateFilenameOrError(params.get(CONF_FILE_SHORT));
-      tlogFileName = validateFilenameOrError(params.get(TLOG_FILE));
-      
-      sOffset = params.get(OFFSET);
-      sLen = params.get(LEN);
-      compress = params.get(COMPRESSION);
-      useChecksum = params.getBool(CHECKSUM, false);
-      indexGen = params.getLong(GENERATION);
-      if (useChecksum) {
-        checksum = new Adler32();
-      }
-      //No throttle if MAX_WRITE_PER_SECOND is not specified
-      double maxWriteMBPerSec = params.getDouble(MAX_WRITE_PER_SECOND, Double.MAX_VALUE);
-      rateLimiter = new RateLimiter.SimpleRateLimiter(maxWriteMBPerSec);
-    }
-
-    // Throw exception on directory traversal attempts 
-    protected String validateFilenameOrError(String filename) {
-      if (filename != null) {
-        Path filePath = Paths.get(filename);
-        filePath.forEach(subpath -> {
-          if ("..".equals(subpath.toString())) {
-            throw new SolrException(ErrorCode.FORBIDDEN, "File name cannot contain ..");
-          }
-        });
-        if (filePath.isAbsolute()) {
-          throw new SolrException(ErrorCode.FORBIDDEN, "File name must be relative");
-        }
-        return filename;
-      } else return null;
-    }
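-
-    // e.g. (illustrative inputs) "segments_5" and "conf/stopwords.txt" pass;
-    // "../../solr.xml" and "/etc/passwd" throw FORBIDDEN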
-
-    protected void initWrite() throws IOException {
-      if (sOffset != null) offset = Long.parseLong(sOffset);
-      if (sLen != null) len = Integer.parseInt(sLen);
-      if (fileName == null && cfileName == null && tlogFileName == null) {
-        // no filename given; just write the EOF marker
-        writeNothingAndFlush();
-      }
-      buf = new byte[(len == -1 || len > PACKET_SZ) ? PACKET_SZ : len];
-
-      //reserve commit point till write is complete
-      if(indexGen != null) {
-        delPolicy.saveCommitPoint(indexGen);
-      }
-    }
-
-    protected void createOutputStream(OutputStream out) {
-      out = new CloseShieldOutputStream(out); // DeflaterOutputStream requires close() to be called, but the request outputstream must not be closed
-      if (Boolean.parseBoolean(compress)) {
-        fos = new FastOutputStream(new DeflaterOutputStream(out));
-      } else {
-        fos = new FastOutputStream(out);
-      }
-    }
-
-    protected void extendReserveAndReleaseCommitPoint() {
-      if(indexGen != null) {
-        //Reserve the commit point for another 10s for the next file to be fetched.
-        //We need to keep extending the commit reservation between requests so that the replica can fetch
-        //all the files correctly.
-        delPolicy.setReserveDuration(indexGen, reserveCommitDuration);
-
-        //release the commit point as the write is complete
-        delPolicy.releaseCommitPoint(indexGen);
-      }
-
-    }
-    public void write(OutputStream out) throws IOException {
-      createOutputStream(out);
-
-      IndexInput in = null;
-      try {
-        initWrite();
-
-        Directory dir = core.withSearcher(searcher -> searcher.getIndexReader().directory());
-        in = dir.openInput(fileName, IOContext.READONCE);
-        // if offset is mentioned move the pointer to that point
-        if (offset != -1) in.seek(offset);
-
-        long filelen = dir.fileLength(fileName);
-        long maxBytesBeforePause = 0;
-
-        while (true) {
-          offset = offset == -1 ? 0 : offset;
-          int read = (int) Math.min(buf.length, filelen - offset);
-          in.readBytes(buf, 0, read);
-
-          fos.writeInt(read);
-          if (useChecksum) {
-            checksum.reset();
-            checksum.update(buf, 0, read);
-            fos.writeLong(checksum.getValue());
-          }
-          fos.write(buf, 0, read);
-          fos.flush();
-          log.debug("Wrote {} bytes for file {}", offset + read, fileName);
-
-          //Pause if necessary
-          maxBytesBeforePause += read;
-          if (maxBytesBeforePause >= rateLimiter.getMinPauseCheckBytes()) {
-            rateLimiter.pause(maxBytesBeforePause);
-            maxBytesBeforePause = 0;
-          }
-          if (read != buf.length) {
-            writeNothingAndFlush();
-            fos.close(); // we close because DeflaterOutputStream requires a close call, but the request outputstream is protected
-            break;
-          }
-          offset += read;
-          in.seek(offset);
-        }
-      } catch (IOException e) {
-        log.warn("Exception while writing response for params: " + params, e);
-      } finally {
-        if (in != null) {
-          in.close();
-        }
-        extendReserveAndReleaseCommitPoint();
-      }
-    }
-
-
-    /**
-     * Used to write a marker for EOF
-     */
-    protected void writeNothingAndFlush() throws IOException {
-      fos.writeInt(0);
-      fos.flush();
-    }
-  }
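
The stream written above is a sequence of length-prefixed packets: a 4-byte length, an
optional 8-byte Adler32 checksum (when checksum=true), then the payload; a zero length is
the EOF marker from writeNothingAndFlush(). Below is a hedged sketch of a compatible
reader, assuming checksums are enabled, no compression, and DataOutput-style big-endian
framing (which FastOutputStream follows); the class name is hypothetical:

    import java.io.DataInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.util.zip.Adler32;

    public class PacketReader {
      // Consumes packets until the zero-length EOF marker; returns total payload bytes.
      static long readAll(InputStream raw) throws IOException {
        DataInputStream in = new DataInputStream(raw);
        Adler32 adler = new Adler32();
        long total = 0;
        while (true) {
          int len = in.readInt();
          if (len == 0) break;            // EOF marker
          long expected = in.readLong();  // present only when useChecksum is true
          byte[] buf = new byte[len];
          in.readFully(buf);
          adler.reset();
          adler.update(buf, 0, len);
          if (adler.getValue() != expected) {
            throw new IOException("checksum mismatch");
          }
          total += len;
        }
        return total;
      }
    }
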
-
-  /** This is used to read and send local files, such as those in the conf directory.
-   */
-  private abstract class LocalFsFileStream extends DirectoryFileStream {
-
-    private File file;
-
-    public LocalFsFileStream(SolrParams solrParams) {
-      super(solrParams);
-      this.file = this.initFile();
-    }
-
-    protected abstract File initFile();
-
-    @Override
-    public void write(OutputStream out) throws IOException {
-      createOutputStream(out);
-      FileInputStream inputStream = null;
-      try {
-        initWrite();
-
-        if (file.exists() && file.canRead()) {
-          inputStream = new FileInputStream(file);
-          FileChannel channel = inputStream.getChannel();
-          //if offset is mentioned move the pointer to that point
-          if (offset != -1)
-            channel.position(offset);
-          ByteBuffer bb = ByteBuffer.wrap(buf);
-
-          while (true) {
-            bb.clear();
-            long bytesRead = channel.read(bb);
-            if (bytesRead <= 0) {
-              writeNothingAndFlush();
-              fos.close(); // we close because DeflaterOutputStream requires a close call, but the request outputstream is protected
-              break;
-            }
-            fos.writeInt((int) bytesRead);
-            if (useChecksum) {
-              checksum.reset();
-              checksum.update(buf, 0, (int) bytesRead);
-              fos.writeLong(checksum.getValue());
-            }
-            fos.write(buf, 0, (int) bytesRead);
-            fos.flush();
-          }
-        } else {
-          writeNothingAndFlush();
-        }
-      } catch (IOException e) {
-        log.warn("Exception while writing response for params: " + params, e);
-      } finally {
-        IOUtils.closeQuietly(inputStream);
-        extendReserveAndReleaseCommitPoint();
-      }
-    }
-  }
-
-  private class LocalFsTlogFileStream extends LocalFsFileStream {
-
-    public LocalFsTlogFileStream(SolrParams solrParams) {
-      super(solrParams);
-    }
-
-    protected File initFile() {
-      //if it is a tlog file read from tlog directory
-      return new File(core.getUpdateHandler().getUpdateLog().getLogDir(), tlogFileName);
-    }
-
-  }
-
-  private class LocalFsConfFileStream extends LocalFsFileStream {
-
-    public LocalFsConfFileStream(SolrParams solrParams) {
-      super(solrParams);
-    }
-
-    protected File initFile() {
-      //if it is a conf file read from config directory
-      return new File(core.getResourceLoader().getConfigDir(), cfileName);
-    }
-
-  }
-
-  private static Long readIntervalMs(String interval) {
-    Long ns = readIntervalNs(interval);
-    // readIntervalNs returns null for a null interval; guard against the unboxing NPE
-    return ns == null ? null : TimeUnit.MILLISECONDS.convert(ns, TimeUnit.NANOSECONDS);
-  }
-
-  private static Long readIntervalNs(String interval) {
-    if (interval == null)
-      return null;
-    int result = 0;
-    Matcher m = INTERVAL_PATTERN.matcher(interval.trim());
-    if (m.find()) {
-      String hr = m.group(1);
-      String min = m.group(2);
-      String sec = m.group(3);
-      try {
-        if (sec != null && sec.length() > 0)
-          result += Integer.parseInt(sec);
-        if (min != null && min.length() > 0)
-          result += (60 * Integer.parseInt(min));
-        if (hr != null && hr.length() > 0)
-          result += (60 * 60 * Integer.parseInt(hr));
-        return TimeUnit.NANOSECONDS.convert(result, TimeUnit.SECONDS);
-      } catch (NumberFormatException e) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, INTERVAL_ERR_MSG);
-      }
-    } else {
-      throw new SolrException(ErrorCode.SERVER_ERROR, INTERVAL_ERR_MSG);
-    }
-  }
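
As a concrete example of the arithmetic above, a pollInterval of "01:30:00" is
1*3600 + 30*60 = 5400 seconds. A small standalone sketch of the same parse (hypothetical
class name, same regex):

    import java.util.concurrent.TimeUnit;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class IntervalDemo {
      private static final Pattern INTERVAL = Pattern.compile("(\\d*?):(\\d*?):(\\d*)");

      // Same hh:mm:ss -> seconds arithmetic as readIntervalNs above.
      static long toSeconds(String interval) {
        Matcher m = INTERVAL.matcher(interval.trim());
        if (!m.find()) throw new IllegalArgumentException("expected HH:mm:ss");
        long s = 0;
        if (!m.group(3).isEmpty()) s += Long.parseLong(m.group(3));
        if (!m.group(2).isEmpty()) s += 60 * Long.parseLong(m.group(2));
        if (!m.group(1).isEmpty()) s += 60 * 60 * Long.parseLong(m.group(1));
        return s;
      }

      public static void main(String[] args) {
        System.out.println(toSeconds("00:00:10"));                            // 10
        System.out.println(TimeUnit.SECONDS.toMillis(toSeconds("01:30:00"))); // 5400000
      }
    }
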
-
-  private static final String SUCCESS = "success";
-
-  private static final String FAILED = "failed";
-
-  private static final String EXCEPTION = "exception";
-
-  public static final String MASTER_URL = "masterUrl";
-
-  public static final String FETCH_FROM_LEADER = "fetchFromLeader";
-
-  // in case of a TLOG replica, if masterVersion = zero, don't do a commit
-  // otherwise updates from the current tlog won't be copied over properly to the new tlog, leading to data loss
-  public static final String SKIP_COMMIT_ON_MASTER_VERSION_ZERO = "skipCommitOnMasterVersionZero";
-
-  public static final String STATUS = "status";
-
-  public static final String COMMAND = "command";
-
-  public static final String CMD_DETAILS = "details";
-
-  public static final String CMD_BACKUP = "backup";
-
-  public static final String CMD_RESTORE = "restore";
-
-  public static final String CMD_RESTORE_STATUS = "restorestatus";
-
-  public static final String CMD_FETCH_INDEX = "fetchindex";
-
-  public static final String CMD_ABORT_FETCH = "abortfetch";
-
-  public static final String CMD_GET_FILE_LIST = "filelist";
-
-  public static final String CMD_GET_FILE = "filecontent";
-
-  public static final String CMD_DISABLE_POLL = "disablepoll";
-
-  public static final String CMD_DISABLE_REPL = "disablereplication";
-
-  public static final String CMD_ENABLE_REPL = "enablereplication";
-
-  public static final String CMD_ENABLE_POLL = "enablepoll";
-
-  public static final String CMD_INDEX_VERSION = "indexversion";
-
-  public static final String CMD_SHOW_COMMITS = "commits";
-
-  public static final String CMD_DELETE_BACKUP = "deletebackup";
-
-  public static final String GENERATION = "generation";
-
-  public static final String OFFSET = "offset";
-
-  public static final String LEN = "len";
-
-  public static final String FILE = "file";
-
-  public static final String SIZE = "size";
-
-  public static final String MAX_WRITE_PER_SECOND = "maxWriteMBPerSec";
-
-  public static final String CONF_FILE_SHORT = "cf";
-
-  public static final String TLOG_FILE = "tlogFile";
-
-  public static final String CHECKSUM = "checksum";
-
-  public static final String ALIAS = "alias";
-
-  public static final String CONF_CHECKSUM = "confchecksum";
-
-  public static final String CONF_FILES = "confFiles";
-
-  public static final String TLOG_FILES = "tlogFiles";
-
-  public static final String REPLICATE_AFTER = "replicateAfter";
-
-  public static final String FILE_STREAM = "filestream";
-
-  public static final String POLL_INTERVAL = "pollInterval";
-
-  public static final String INTERVAL_ERR_MSG = "The " + POLL_INTERVAL + " must be in this format 'HH:mm:ss'";
-
-  private static final Pattern INTERVAL_PATTERN = Pattern.compile("(\\d*?):(\\d*?):(\\d*)");
-
-  public static final int PACKET_SZ = 1024 * 1024; // 1MB
-
-  public static final String RESERVE = "commitReserveDuration";
-
-  public static final String COMPRESSION = "compression";
-
-  public static final String EXTERNAL = "external";
-
-  public static final String INTERNAL = "internal";
-
-  public static final String ERR_STATUS = "ERROR";
-
-  public static final String OK_STATUS = "OK";
-
-  public static final String NEXT_EXECUTION_AT = "nextExecutionAt";
-
-  public static final String NUMBER_BACKUPS_TO_KEEP_REQUEST_PARAM = "numberToKeep";
-
-  public static final String NUMBER_BACKUPS_TO_KEEP_INIT_PARAM = "maxNumberOfBackups";
-
-  /**
-   * Boolean param for tests that can be specified when using
-   * {@link #CMD_FETCH_INDEX} to force the current request to block until
-   * the fetch is complete.  <b>NOTE:</b> This param is not advised for
-   * non-test code, since the duration of the fetch for non-trivial
-   * indexes will likely cause the request to time out.
-   *
-   * @lucene.internal
-   */
-  public static final String WAIT = "wait";
-}
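
The command constants above correspond directly to HTTP requests against the handler's
conventional /replication path. A hedged sketch of issuing one such command follows (host,
port, and core name are placeholders; requires Java 9+ for readAllBytes):

    import java.io.InputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class ReplicationCommandDemo {
      public static void main(String[] args) throws Exception {
        // command=indexversion corresponds to CMD_INDEX_VERSION above.
        URL url = new URL(
            "http://localhost:8983/solr/mycore/replication?command=indexversion&wt=json");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        try (InputStream in = conn.getInputStream()) {
          System.out.println(new String(in.readAllBytes(), StandardCharsets.UTF_8));
        }
      }
    }
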

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java b/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java
deleted file mode 100644
index a398eb7..0000000
--- a/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java
+++ /dev/null
@@ -1,329 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.lang.invoke.MethodHandles;
-import java.util.Collection;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-import com.codahale.metrics.MetricRegistry;
-import com.google.common.collect.ImmutableList;
-import com.codahale.metrics.Counter;
-import com.codahale.metrics.Meter;
-import com.codahale.metrics.Timer;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SuppressForbidden;
-import org.apache.solr.core.PluginBag;
-import org.apache.solr.core.PluginInfo;
-import org.apache.solr.core.SolrInfoBean;
-import org.apache.solr.metrics.MetricsMap;
-import org.apache.solr.metrics.SolrMetricManager;
-import org.apache.solr.metrics.SolrMetricProducer;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.search.SyntaxError;
-import org.apache.solr.util.SolrPluginUtils;
-import org.apache.solr.api.Api;
-import org.apache.solr.api.ApiBag;
-import org.apache.solr.api.ApiSupport;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.core.RequestParams.USEPARAM;
-
-/**
- *
- */
-public abstract class RequestHandlerBase implements SolrRequestHandler, SolrInfoBean, SolrMetricProducer, NestedRequestHandler, ApiSupport {
-
-  protected NamedList initArgs = null;
-  protected SolrParams defaults;
-  protected SolrParams appends;
-  protected SolrParams invariants;
-  protected boolean httpCaching = true;
-
-  // Statistics
-  private Meter numErrors = new Meter();
-  private Meter numServerErrors = new Meter();
-  private Meter numClientErrors = new Meter();
-  private Meter numTimeouts = new Meter();
-  private Counter requests = new Counter();
-  private final Map<String, Counter> shardPurposes = new ConcurrentHashMap<>();
-  private Timer requestTimes = new Timer();
-  private Counter totalTime = new Counter();
-
-  private final long handlerStart;
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private PluginInfo pluginInfo;
-
-  private Set<String> metricNames = ConcurrentHashMap.newKeySet();
-  private MetricRegistry registry;
-  protected String registryName;
-  protected SolrMetricManager metricManager;
-
-
-  @SuppressForbidden(reason = "Need currentTimeMillis, used only for stats output")
-  public RequestHandlerBase() {
-    handlerStart = System.currentTimeMillis();
-  }
-
-  /**
-   * Initializes the {@link org.apache.solr.request.SolrRequestHandler} by creating the three named {@link org.apache.solr.common.params.SolrParams} listed below.
-   * <table border="1" summary="table of parameters">
-   * <tr><th>Name</th><th>Description</th></tr>
-   * <tr><td>defaults</td><td>Contains all of the named arguments contained within the list element named "defaults".</td></tr>
-   * <tr><td>appends</td><td>Contains all of the named arguments contained within the list element named "appends".</td></tr>
-   * <tr><td>invariants</td><td>Contains all of the named arguments contained within the list element named "invariants".</td></tr>
-   * </table>
-   *
-   * Example:
-   * <pre>
-   * &lt;lst name="defaults"&gt;
-   * &lt;str name="echoParams"&gt;explicit&lt;/str&gt;
-   * &lt;str name="qf"&gt;text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0&lt;/str&gt;
-   * &lt;str name="mm"&gt;2&lt;-1 5&lt;-2 6&lt;90%&lt;/str&gt;
-   * &lt;str name="bq"&gt;incubationdate_dt:[* TO NOW/DAY-1MONTH]^2.2&lt;/str&gt;
-   * &lt;/lst&gt;
-   * &lt;lst name="appends"&gt;
-   * &lt;str name="fq"&gt;inStock:true&lt;/str&gt;
-   * &lt;/lst&gt;
-   *
-   * &lt;lst name="invariants"&gt;
-   * &lt;str name="facet.field"&gt;cat&lt;/str&gt;
-   * &lt;str name="facet.field"&gt;manu_exact&lt;/str&gt;
-   * &lt;str name="facet.query"&gt;price:[* TO 500]&lt;/str&gt;
-   * &lt;str name="facet.query"&gt;price:[500 TO *]&lt;/str&gt;
-   * &lt;/lst&gt;
-   * </pre>
-   *
-   *
-   * @param args The {@link org.apache.solr.common.util.NamedList} to initialize from
-   *
-   * @see #handleRequest(org.apache.solr.request.SolrQueryRequest, org.apache.solr.response.SolrQueryResponse)
-   * @see #handleRequestBody(org.apache.solr.request.SolrQueryRequest, org.apache.solr.response.SolrQueryResponse)
-   * @see org.apache.solr.util.SolrPluginUtils#setDefaults(org.apache.solr.request.SolrQueryRequest, org.apache.solr.common.params.SolrParams, org.apache.solr.common.params.SolrParams, org.apache.solr.common.params.SolrParams)
-   * @see NamedList#toSolrParams()
-   *
-   * See also the example solrconfig.xml located in the Solr codebase (example/solr/conf).
-   */
-  @Override
-  public void init(NamedList args) {
-    initArgs = args;
-
-    if( args != null ) {
-      defaults = getSolrParamsFromNamedList(args, "defaults");
-      appends = getSolrParamsFromNamedList(args, "appends");
-      invariants = getSolrParamsFromNamedList(args, "invariants");
-    }
-    
-    if (initArgs != null) {
-      Object caching = initArgs.get("httpCaching");
-      httpCaching = caching != null ? Boolean.parseBoolean(caching.toString()) : true;
-    }
-
-  }
-
-  @Override
-  public void initializeMetrics(SolrMetricManager manager, String registryName, String tag, final String scope) {
-    this.metricManager = manager;
-    this.registryName = registryName;
-    this.registry = manager.registry(registryName);
-    numErrors = manager.meter(this, registryName, "errors", getCategory().toString(), scope);
-    numServerErrors = manager.meter(this, registryName, "serverErrors", getCategory().toString(), scope);
-    numClientErrors = manager.meter(this, registryName, "clientErrors", getCategory().toString(), scope);
-    numTimeouts = manager.meter(this, registryName, "timeouts", getCategory().toString(), scope);
-    requests = manager.counter(this, registryName, "requests", getCategory().toString(), scope);
-    MetricsMap metricsMap = new MetricsMap((detail, map) ->
-      shardPurposes.forEach((k, v) -> map.put(k, v.getCount())));
-    manager.registerGauge(this, registryName, metricsMap, tag, true, "shardRequests", getCategory().toString(), scope);
-    requestTimes = manager.timer(this, registryName, "requestTimes", getCategory().toString(), scope);
-    totalTime = manager.counter(this, registryName, "totalTime", getCategory().toString(), scope);
-    manager.registerGauge(this, registryName, () -> handlerStart, tag, true, "handlerStart", getCategory().toString(), scope);
-  }
-
-  public static SolrParams getSolrParamsFromNamedList(NamedList args, String key) {
-    Object o = args.get(key);
-    if (o instanceof NamedList) { // instanceof already excludes null
-      return ((NamedList) o).toSolrParams();
-    }
-    return null;
-  }
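
A short usage sketch for the helper above, building by hand the same kind of structure
that init(NamedList) receives from solrconfig.xml (assumes solr-core and solr-solrj on
the classpath):

    import org.apache.solr.common.params.SolrParams;
    import org.apache.solr.common.util.NamedList;
    import org.apache.solr.handler.RequestHandlerBase;

    public class InitArgsDemo {
      public static void main(String[] args) {
        NamedList<Object> defaultsSection = new NamedList<>();
        defaultsSection.add("echoParams", "explicit");

        NamedList<Object> initArgs = new NamedList<>();
        initArgs.add("defaults", defaultsSection);

        // Extracts the "defaults" sub-list as SolrParams, or null if absent.
        SolrParams defaults =
            RequestHandlerBase.getSolrParamsFromNamedList(initArgs, "defaults");
        System.out.println(defaults.get("echoParams")); // explicit
      }
    }
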
-
-  public NamedList getInitArgs() {
-    return initArgs;
-  }
-  
-  public abstract void handleRequestBody( SolrQueryRequest req, SolrQueryResponse rsp ) throws Exception;
-
-  @Override
-  public void handleRequest(SolrQueryRequest req, SolrQueryResponse rsp) {
-    requests.inc();
-    if (req.getParams().getBool(ShardParams.IS_SHARD, false)) {
-      shardPurposes.computeIfAbsent("total", name -> new Counter()).inc();
-      int purpose = req.getParams().getInt(ShardParams.SHARDS_PURPOSE, 0);
-      if (purpose != 0) {
-        String[] names = SolrPluginUtils.getRequestPurposeNames(purpose);
-        for (String n : names) {
-          shardPurposes.computeIfAbsent(n, name -> new Counter()).inc();
-        }
-      }
-    }
-    Timer.Context timer = requestTimes.time();
-    try {
-      if(pluginInfo != null && pluginInfo.attributes.containsKey(USEPARAM)) req.getContext().put(USEPARAM,pluginInfo.attributes.get(USEPARAM));
-      SolrPluginUtils.setDefaults(this, req, defaults, appends, invariants);
-      req.getContext().remove(USEPARAM);
-      rsp.setHttpCaching(httpCaching);
-      handleRequestBody( req, rsp );
-      // count timeouts
-      NamedList header = rsp.getResponseHeader();
-      if(header != null) {
-        Object partialResults = header.get(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY);
-        boolean timedOut = partialResults == null ? false : (Boolean)partialResults;
-        if( timedOut ) {
-          numTimeouts.mark();
-          rsp.setHttpCaching(false);
-        }
-      }
-    } catch (Exception e) {
-      if (req.getCore() != null) {
-        boolean isTragic = req.getCore().getCoreContainer().checkTragicException(req.getCore());
-        if (isTragic) {
-          if (e instanceof SolrException) {
-            // Tragic exceptions should always throw a server error
-            assert ((SolrException) e).code() == 500;
-          } else {
-            // wrap it in a solr exception
-            e = new SolrException(SolrException.ErrorCode.SERVER_ERROR, e.getMessage(), e);
-          }
-        }
-      }
-      boolean incrementErrors = true;
-      boolean isServerError = true;
-      if (e instanceof SolrException) {
-        SolrException se = (SolrException)e;
-        if (se.code() == SolrException.ErrorCode.CONFLICT.code) {
-          incrementErrors = false;
-        } else if (se.code() >= 400 && se.code() < 500) {
-          isServerError = false;
-        }
-      } else {
-        if (e instanceof SyntaxError) {
-          isServerError = false;
-          e = new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
-        }
-      }
-
-      rsp.setException(e);
-
-      if (incrementErrors) {
-        SolrException.log(log, e);
-
-        numErrors.mark();
-        if (isServerError) {
-          numServerErrors.mark();
-        } else {
-          numClientErrors.mark();
-        }
-      }
-    } finally {
-      long elapsed = timer.stop();
-      totalTime.inc(elapsed);
-    }
-  }
-
-  //////////////////////// SolrInfoMBeans methods //////////////////////
-
-  @Override
-  public String getName() {
-    return this.getClass().getName();
-  }
-
-  @Override
-  public abstract String getDescription();
-
-  @Override
-  public Category getCategory() {
-    return Category.QUERY;
-  }
-
-  @Override
-  public Set<String> getMetricNames() {
-    return metricNames;
-  }
-
-  @Override
-  public MetricRegistry getMetricRegistry() {
-    return registry;
-  }
-
-  @Override
-  public SolrRequestHandler getSubHandler(String subPath) {
-    return null;
-  }
-
-
-  /**
-   * Get the request handler registered to a given name.
-   *
-   * This function is thread safe.
-   */
-  public static SolrRequestHandler getRequestHandler(String handlerName, PluginBag<SolrRequestHandler> reqHandlers) {
-    if(handlerName == null) return null;
-    SolrRequestHandler handler = reqHandlers.get(handlerName);
-    int idx = 0;
-    if(handler == null) {
-      for (; ; ) {
-        idx = handlerName.indexOf('/', idx+1);
-        if (idx > 0) {
-          String firstPart = handlerName.substring(0, idx);
-          handler = reqHandlers.get(firstPart);
-          if (handler == null) continue;
-          if (handler instanceof NestedRequestHandler) {
-            return ((NestedRequestHandler) handler).getSubHandler(handlerName.substring(idx));
-          }
-        } else {
-          break;
-        }
-      }
-    }
-    return handler;
-  }
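
To illustrate the lookup above: for a name like "/admin/file/x" with no exact
registration, the loop probes successively longer prefixes ("/admin", then
"/admin/file") and, on finding a NestedRequestHandler at a prefix, delegates the
remaining suffix to getSubHandler. Below is a simplified stand-in that mirrors the
prefix walk over a plain Map instead of the real PluginBag:

    import java.util.Map;

    public class PrefixLookupDemo {
      // Simplified: every prefix hit is treated as a nested handler.
      static String resolve(String name, Map<String, String> handlers) {
        String handler = handlers.get(name);
        int idx = 0;
        while (handler == null) {
          idx = name.indexOf('/', idx + 1);
          if (idx <= 0) break;
          String prefix = name.substring(0, idx);
          if (handlers.containsKey(prefix)) {
            return handlers.get(prefix) + " (sub-path " + name.substring(idx) + ")";
          }
        }
        return handler;
      }

      public static void main(String[] args) {
        Map<String, String> handlers = Map.of("/admin", "AdminHandler");
        System.out.println(resolve("/admin/file/x", handlers));
        // AdminHandler (sub-path /file/x)
      }
    }
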
-
-  public void setPluginInfo(PluginInfo pluginInfo){
-    if(this.pluginInfo==null) this.pluginInfo = pluginInfo;
-  }
-
-  public PluginInfo getPluginInfo(){
-    return  pluginInfo;
-  }
-
-  @Override
-  public Collection<Api> getApis() {
-    return ImmutableList.of(new ApiBag.ReqHandlerToApi(this, ApiBag.constructSpec(pluginInfo)));
-  }
-}
-
-


[25/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/SolrXmlConfig.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrXmlConfig.java b/solr/core/src/java/org/apache/solr/core/SolrXmlConfig.java
deleted file mode 100644
index 6636a8f..0000000
--- a/solr/core/src/java/org/apache/solr/core/SolrXmlConfig.java
+++ /dev/null
@@ -1,553 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import javax.management.MBeanServer;
-import javax.xml.xpath.XPath;
-import javax.xml.xpath.XPathConstants;
-import javax.xml.xpath.XPathExpressionException;
-import java.io.ByteArrayInputStream;
-import java.io.InputStream;
-import java.lang.invoke.MethodHandles;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-
-import com.google.common.base.Strings;
-import org.apache.commons.io.IOUtils;
-import org.apache.solr.client.solrj.impl.HttpClientUtil;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.logging.LogWatcherConfig;
-import org.apache.solr.metrics.reporters.SolrJmxReporter;
-import org.apache.solr.update.UpdateShardHandlerConfig;
-import org.apache.solr.util.DOMUtil;
-import org.apache.solr.util.JmxUtil;
-import org.apache.solr.util.PropertiesUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-import org.xml.sax.InputSource;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-
-/**
- *
- */
-public class SolrXmlConfig {
-
-  public final static String SOLR_XML_FILE = "solr.xml";
-  public final static String SOLR_DATA_HOME = "solr.data.home";
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static NodeConfig fromConfig(Config config) {
-
-    checkForIllegalConfig(config);
-
-    config.substituteProperties();
-
-    CloudConfig cloudConfig = null;
-    UpdateShardHandlerConfig deprecatedUpdateConfig = null;
-
-    if (config.getNodeList("solr/solrcloud", false).getLength() > 0) {
-      NamedList<Object> cloudSection = readNodeListAsNamedList(config, "solr/solrcloud/*[@name]", "<solrcloud>");
-      deprecatedUpdateConfig = loadUpdateConfig(cloudSection, false);
-      cloudConfig = fillSolrCloudSection(cloudSection);
-    }
-
-    NamedList<Object> entries = readNodeListAsNamedList(config, "solr/*[@name]", "<solr>");
-    String nodeName = (String) entries.remove("nodeName");
-    if (Strings.isNullOrEmpty(nodeName) && cloudConfig != null)
-      nodeName = cloudConfig.getHost();
-
-    UpdateShardHandlerConfig updateConfig;
-    if (deprecatedUpdateConfig == null) {
-      updateConfig = loadUpdateConfig(readNodeListAsNamedList(config, "solr/updateshardhandler/*[@name]", "<updateshardhandler>"), true);
-    }
-    else {
-      updateConfig = loadUpdateConfig(readNodeListAsNamedList(config, "solr/updateshardhandler/*[@name]", "<updateshardhandler>"), false);
-      if (updateConfig != null) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "UpdateShardHandler configuration defined twice in solr.xml");
-      }
-      updateConfig = deprecatedUpdateConfig;
-    }
-
-    NodeConfig.NodeConfigBuilder configBuilder = new NodeConfig.NodeConfigBuilder(nodeName, config.getResourceLoader());
-    configBuilder.setUpdateShardHandlerConfig(updateConfig);
-    configBuilder.setShardHandlerFactoryConfig(getShardHandlerFactoryPluginInfo(config));
-    configBuilder.setSolrCoreCacheFactoryConfig(getTransientCoreCacheFactoryPluginInfo(config));
-    configBuilder.setLogWatcherConfig(loadLogWatcherConfig(config, "solr/logging/*[@name]", "solr/logging/watcher/*[@name]"));
-    configBuilder.setSolrProperties(loadProperties(config));
-    if (cloudConfig != null)
-      configBuilder.setCloudConfig(cloudConfig);
-    configBuilder.setBackupRepositoryPlugins(getBackupRepositoryPluginInfos(config));
-    configBuilder.setMetricsConfig(getMetricsConfig(config));
-    return fillSolrSection(configBuilder, entries);
-  }
-
-  public static NodeConfig fromFile(SolrResourceLoader loader, Path configFile) {
-
-    log.info("Loading container configuration from {}", configFile);
-
-    if (!Files.exists(configFile)) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "solr.xml does not exist in " + configFile.getParent() + " cannot start Solr");
-    }
-
-    try (InputStream inputStream = Files.newInputStream(configFile)) {
-      return fromInputStream(loader, inputStream);
-    } catch (SolrException exc) {
-      throw exc;
-    } catch (Exception exc) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Could not load SOLR configuration", exc);
-    }
-  }
-
-  public static NodeConfig fromString(SolrResourceLoader loader, String xml) {
-    return fromInputStream(loader, new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));
-  }
-
-  public static NodeConfig fromInputStream(SolrResourceLoader loader, InputStream is) {
-    try {
-      byte[] buf = IOUtils.toByteArray(is);
-      try (ByteArrayInputStream dup = new ByteArrayInputStream(buf)) {
-        Config config = new Config(loader, null, new InputSource(dup), null, false);
-        return fromConfig(config);
-      }
-    } catch (SolrException exc) {
-      throw exc;
-    } catch (Exception e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
-    }
-  }
-
-  public static NodeConfig fromSolrHome(SolrResourceLoader loader, Path solrHome) {
-    return fromFile(loader, solrHome.resolve(SOLR_XML_FILE));
-  }
-
-  public static NodeConfig fromSolrHome(Path solrHome) {
-    SolrResourceLoader loader = new SolrResourceLoader(solrHome);
-    return fromSolrHome(loader, solrHome);
-  }
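
A short usage sketch for the factory methods above; the solr home path and the XML
string are hypothetical, minimal examples:

    import java.nio.file.Paths;
    import org.apache.solr.core.NodeConfig;
    import org.apache.solr.core.SolrResourceLoader;
    import org.apache.solr.core.SolrXmlConfig;

    public class SolrXmlDemo {
      public static void main(String[] args) {
        SolrResourceLoader loader = new SolrResourceLoader(Paths.get("/tmp/solrhome"));
        // Element form, e.g. <int name="coreLoadThreads">, is handled by fillSolrSection;
        // the attribute form solr/@coreLoadThreads is rejected by checkForIllegalConfig.
        NodeConfig cfg = SolrXmlConfig.fromString(loader,
            "<solr><int name='coreLoadThreads'>8</int></solr>");
        System.out.println("parsed node config: " + cfg);
      }
    }
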
-
-  private static void checkForIllegalConfig(Config config) {
-    failIfFound(config, "solr/@coreLoadThreads");
-    failIfFound(config, "solr/@persistent");
-    failIfFound(config, "solr/@sharedLib");
-    failIfFound(config, "solr/@zkHost");
-    failIfFound(config, "solr/cores");
-
-    assertSingleInstance("solrcloud", config);
-    assertSingleInstance("logging", config);
-    assertSingleInstance("logging/watcher", config);
-    assertSingleInstance("backup", config);
-  }
-
-  private static void assertSingleInstance(String section, Config config) {
-    if (config.getNodeList("/solr/" + section, false).getLength() > 1)
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Multiple instances of " + section + " section found in solr.xml");
-  }
-
-  private static void failIfFound(Config config, String xPath) {
-
-    if (config.getVal(xPath, false) != null) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Should not have found " + xPath +
-          "\n. Please upgrade your solr.xml: https://lucene.apache.org/solr/guide/format-of-solr-xml.html");
-    }
-  }
-
-  private static Properties loadProperties(Config config) {
-    try {
-      Node node = ((NodeList) config.evaluate("solr", XPathConstants.NODESET)).item(0);
-      XPath xpath = config.getXPath();
-      NodeList props = (NodeList) xpath.evaluate("property", node, XPathConstants.NODESET);
-      Properties properties = new Properties();
-      for (int i = 0; i < props.getLength(); i++) {
-        Node prop = props.item(i);
-        properties.setProperty(DOMUtil.getAttr(prop, NAME),
-            PropertiesUtil.substituteProperty(DOMUtil.getAttr(prop, "value"), null));
-      }
-      return properties;
-    }
-    catch (XPathExpressionException e) {
-      log.warn("Error parsing solr.xml: " + e.getMessage());
-      return null;
-    }
-  }
-
-  private static NamedList<Object> readNodeListAsNamedList(Config config, String path, String section) {
-    NodeList nodes = config.getNodeList(path, false);
-    if (nodes == null) {
-      return null;
-    }
-    return checkForDuplicates(section, DOMUtil.nodesToNamedList(nodes));
-  }
-
-  private static NamedList<Object> checkForDuplicates(String section, NamedList<Object> nl) {
-    Set<String> keys = new HashSet<>();
-    for (Map.Entry<String, Object> entry : nl) {
-      if (!keys.add(entry.getKey()))
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            section + " section of solr.xml contains duplicated '" + entry.getKey() + "'");
-    }
-    return nl;
-  }
-
-  private static int parseInt(String field, String value) {
-    try {
-      return Integer.parseInt(value);
-    }
-    catch (NumberFormatException e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Error parsing '" + field + "', value '" + value + "' cannot be parsed as int");
-    }
-  }
-
-  private static NodeConfig fillSolrSection(NodeConfig.NodeConfigBuilder builder, NamedList<Object> nl) {
-
-    for (Map.Entry<String, Object> entry : nl) {
-      String name = entry.getKey();
-      if (entry.getValue() == null)
-        continue;
-      String value = entry.getValue().toString();
-      switch (name) {
-        case "adminHandler":
-          builder.setCoreAdminHandlerClass(value);
-          break;
-        case "collectionsHandler":
-          builder.setCollectionsAdminHandlerClass(value);
-          break;
-        case "healthCheckHandler":
-          builder.setHealthCheckHandlerClass(value);
-          break;
-        case "infoHandler":
-          builder.setInfoHandlerClass(value);
-          break;
-        case "configSetsHandler":
-          builder.setConfigSetsHandlerClass(value);
-          break;
-        case "coreRootDirectory":
-          builder.setCoreRootDirectory(value);
-          break;
-        case "solrDataHome":
-          builder.setSolrDataHome(value);
-          break;
-        case "managementPath":
-          builder.setManagementPath(value);
-          break;
-        case "sharedLib":
-          builder.setSharedLibDirectory(value);
-          break;
-        case "configSetBaseDir":
-          builder.setConfigSetBaseDirectory(value);
-          break;
-        case "shareSchema":
-          builder.setUseSchemaCache(Boolean.parseBoolean(value));
-          break;
-        case "coreLoadThreads":
-          builder.setCoreLoadThreads(parseInt(name, value));
-          break;
-        case "replayUpdatesThreads":
-          builder.setReplayUpdatesThreads(parseInt(name, value));
-          break;
-        case "transientCacheSize":
-          builder.setTransientCacheSize(parseInt(name, value));
-          break;
-        default:
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown configuration value in solr.xml: " + name);
-      }
-    }
-
-    return builder.build();
-  }
-
-  private static UpdateShardHandlerConfig loadUpdateConfig(NamedList<Object> nl, boolean alwaysDefine) {
-
-    if (nl == null && !alwaysDefine)
-      return null;
-
-    if (nl == null)
-      return UpdateShardHandlerConfig.DEFAULT;
-
-    boolean defined = false;
-
-    int maxUpdateConnections = HttpClientUtil.DEFAULT_MAXCONNECTIONS;
-    int maxUpdateConnectionsPerHost = HttpClientUtil.DEFAULT_MAXCONNECTIONSPERHOST;
-    int distributedSocketTimeout = HttpClientUtil.DEFAULT_SO_TIMEOUT;
-    int distributedConnectionTimeout = HttpClientUtil.DEFAULT_CONNECT_TIMEOUT;
-    String metricNameStrategy = UpdateShardHandlerConfig.DEFAULT_METRICNAMESTRATEGY;
-    int maxRecoveryThreads = UpdateShardHandlerConfig.DEFAULT_MAXRECOVERYTHREADS;
-
-    Object muc = nl.remove("maxUpdateConnections");
-    if (muc != null) {
-      maxUpdateConnections = parseInt("maxUpdateConnections", muc.toString());
-      defined = true;
-    }
-
-    Object mucph = nl.remove("maxUpdateConnectionsPerHost");
-    if (mucph != null) {
-      maxUpdateConnectionsPerHost = parseInt("maxUpdateConnectionsPerHost", mucph.toString());
-      defined = true;
-    }
-
-    Object dst = nl.remove("distribUpdateSoTimeout");
-    if (dst != null) {
-      distributedSocketTimeout = parseInt("distribUpdateSoTimeout", dst.toString());
-      defined = true;
-    }
-
-    Object dct = nl.remove("distribUpdateConnTimeout");
-    if (dct != null) {
-      distributedConnectionTimeout = parseInt("distribUpdateConnTimeout", dct.toString());
-      defined = true;
-    }
-
-    Object mns = nl.remove("metricNameStrategy");
-    if (mns != null)  {
-      metricNameStrategy = mns.toString();
-      defined = true;
-    }
-
-    Object mrt = nl.remove("maxRecoveryThreads");
-    if (mrt != null)  {
-      maxRecoveryThreads = parseInt("maxRecoveryThreads", mrt.toString());
-      defined = true;
-    }
-
-    if (!defined && !alwaysDefine)
-      return null;
-
-    return new UpdateShardHandlerConfig(maxUpdateConnections, maxUpdateConnectionsPerHost, distributedSocketTimeout,
-                                        distributedConnectionTimeout, metricNameStrategy, maxRecoveryThreads);
-
-  }
-
-  private static String removeValue(NamedList<Object> nl, String key) {
-    Object value = nl.remove(key);
-    if (value == null)
-      return null;
-    return value.toString();
-  }
-
-  private static String required(String section, String key, String value) {
-    if (value != null)
-      return value;
-    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, section + " section missing required entry '" + key + "'");
-  }
-
-  private static CloudConfig fillSolrCloudSection(NamedList<Object> nl) {
-
-    String hostName = required("solrcloud", "host", removeValue(nl, "host"));
-    int hostPort = parseInt("hostPort", required("solrcloud", "hostPort", removeValue(nl, "hostPort")));
-    String hostContext = required("solrcloud", "hostContext", removeValue(nl, "hostContext"));
-
-    CloudConfig.CloudConfigBuilder builder = new CloudConfig.CloudConfigBuilder(hostName, hostPort, hostContext);
-
-    for (Map.Entry<String, Object> entry : nl) {
-      String name = entry.getKey();
-      if (entry.getValue() == null)
-        continue;
-      String value = entry.getValue().toString();
-      switch (name) {
-        case "leaderVoteWait":
-          builder.setLeaderVoteWait(parseInt(name, value));
-          break;
-        case "leaderConflictResolveWait":
-          builder.setLeaderConflictResolveWait(parseInt(name, value));
-          break;
-        case "zkClientTimeout":
-          builder.setZkClientTimeout(parseInt(name, value));
-          break;
-        case "autoReplicaFailoverBadNodeExpiration": case "autoReplicaFailoverWorkLoopDelay":
-          //TODO remove this in Solr 8.0
-          log.info("Configuration parameter " + name + " is ignored");
-          break;
-        case "autoReplicaFailoverWaitAfterExpiration":
-          builder.setAutoReplicaFailoverWaitAfterExpiration(parseInt(name, value));
-          break;
-        case "zkHost":
-          builder.setZkHost(value);
-          break;
-        case "genericCoreNodeNames":
-          builder.setUseGenericCoreNames(Boolean.parseBoolean(value));
-          break;
-        case "zkACLProvider":
-          builder.setZkACLProviderClass(value);
-          break;
-        case "zkCredentialsProvider":
-          builder.setZkCredentialsProviderClass(value);
-          break;
-        case "createCollectionWaitTimeTillActive":
-          builder.setCreateCollectionWaitTimeTillActive(parseInt(name, value));
-          break;
-        case "createCollectionCheckLeaderActive":
-          builder.setCreateCollectionCheckLeaderActive(Boolean.parseBoolean(value));
-          break;
-        default:
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown configuration parameter in <solrcloud> section of solr.xml: " + name);
-      }
-    }
-
-    return builder.build();
-  }
-
-  private static LogWatcherConfig loadLogWatcherConfig(Config config, String loggingPath, String watcherPath) {
-
-    String loggingClass = null;
-    boolean enabled = true;
-    int watcherQueueSize = 50;
-    String watcherThreshold = null;
-
-    for (Map.Entry<String, Object> entry : readNodeListAsNamedList(config, loggingPath, "<logging>")) {
-      String name = entry.getKey();
-      String value = entry.getValue().toString();
-      switch (name) {
-        case "class":
-          loggingClass = value; break;
-        case "enabled":
-          enabled = Boolean.parseBoolean(value); break;
-        default:
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown value in logwatcher config: " + name);
-      }
-    }
-
-    for (Map.Entry<String, Object> entry : readNodeListAsNamedList(config, watcherPath, "<watcher>")) {
-      String name = entry.getKey();
-      String value = entry.getValue().toString();
-      switch (name) {
-        case "size":
-          watcherQueueSize = parseInt(name, value); break;
-        case "threshold":
-          watcherThreshold = value; break;
-        default:
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown value in logwatcher config: " + name);
-      }
-    }
-
-    return new LogWatcherConfig(enabled, loggingClass, watcherThreshold, watcherQueueSize);
-
-  }
-
-  private static PluginInfo getShardHandlerFactoryPluginInfo(Config config) {
-    Node node = config.getNode("solr/shardHandlerFactory", false);
-    return (node == null) ? null : new PluginInfo(node, "shardHandlerFactory", false, true);
-  }
-
-  private static PluginInfo[] getBackupRepositoryPluginInfos(Config config) {
-    NodeList nodes = (NodeList) config.evaluate("solr/backup/repository", XPathConstants.NODESET);
-    if (nodes == null || nodes.getLength() == 0)
-      return new PluginInfo[0];
-    PluginInfo[] configs = new PluginInfo[nodes.getLength()];
-    for (int i = 0; i < nodes.getLength(); i++) {
-      configs[i] = new PluginInfo(nodes.item(i), "BackupRepositoryFactory", true, true);
-    }
-    return configs;
-  }
-
-  private static MetricsConfig getMetricsConfig(Config config) {
-    MetricsConfig.MetricsConfigBuilder builder = new MetricsConfig.MetricsConfigBuilder();
-    Node node = config.getNode("solr/metrics/suppliers/counter", false);
-    if (node != null) {
-      builder = builder.setCounterSupplier(new PluginInfo(node, "counterSupplier", false, false));
-    }
-    node = config.getNode("solr/metrics/suppliers/meter", false);
-    if (node != null) {
-      builder = builder.setMeterSupplier(new PluginInfo(node, "meterSupplier", false, false));
-    }
-    node = config.getNode("solr/metrics/suppliers/timer", false);
-    if (node != null) {
-      builder = builder.setTimerSupplier(new PluginInfo(node, "timerSupplier", false, false));
-    }
-    node = config.getNode("solr/metrics/suppliers/histogram", false);
-    if (node != null) {
-      builder = builder.setHistogramSupplier(new PluginInfo(node, "histogramSupplier", false, false));
-    }
-    node = config.getNode("solr/metrics/history", false);
-    if (node != null) {
-      builder = builder.setHistoryHandler(new PluginInfo(node, "history", false, false));
-    }
-    PluginInfo[] reporterPlugins = getMetricReporterPluginInfos(config);
-    Set<String> hiddenSysProps = getHiddenSysProps(config);
-    return builder
-        .setMetricReporterPlugins(reporterPlugins)
-        .setHiddenSysProps(hiddenSysProps)
-        .build();
-  }
-
-  private static PluginInfo[] getMetricReporterPluginInfos(Config config) {
-    NodeList nodes = (NodeList) config.evaluate("solr/metrics/reporter", XPathConstants.NODESET);
-    List<PluginInfo> configs = new ArrayList<>();
-    boolean hasJmxReporter = false;
-    if (nodes != null && nodes.getLength() > 0) {
-      for (int i = 0; i < nodes.getLength(); i++) {
-        // we don't require class in order to support predefined replica and node reporter classes
-        PluginInfo info = new PluginInfo(nodes.item(i), "SolrMetricReporter", true, false);
-        String clazz = info.className;
-        if (clazz != null && clazz.equals(SolrJmxReporter.class.getName())) {
-          hasJmxReporter = true;
-        }
-        configs.add(info);
-      }
-    }
-    // if there's an MBean server running but there was no JMX reporter then add a default one
-    MBeanServer mBeanServer = JmxUtil.findFirstMBeanServer();
-    if (mBeanServer != null && !hasJmxReporter) {
-      log.info("MBean server found: " + mBeanServer + ", but no JMX reporters were configured - adding default JMX reporter.");
-      Map<String,Object> attributes = new HashMap<>();
-      attributes.put("name", "default");
-      attributes.put("class", SolrJmxReporter.class.getName());
-      PluginInfo defaultPlugin = new PluginInfo("reporter", attributes);
-      configs.add(defaultPlugin);
-    }
-    return configs.toArray(new PluginInfo[configs.size()]);
-  }
-
-  private static Set<String> getHiddenSysProps(Config config) {
-    NodeList nodes = (NodeList) config.evaluate("solr/metrics/hiddenSysProps/str", XPathConstants.NODESET);
-    if (nodes == null || nodes.getLength() == 0) {
-      return NodeConfig.NodeConfigBuilder.DEFAULT_HIDDEN_SYS_PROPS;
-    }
-    Set<String> props = new HashSet<>();
-    for (int i = 0; i < nodes.getLength(); i++) {
-      String prop = DOMUtil.getText(nodes.item(i));
-      if (prop != null && !prop.trim().isEmpty()) {
-        props.add(prop.trim());
-      }
-    }
-    if (props.isEmpty()) {
-      return NodeConfig.NodeConfigBuilder.DEFAULT_HIDDEN_SYS_PROPS;
-    } else {
-      return props;
-    }
-  }
-
-  private static PluginInfo getTransientCoreCacheFactoryPluginInfo(Config config) {
-    Node node = config.getNode("solr/transientCoreCacheFactory", false);
-    return (node == null) ? null : new PluginInfo(node, "transientCoreCacheFactory", false, true);
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/StandardDirectoryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/StandardDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/StandardDirectoryFactory.java
deleted file mode 100644
index 1bc4914..0000000
--- a/solr/core/src/java/org/apache/solr/core/StandardDirectoryFactory.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-import java.io.File;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.nio.file.AtomicMoveNotSupportedException;
-import java.nio.file.FileSystems;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.StandardCopyOption;
-import java.util.Locale;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.LockFactory;
-import org.apache.lucene.store.NativeFSLockFactory;
-import org.apache.lucene.store.NoLockFactory;
-import org.apache.lucene.store.SimpleFSLockFactory;
-import org.apache.lucene.store.SingleInstanceLockFactory;
-import org.apache.solr.common.SolrException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Directory provider which mimics original Solr 
- * {@link org.apache.lucene.store.FSDirectory} based behavior.
- * 
- * File based DirectoryFactory implementations generally extend
- * this class.
- * 
- */
-public class StandardDirectoryFactory extends CachingDirectoryFactory {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  @Override
-  protected Directory create(String path, LockFactory lockFactory, DirContext dirContext) throws IOException {
-    // the lock factory supplied by the caller (resolved via createLockFactory) is passed straight to FSDirectory
-    return FSDirectory.open(new File(path).toPath(), lockFactory);
-  }
-  
-  @Override
-  protected LockFactory createLockFactory(String rawLockType) throws IOException {
-    if (null == rawLockType) {
-      rawLockType = DirectoryFactory.LOCK_TYPE_NATIVE;
-      log.warn("No lockType configured, assuming '"+rawLockType+"'.");
-    }
-    final String lockType = rawLockType.toLowerCase(Locale.ROOT).trim();
-    switch (lockType) {
-      case DirectoryFactory.LOCK_TYPE_SIMPLE:
-        return SimpleFSLockFactory.INSTANCE;
-      case DirectoryFactory.LOCK_TYPE_NATIVE:
-        return NativeFSLockFactory.INSTANCE;
-      case DirectoryFactory.LOCK_TYPE_SINGLE:
-        return new SingleInstanceLockFactory();
-      case DirectoryFactory.LOCK_TYPE_NONE:
-        return NoLockFactory.INSTANCE;
-      default:
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "Unrecognized lockType: " + rawLockType);
-    }
-  }
-  
-  @Override
-  public String normalize(String path) throws IOException {
-    String cpath = new File(path).getCanonicalPath();
-    
-    return super.normalize(cpath);
-  }
-  
-  @Override
-  public boolean exists(String path) throws IOException {
-    // we go by the persistent storage ... 
-    File dirFile = new File(path);
-    return dirFile.canRead() && dirFile.list().length > 0;
-  }
-  
-  public boolean isPersistent() {
-    return true;
-  }
-  
-  @Override
-  public boolean isAbsolute(String path) {
-    // back compat
-    return new File(path).isAbsolute();
-  }
-  
-  @Override
-  protected void removeDirectory(CacheValue cacheValue) throws IOException {
-    File dirFile = new File(cacheValue.path);
-    FileUtils.deleteDirectory(dirFile);
-  }
-  
-  /**
-   * Override for more efficient moves.
-   * 
-   * Intended for use with replication - use
-   * carefully - some Directory wrappers will
-   * cache files for example.
-   * 
-   * You should first {@link Directory#sync(java.util.Collection)} any file that will be 
-   * moved or avoid cached files through settings.
-   * 
-   * @throws IOException
-   *           If there is a low-level I/O error.
-   */
-  @Override
-  public void move(Directory fromDir, Directory toDir, String fileName, IOContext ioContext)
-      throws IOException {
-    
-    Directory baseFromDir = getBaseDir(fromDir);
-    Directory baseToDir = getBaseDir(toDir);
-    
-    if (baseFromDir instanceof FSDirectory && baseToDir instanceof FSDirectory) {
-  
-      Path path1 = ((FSDirectory) baseFromDir).getDirectory().toAbsolutePath();
-      Path path2 = ((FSDirectory) baseToDir).getDirectory().toAbsolutePath();
-      
-      try {
-        Files.move(path1.resolve(fileName), path2.resolve(fileName), StandardCopyOption.ATOMIC_MOVE);
-      } catch (AtomicMoveNotSupportedException e) {
-        Files.move(path1.resolve(fileName), path2.resolve(fileName));
-      }
-      return;
-    }
-
-    super.move(fromDir, toDir, fileName, ioContext);
-  }
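
The same atomic-move-with-fallback idiom in isolation, using only java.nio.file (the
paths are placeholders):

    import java.nio.file.AtomicMoveNotSupportedException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.StandardCopyOption;

    public class AtomicMoveDemo {
      // Try an atomic move first; fall back to a plain move where the
      // filesystem does not support atomic moves.
      static void moveWithFallback(Path from, Path to) throws Exception {
        try {
          Files.move(from, to, StandardCopyOption.ATOMIC_MOVE);
        } catch (AtomicMoveNotSupportedException e) {
          Files.move(from, to);
        }
      }

      public static void main(String[] args) throws Exception {
        moveWithFallback(Paths.get("/tmp/a.tmp"), Paths.get("/tmp/a"));
      }
    }
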
-
-  // perform an atomic rename if possible
-  public void renameWithOverwrite(Directory dir, String fileName, String toName) throws IOException {
-    Directory baseDir = getBaseDir(dir);
-    if (baseDir instanceof FSDirectory) {
-      Path path = ((FSDirectory) baseDir).getDirectory().toAbsolutePath();
-      try {
-        Files.move(path.resolve(fileName),
-            path.resolve(toName), StandardCopyOption.ATOMIC_MOVE,
-            StandardCopyOption.REPLACE_EXISTING);
-      } catch (AtomicMoveNotSupportedException e) {
-        Files.move(FileSystems.getDefault().getPath(path.toString(), fileName),
-            FileSystems.getDefault().getPath(path.toString(), toName), StandardCopyOption.REPLACE_EXISTING);
-      }
-    } else {
-      super.renameWithOverwrite(dir, fileName, toName);
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/StandardIndexReaderFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/StandardIndexReaderFactory.java b/solr/core/src/java/org/apache/solr/core/StandardIndexReaderFactory.java
deleted file mode 100644
index 9ad4003..0000000
--- a/solr/core/src/java/org/apache/solr/core/StandardIndexReaderFactory.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-import java.io.IOException;
-
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.store.Directory;
-
-/**
- * Default IndexReaderFactory implementation. Returns a standard Lucene
- * {@link DirectoryReader}.
- * 
- * @see DirectoryReader#open(Directory)
- */
-public class StandardIndexReaderFactory extends IndexReaderFactory {
-  
-  @Override
-  public DirectoryReader newReader(Directory indexDir, SolrCore core) throws IOException {
-    return DirectoryReader.open(indexDir);
-  }
-
-  @Override
-  public DirectoryReader newReader(IndexWriter writer, SolrCore core) throws IOException {
-    return DirectoryReader.open(writer);
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/TransientSolrCoreCache.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/TransientSolrCoreCache.java b/solr/core/src/java/org/apache/solr/core/TransientSolrCoreCache.java
deleted file mode 100644
index 63df02b..0000000
--- a/solr/core/src/java/org/apache/solr/core/TransientSolrCoreCache.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.core;
-
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Observable;
-import java.util.Set;
-
-import org.apache.http.annotation.Experimental;
-
-/**
- * The base class for custom transient core maintenance. Any custom plugin that wants to take control of transient
- * caches (i.e. any core defined with transient=true) should override this class.
- *
- * Register your plugin in solr.xml similarly to:
- *
- *   &lt;transientCoreCacheFactory name="transientCoreCacheFactory" class="TransientSolrCoreCacheFactoryDefault"&gt;
- *        &lt;int name="transientCacheSize"&gt;4&lt;/int&gt;
- *   &lt;/transientCoreCacheFactory&gt;
- *
- *
- * WARNING: There is quite a bit of higher-level locking done by the CoreContainer to avoid various race conditions
- *          etc. You should _only_ manipulate them within the method calls designed to change them. E.g.
- *          only add to the transient core descriptors in addTransientDescriptor etc.
- *          
- *          Trust the higher-level code (mainly SolrCores and CoreContainer) to call the appropriate operations when
- *          necessary and to coordinate shutting down cores, manipulating the internal structures, and the like.
- *          
- *          The only real action you should _initiate_ is to close a core for whatever reason, and do that by 
- *          calling notifyObservers(coreToClose). The observer will call back to removeCore(name) at the appropriate
- *          time. There is no need to directly remove the core _at that time_ from the transientCores list, a call
- *          will come back to this class when CoreContainer is closing this core.
- *          
- *          CoreDescriptors are read-once. During "core discovery" all valid descriptors are enumerated and added to
- *          the appropriate list. Thereafter, they are NOT re-read from disk. In those situations where you want
- *          to re-define the coreDescriptor, maintain a "side list" of changed core descriptors. Then override
- *          getTransientDescriptor to return your new core descriptor. NOTE: assuming you've already closed the
- *          core, the _next_ time that core is required getTransientDescriptor will be called and if you return the
- *          new core descriptor your re-definition should be honored. You'll have to maintain this list for the
- *          duration of this Solr instance running. If you persist the coreDescriptor, then next time Solr starts
- *          up the new definition will be read.
- *          
- *
- *  If you need to manipulate the return value, for instance to block a core from being loaded for some period of time,
- *  override, say, getTransientDescriptor and return null.
- *  
- *  In particular, DO NOT reach into the transientCores structure from a method called to manipulate core descriptors
- *  or vice-versa.
- */
-public abstract class TransientSolrCoreCache extends Observable {
-
-  // Gets the core container that encloses this cache.
-  public abstract CoreContainer getContainer();
-
-  // Add the newly-opened core to the list of open cores.
-  public abstract SolrCore addCore(String name, SolrCore core);
-
-  // Return the names of all possible cores, whether they are currently loaded or not.
-  public abstract Set<String> getAllCoreNames();
-  
-  // Return the names of all currently loaded cores
-  public abstract Set<String> getLoadedCoreNames();
-
-  // Remove a core from the internal structures, presumably because it is
-  // being closed. If the core is re-opened, it will be re-added by CoreContainer.
-  public abstract SolrCore removeCore(String name);
-
-  // Get the core associated with the name. Return null if you don't want this core to be used.
-  public abstract SolrCore getCore(String name);
-
-  // Return true if the cache contains the named core.
-  public abstract boolean containsCore(String name);
-  
-  // This method will be called when the container is to be shut down. It should return all
-  // transient solr cores and clear any internal structures that hold them.
-  public abstract Collection<SolrCore> prepareForShutdown();
-
-  // These methods allow the implementation to maintain control over the core descriptors.
-  
-  // This method will only be called during core discovery at startup.
-  public abstract void addTransientDescriptor(String rawName, CoreDescriptor cd);
-  
-  // This method is used when opening cores and the like. If you want to change a core's descriptor, override this
-  // method and return the current core descriptor.
-  public abstract CoreDescriptor getTransientDescriptor(String name);
-
-
-  // Remove the core descriptor from your list of transient descriptors.
-  public abstract CoreDescriptor removeTransientDescriptor(String name);
-
-  // Find all the names a specific core is mapped to. Should not return null; return an empty list instead.
-  @Experimental
-  public List<String> getNamesForCore(SolrCore core) {
-    return Collections.emptyList();
-  }
-  
-  /**
-   * Must be called in order to free resources!
-   */
-  public abstract void close();
-
-
-  // These two methods allow custom implementations to communicate arbitrary information as necessary.
-  public abstract int getStatus(String coreName);
-  public abstract void setStatus(String coreName, int status);
-}
-
-
-  

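The javadoc above describes keeping a "side list" of re-defined core descriptors and overriding getTransientDescriptor. A minimal sketch of that pattern, building on the TransientSolrCoreCacheDefault implementation that follows (the class and method names here are hypothetical):

package org.apache.solr.core;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class RedefiningTransientSolrCoreCache extends TransientSolrCoreCacheDefault {

  // Side list of re-defined descriptors, consulted before the read-once originals.
  private final Map<String, CoreDescriptor> overrides = new ConcurrentHashMap<>();

  public RedefiningTransientSolrCoreCache(CoreContainer container) {
    super(container);
  }

  // Record a changed descriptor; it takes effect the next time the core is opened.
  public void redefine(String name, CoreDescriptor cd) {
    overrides.put(name, cd);
  }

  @Override
  public CoreDescriptor getTransientDescriptor(String name) {
    CoreDescriptor override = overrides.get(name);
    return override != null ? override : super.getTransientDescriptor(name);
  }
}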
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/TransientSolrCoreCacheDefault.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/TransientSolrCoreCacheDefault.java b/solr/core/src/java/org/apache/solr/core/TransientSolrCoreCacheDefault.java
deleted file mode 100644
index e1fd748..0000000
--- a/solr/core/src/java/org/apache/solr/core/TransientSolrCoreCacheDefault.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.core;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Observer;
-import java.util.Set;
-
-import org.apache.solr.common.util.NamedList;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class TransientSolrCoreCacheDefault extends TransientSolrCoreCache {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private int cacheSize = NodeConfig.NodeConfigBuilder.DEFAULT_TRANSIENT_CACHE_SIZE;
-
-  protected Observer observer;
-  protected CoreContainer coreContainer;
-
-  protected final Map<String, CoreDescriptor> transientDescriptors = new LinkedHashMap<>();
-
-  // WARNING! The _only_ place you put anything into the list of transient cores is with the addCore method!
-  protected Map<String, SolrCore> transientCores = new LinkedHashMap<>(); // For "lazily loaded" cores
-
-  /**
-   * @param container The enclosing CoreContainer. It allows us to access everything we need.
-   */
-  public TransientSolrCoreCacheDefault(final CoreContainer container) {
-    this.coreContainer = container;
-    this.observer= coreContainer.solrCores;
-    
-    NodeConfig cfg = container.getNodeConfig();
-    if (cfg.getTransientCachePluginInfo() == null) {
-      // Still handle just having transientCacheSize defined in the body of solr.xml, not in a transient handler clause.
-      // TODO: deprecate this for 7.0?
-      this.cacheSize = cfg.getTransientCacheSize();
-    } else {
-      NamedList args = cfg.getTransientCachePluginInfo().initArgs;
-      Object obj = args.get("transientCacheSize");
-      if (obj != null) {
-        this.cacheSize = (int) obj;
-      }
-    }
-    doInit();
-  }
-  // This just moves the cache initialization out of the constructor.
-  private void doInit() {
-    NodeConfig cfg = coreContainer.getNodeConfig();
-    if (cfg.getTransientCachePluginInfo() == null) {
-      // Still handle just having transientCacheSize defined in the body of solr.xml, not in a transient handler clause.
-      this.cacheSize = cfg.getTransientCacheSize();
-    } else {
-      NamedList args = cfg.getTransientCachePluginInfo().initArgs;
-      Object obj = args.get("transientCacheSize");
-      if (obj != null) {
-        this.cacheSize = (int) obj;
-      }
-    }
-
-    log.info("Allocating transient cache for {} transient cores", cacheSize);
-    addObserver(this.observer);
-    // It's possible for the cache size to be the old "unlimited" marker value of -1.
-    if (cacheSize < 0) { // Trap old flag
-      cacheSize = Integer.MAX_VALUE;
-    }
-    // Now don't allow ridiculous allocations here: if the size is > 1,000, we'll just deal with
-    // adding cores as they're opened. Allocating the full size up front would blow up with the marker value of -1.
-    transientCores = new LinkedHashMap<String, SolrCore>(Math.min(cacheSize, 1000), 0.75f, true) {
-      @Override
-      protected boolean removeEldestEntry(Map.Entry<String, SolrCore> eldest) {
-        if (size() > cacheSize) {
-          SolrCore coreToClose = eldest.getValue();
-          setChanged();
-          notifyObservers(coreToClose);
-          log.info("Closing transient core [{}]", coreToClose.getName());
-          return true;
-        }
-        return false;
-      }
-    };
-  }
-
-  
-  @Override
-  public Collection<SolrCore> prepareForShutdown() {
-    // Return a copy of the values
-    List<SolrCore> ret = new ArrayList<>(transientCores.values());
-    transientCores.clear();
-    return ret;
-  }
-
-  @Override
-  public CoreContainer getContainer() { return this.coreContainer; }
-
-  @Override
-  public SolrCore addCore(String name, SolrCore core) {
-    return transientCores.put(name, core);
-  }
-
-  @Override
-  public Set<String> getAllCoreNames() {
-    return transientDescriptors.keySet();
-  }
-  
-  @Override
-  public Set<String> getLoadedCoreNames() {
-    return transientCores.keySet();
-  }
-
-  // Remove a core from the internal structures, presumably because it is
-  // being closed. If the core is re-opened, it will be re-added by CoreContainer.
-  @Override
-  public SolrCore removeCore(String name) {
-    return transientCores.remove(name);
-  }
-
-  // Get the core associated with the name. Return null if you don't want this core to be used.
-  @Override
-  public SolrCore getCore(String name) {
-    return transientCores.get(name);
-  }
-
-  @Override
-  public boolean containsCore(String name) {
-    return transientCores.containsKey(name);
-  }
-
-  // These methods allow the implementation to maintain control over the core descriptors.
-
-
-  // This method will only be called during core discovery at startup.
-  @Override
-  public void addTransientDescriptor(String rawName, CoreDescriptor cd) {
-    transientDescriptors.put(rawName, cd);
-  }
-
-  // This method is used when opening cores and the like. If you want to change a core's descriptor, override this
-  // method and return the current core descriptor.
-  @Override
-  public CoreDescriptor getTransientDescriptor(String name) {
-    return transientDescriptors.get(name);
-  }
-
-  @Override
-  public CoreDescriptor removeTransientDescriptor(String name) {
-    return transientDescriptors.remove(name);
-  }
-
-  @Override
-  public List<String> getNamesForCore(SolrCore core) {
-    List<String> ret = new ArrayList<>();
-    for (Map.Entry<String, SolrCore> entry : transientCores.entrySet()) {
-      if (core == entry.getValue()) {
-        ret.add(entry.getKey());
-      }
-    }
-    return ret;
-  }
-
-  /**
-   * Must be called in order to free resources!
-   */
-  @Override
-  public void close() {
-    deleteObserver(this.observer);
-  }
-
-
-  // For custom implementations to communicate arbitrary information as necessary.
-  @Override
-  public int getStatus(String coreName) { return 0; } //no_op for default handler.
-
-  @Override
-  public void setStatus(String coreName, int status) {} //no_op for default handler.
-
-}

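The eviction behavior configured in doInit() above is the standard access-ordered LinkedHashMap idiom: the map evicts its least-recently-accessed entry whenever a put pushes it past the cap. A self-contained sketch of just that idiom (the names here are illustrative):

import java.util.LinkedHashMap;
import java.util.Map;

public class LruDemo {
  public static void main(String[] args) {
    final int cacheSize = 2;
    // Access-ordered LinkedHashMap: removeEldestEntry fires on every put,
    // evicting the least-recently-accessed entry once the cap is exceeded.
    Map<String, String> cache = new LinkedHashMap<String, String>(cacheSize, 0.75f, true) {
      @Override
      protected boolean removeEldestEntry(Map.Entry<String, String> eldest) {
        return size() > cacheSize;
      }
    };
    cache.put("core1", "a");
    cache.put("core2", "b");
    cache.get("core1");                 // touch core1 so core2 becomes eldest
    cache.put("core3", "c");            // evicts core2
    System.out.println(cache.keySet()); // [core1, core3]
  }
}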
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/TransientSolrCoreCacheFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/TransientSolrCoreCacheFactory.java b/solr/core/src/java/org/apache/solr/core/TransientSolrCoreCacheFactory.java
deleted file mode 100644
index b3b8cf0..0000000
--- a/solr/core/src/java/org/apache/solr/core/TransientSolrCoreCacheFactory.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.lang.invoke.MethodHandles;
-import java.util.Collections;
-import java.util.Locale;
-
-import com.google.common.collect.ImmutableMap;
-import org.apache.solr.util.plugin.PluginInfoInitialized;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * An interface that allows custom transient caches to be maintained with different implementations
- */
-public abstract class TransientSolrCoreCacheFactory {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private CoreContainer coreContainer = null;
-
-  public abstract TransientSolrCoreCache getTransientSolrCoreCache();
-  /**
-   * Create a new TransientSolrCoreCacheFactory instance
-   *
-   * @param loader a SolrResourceLoader used to find the TransientSolrCacheFactory classes
-   * @param coreContainer CoreContainer that encloses all the Solr cores.              
-   * @return a new, initialized TransientSolrCoreCache instance
-   */
-
-  public static TransientSolrCoreCacheFactory newInstance(SolrResourceLoader loader, CoreContainer coreContainer) {
-    PluginInfo info = coreContainer.getConfig().getTransientCachePluginInfo();
-    if (info == null) { // definition not in our solr.xml file, use default
-      info = DEFAULT_TRANSIENT_SOLR_CACHE_INFO;
-    }
-
-    try {
-      // According to the docs, this returns a TransientSolrCoreCacheFactory with the default c'tor
-      TransientSolrCoreCacheFactory tccf = loader.findClass(info.className, TransientSolrCoreCacheFactory.class).newInstance(); 
-      
-      // OK, now we call its init method.
-      if (PluginInfoInitialized.class.isAssignableFrom(tccf.getClass()))
-        PluginInfoInitialized.class.cast(tccf).init(info);
-      tccf.setCoreContainer(coreContainer);
-      return tccf;
-    } catch (Exception e) {
-      // Many things could cause this: bad solrconfig, mis-typed class name, whatever. However, this should not
-      // keep the enclosing coreContainer from instantiating, so log an error and continue.
-      log.error(String.format(Locale.ROOT, "Error instantiating TransientSolrCoreCacheFactory class [%s]: %s",
-          info.className, e.getMessage()));
-      return null;
-    }
-
-  }
-  public static final PluginInfo DEFAULT_TRANSIENT_SOLR_CACHE_INFO =
-      new PluginInfo("transientSolrCoreCacheFactory",
-          ImmutableMap.of("class", TransientSolrCoreCacheFactoryDefault.class.getName(), 
-              "name", TransientSolrCoreCacheFactory.class.getName()),
-          null, Collections.<PluginInfo>emptyList());
-
-
-  // Need this because the plugin framework doesn't require a PluginInfo in the init method and there's no way to
-  // pass additional parameters, and we need the CoreContainer when we create the transient core cache; it's _really_ important.
-  public void setCoreContainer(CoreContainer coreContainer) {
-    this.coreContainer = coreContainer;
-  }
-
-  public CoreContainer getCoreContainer() {
-    return coreContainer;
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/TransientSolrCoreCacheFactoryDefault.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/TransientSolrCoreCacheFactoryDefault.java b/solr/core/src/java/org/apache/solr/core/TransientSolrCoreCacheFactoryDefault.java
deleted file mode 100644
index 722ab9c..0000000
--- a/solr/core/src/java/org/apache/solr/core/TransientSolrCoreCacheFactoryDefault.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-public class TransientSolrCoreCacheFactoryDefault extends TransientSolrCoreCacheFactory {
-
-  TransientSolrCoreCache transientSolrCoreCache = null;
-
-  @Override
-  public TransientSolrCoreCache getTransientSolrCoreCache() {
-    if (transientSolrCoreCache == null) {
-      transientSolrCoreCache = new TransientSolrCoreCacheDefault(getCoreContainer());
-    }
-
-    return transientSolrCoreCache;
-  }
-}

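The factory above memoizes its cache instance on first request. A generic sketch of that lazy-initialization idiom (single-threaded use assumed, matching the original's unsynchronized check; names are illustrative):

import java.util.function.Supplier;

public class LazyDemo<T> {
  private T instance;
  private final Supplier<T> builder;

  public LazyDemo(Supplier<T> builder) {
    this.builder = builder;
  }

  public T get() {
    if (instance == null) {
      instance = builder.get(); // built on first request, reused afterwards
    }
    return instance;
  }
}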
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/ZkContainer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/ZkContainer.java b/solr/core/src/java/org/apache/solr/core/ZkContainer.java
deleted file mode 100644
index 34e5764..0000000
--- a/solr/core/src/java/org/apache/solr/core/ZkContainer.java
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeoutException;
-import java.util.function.Predicate;
-
-import org.apache.solr.cloud.CurrentCoreDescriptorProvider;
-import org.apache.solr.cloud.SolrZkServer;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ZkConfigManager;
-import org.apache.solr.common.cloud.ZooKeeperException;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.logging.MDCLoggingContext;
-import org.apache.solr.util.DefaultSolrThreadFactory;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ZkContainer {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  protected ZkController zkController;
-  private SolrZkServer zkServer;
-
-  private ExecutorService coreZkRegister = ExecutorUtil.newMDCAwareCachedThreadPool(
-      new DefaultSolrThreadFactory("coreZkRegister") );
-  
-  // see ZkController.zkRunOnly
-  private boolean zkRunOnly = Boolean.getBoolean("zkRunOnly"); // expert
-  
-  public ZkContainer() {
-    
-  }
-
-  public void initZooKeeper(final CoreContainer cc, String solrHome, CloudConfig config) {
-
-    ZkController zkController = null;
-
-    String zkRun = System.getProperty("zkRun");
-
-    if (zkRun != null && config == null)
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Cannot start Solr in cloud mode - no cloud config provided");
-    
-    if (config == null)
-        return;  // not in zk mode
-
-    String zookeeperHost = config.getZkHost();
-
-    // zookeeper in quorum mode currently causes a failure when trying to
-    // register log4j mbeans.  See SOLR-2369
-    // TODO: remove after updating to an slf4j based zookeeper
-    System.setProperty("zookeeper.jmx.log4j.disable", "true");
-
-    if (zkRun != null) {
-      String zkDataHome = System.getProperty("zkServerDataDir", Paths.get(solrHome).resolve("zoo_data").toString());
-      String zkConfHome = System.getProperty("zkServerConfDir", solrHome);
-      zkServer = new SolrZkServer(stripChroot(zkRun), stripChroot(config.getZkHost()), zkDataHome, zkConfHome, config.getSolrHostPort());
-      zkServer.parseConfig();
-      zkServer.start();
-      
-      // set client from server config if not already set
-      if (zookeeperHost == null) {
-        zookeeperHost = zkServer.getClientString();
-      }
-    }
-
-    int zkClientConnectTimeout = 30000;
-
-    if (zookeeperHost != null) {
-
-      // we are ZooKeeper enabled
-      try {
-        // If this is an ensemble, allow for a long connect time for other servers to come up
-        if (zkRun != null && zkServer.getServers().size() > 1) {
-          zkClientConnectTimeout = 24 * 60 * 60 * 1000;  // 1 day for embedded ensemble
-          log.info("Zookeeper client=" + zookeeperHost + "  Waiting for a quorum.");
-        } else {
-          log.info("Zookeeper client=" + zookeeperHost);          
-        }
-        String confDir = System.getProperty("bootstrap_confdir");
-        boolean bootstrapConf = Boolean.getBoolean("bootstrap_conf");
-        
-        if (!ZkController.checkChrootPath(zookeeperHost, (confDir != null) || bootstrapConf || zkRunOnly)) {
-          throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
-              "A chroot was specified in ZkHost but the znode doesn't exist. " + zookeeperHost);
-        }
-        zkController = new ZkController(cc, zookeeperHost, zkClientConnectTimeout, config,
-            new CurrentCoreDescriptorProvider() {
-
-              @Override
-              public List<CoreDescriptor> getCurrentDescriptors() {
-                List<CoreDescriptor> descriptors = new ArrayList<>(
-                    cc.getLoadedCoreNames().size());
-                Collection<SolrCore> cores = cc.getCores();
-                for (SolrCore core : cores) {
-                  descriptors.add(core.getCoreDescriptor());
-                }
-                return descriptors;
-              }
-            });
-
-
-        if (zkRun != null && zkServer.getServers().size() > 1 && confDir == null && boostrapConf == false) {
-          // we are part of an ensemble and we are not uploading the config - pause to give the config time
-          // to get up
-          Thread.sleep(10000);
-        }
-        
-        if(confDir != null) {
-          Path configPath = Paths.get(confDir);
-          if (!Files.isDirectory(configPath))
-            throw new IllegalArgumentException("bootstrap_confdir must be a directory of configuration files");
-
-          String confName = System.getProperty(ZkController.COLLECTION_PARAM_PREFIX+ZkController.CONFIGNAME_PROP, "configuration1");
-          ZkConfigManager configManager = new ZkConfigManager(zkController.getZkClient());
-          configManager.uploadConfigDir(configPath, confName);
-        }
-
-
-        
-        if (bootstrapConf) {
-          ZkController.bootstrapConf(zkController.getZkClient(), cc, solrHome);
-        }
-        
-      } catch (InterruptedException e) {
-        // Restore the interrupted status
-        Thread.currentThread().interrupt();
-        log.error("", e);
-        throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
-            "", e);
-      } catch (TimeoutException e) {
-        log.error("Could not connect to ZooKeeper", e);
-        throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
-            "", e);
-      } catch (IOException | KeeperException e) {
-        log.error("", e);
-        throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
-            "", e);
-      }
-
-
-    }
-    this.zkController = zkController;
-  }
-  
-  private String stripChroot(String zkRun) {
-    if (zkRun == null || zkRun.trim().length() == 0 || zkRun.lastIndexOf('/') < 0) return zkRun;
-    return zkRun.substring(0, zkRun.lastIndexOf('/'));
-  }
-
-  public static Predicate<CoreDescriptor> testing_beforeRegisterInZk;
-
-  public void registerInZk(final SolrCore core, boolean background, boolean skipRecovery) {
-    Runnable r = () -> {
-      MDCLoggingContext.setCore(core);
-      try {
-        try {
-          if (testing_beforeRegisterInZk != null) {
-            testing_beforeRegisterInZk.test(core.getCoreDescriptor());
-          }
-          zkController.register(core.getName(), core.getCoreDescriptor(), skipRecovery);
-        } catch (InterruptedException e) {
-          // Restore the interrupted status
-          Thread.currentThread().interrupt();
-          SolrException.log(log, "", e);
-        } catch (Exception e) {
-          try {
-            zkController.publish(core.getCoreDescriptor(), Replica.State.DOWN);
-          } catch (InterruptedException e1) {
-            Thread.currentThread().interrupt();
-            log.error("", e1);
-          } catch (Exception e1) {
-            log.error("", e1);
-          }
-          SolrException.log(log, "", e);
-        }
-      } finally {
-        MDCLoggingContext.clear();
-      }
-    };
-
-    if (zkController != null) {
-      if (background) {
-        coreZkRegister.execute(r);
-      } else {
-        MDCLoggingContext.setCore(core);
-        try {
-          r.run();
-        } finally {
-          MDCLoggingContext.clear();
-        }
-      }
-    }
-  }
-  
-  public ZkController getZkController() {
-    return zkController;
-  }
-
-  public void close() {
-    
-    try {
-      if (zkController != null) {
-        zkController.close();
-      }
-    } finally {
-      try {
-        if (zkServer != null) {
-          zkServer.stop();
-        }
-      } finally {
-        ExecutorUtil.shutdownAndAwaitTermination(coreZkRegister);
-      }
-    }
-    
-  }
-
-  public ExecutorService getCoreZkRegisterExecutorService() {
-    return coreZkRegister;
-  }
-}

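The close() method above uses nested try/finally blocks so that each shutdown step runs even when an earlier one throws, and so that the executor is always the last thing shut down. A minimal sketch of that idiom, with illustrative names:

public class ShutdownOrderDemo {
  interface Step { void run() throws Exception; }

  // Each step is attempted regardless of failures in the preceding ones;
  // the first exception still propagates after all finally blocks run.
  static void closeInOrder(Step closeController, Step stopServer, Step stopExecutor) throws Exception {
    try {
      closeController.run();
    } finally {
      try {
        stopServer.run();
      } finally {
        stopExecutor.run();
      }
    }
  }

  public static void main(String[] args) throws Exception {
    closeInOrder(
        () -> System.out.println("zkController.close()"),
        () -> System.out.println("zkServer.stop()"),
        () -> System.out.println("shutdownAndAwaitTermination(coreZkRegister)"));
  }
}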
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java b/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java
deleted file mode 100644
index afba4b1..0000000
--- a/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java
+++ /dev/null
@@ -1,292 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core.backup;
-
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.io.OutputStreamWriter;
-import java.io.Reader;
-import java.io.Writer;
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.nio.charset.StandardCharsets;
-import java.util.Collections;
-import java.util.List;
-import java.util.Objects;
-import java.util.Properties;
-
-import com.google.common.base.Preconditions;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexInput;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkConfigManager;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.backup.repository.BackupRepository;
-import org.apache.solr.core.backup.repository.BackupRepository.PathType;
-import org.apache.solr.util.PropertiesInputStream;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class implements functionality to create a backup with extension points provided to integrate with different
- * types of file-systems.
- */
-public class BackupManager {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  public static final String COLLECTION_PROPS_FILE = "collection_state.json";
-  public static final String BACKUP_PROPS_FILE = "backup.properties";
-  public static final String ZK_STATE_DIR = "zk_backup";
-  public static final String CONFIG_STATE_DIR = "configs";
-
-  // Backup properties
-  public static final String COLLECTION_NAME_PROP = "collection";
-  public static final String BACKUP_NAME_PROP = "backupName";
-  public static final String INDEX_VERSION_PROP = "index.version";
-  public static final String START_TIME_PROP = "startTime";
-
-  protected final ZkStateReader zkStateReader;
-  protected final BackupRepository repository;
-
-  public BackupManager(BackupRepository repository, ZkStateReader zkStateReader) {
-    this.repository = Objects.requireNonNull(repository);
-    this.zkStateReader = Objects.requireNonNull(zkStateReader);
-  }
-
-  /**
-   * @return The version of this backup implementation.
-   */
-  public final String getVersion() {
-    return "1.0";
-  }
-
-  /**
-   * This method returns the configuration parameters for the specified backup.
-   *
-   * @param backupLoc The base path used to store the backup data.
-   * @param backupId  The unique name for the backup whose configuration params are required.
-   * @return the configuration parameters for the specified backup.
-   * @throws IOException In case of errors.
-   */
-  public Properties readBackupProperties(URI backupLoc, String backupId) throws IOException {
-    Objects.requireNonNull(backupLoc);
-    Objects.requireNonNull(backupId);
-
-    // Backup location
-    URI backupPath = repository.resolve(backupLoc, backupId);
-    if (!repository.exists(backupPath)) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Couldn't restore since doesn't exist: " + backupPath);
-    }
-
-    Properties props = new Properties();
-    try (Reader is = new InputStreamReader(new PropertiesInputStream(
-        repository.openInput(backupPath, BACKUP_PROPS_FILE, IOContext.DEFAULT)), StandardCharsets.UTF_8)) {
-      props.load(is);
-      return props;
-    }
-  }
-
-  /**
-   * This method stores the backup properties at the specified location in the repository.
-   *
-   * @param backupLoc  The base path used to store the backup data.
-   * @param backupId  The unique name for the backup whose configuration params are required.
-   * @param props The backup properties
-   * @throws IOException in case of I/O error
-   */
-  public void writeBackupProperties(URI backupLoc, String backupId, Properties props) throws IOException {
-    URI dest = repository.resolve(backupLoc, backupId, BACKUP_PROPS_FILE);
-    try (Writer propsWriter = new OutputStreamWriter(repository.createOutput(dest), StandardCharsets.UTF_8)) {
-      props.store(propsWriter, "Backup properties file");
-    }
-  }
-
-  /**
-   * This method reads the meta-data information for the backed-up collection.
-   *
-   * @param backupLoc The base path used to store the backup data.
-   * @param backupId The unique name for the backup.
-   * @param collectionName The name of the collection whose meta-data is to be returned.
-   * @return the meta-data information for the backed-up collection.
-   * @throws IOException in case of errors.
-   */
-  public DocCollection readCollectionState(URI backupLoc, String backupId, String collectionName) throws IOException {
-    Objects.requireNonNull(collectionName);
-
-    URI zkStateDir = repository.resolve(backupLoc, backupId, ZK_STATE_DIR);
-    try (IndexInput is = repository.openInput(zkStateDir, COLLECTION_PROPS_FILE, IOContext.DEFAULT)) {
-      byte[] arr = new byte[(int) is.length()]; // probably ok since the json file should be small.
-      is.readBytes(arr, 0, (int) is.length());
-      ClusterState clusterState = ClusterState.load(-1, arr, Collections.emptySet());
-      return clusterState.getCollection(collectionName);
-    }
-  }
-
-  /**
-   * This method writes the collection meta-data to the specified location in the repository.
-   *
-   * @param backupLoc The base path used to store the backup data.
-   * @param backupId  The unique name for the backup.
-   * @param collectionName The name of the collection whose meta-data is being stored.
-   * @param collectionState The collection meta-data to be stored.
-   * @throws IOException in case of I/O errors.
-   */
-  public void writeCollectionState(URI backupLoc, String backupId, String collectionName,
-                                   DocCollection collectionState) throws IOException {
-    URI dest = repository.resolve(backupLoc, backupId, ZK_STATE_DIR, COLLECTION_PROPS_FILE);
-    try (OutputStream collectionStateOs = repository.createOutput(dest)) {
-      collectionStateOs.write(Utils.toJSON(Collections.singletonMap(collectionName, collectionState)));
-    }
-  }
-
-  /**
-   * This method uploads the Solr configuration files to the desired location in Zookeeper.
-   *
-   * @param backupLoc  The base path used to store the backup data.
-   * @param backupId  The unique name for the backup.
-   * @param sourceConfigName The name of the config to be copied
-   * @param targetConfigName  The name of the config to be created.
-   * @throws IOException in case of I/O errors.
-   */
-  public void uploadConfigDir(URI backupLoc, String backupId, String sourceConfigName, String targetConfigName)
-      throws IOException {
-    URI source = repository.resolve(backupLoc, backupId, ZK_STATE_DIR, CONFIG_STATE_DIR, sourceConfigName);
-    String zkPath = ZkConfigManager.CONFIGS_ZKNODE + "/" + targetConfigName;
-    uploadToZk(zkStateReader.getZkClient(), source, zkPath);
-  }
-
-  /**
-   * This method stores the contents of a specified Solr config at the specified location in repository.
-   *
-   * @param backupLoc  The base path used to store the backup data.
-   * @param backupId  The unique name for the backup.
-   * @param configName The name of the config to be saved.
-   * @throws IOException in case of I/O errors.
-   */
-  public void downloadConfigDir(URI backupLoc, String backupId, String configName) throws IOException {
-    URI dest = repository.resolve(backupLoc, backupId, ZK_STATE_DIR, CONFIG_STATE_DIR, configName);
-    repository.createDirectory(repository.resolve(backupLoc, backupId, ZK_STATE_DIR));
-    repository.createDirectory(repository.resolve(backupLoc, backupId, ZK_STATE_DIR, CONFIG_STATE_DIR));
-    repository.createDirectory(dest);
-
-    downloadFromZK(zkStateReader.getZkClient(), ZkConfigManager.CONFIGS_ZKNODE + "/" + configName, dest);
-  }
-
-  public void uploadCollectionProperties(URI backupLoc, String backupId, String collectionName) throws IOException {
-    URI sourceDir = repository.resolve(backupLoc, backupId, ZK_STATE_DIR);
-    URI source = repository.resolve(sourceDir, ZkStateReader.COLLECTION_PROPS_ZKNODE);
-    if (!repository.exists(source)) {
-      // No collection properties to restore
-      return;
-    }
-    String zkPath = ZkStateReader.COLLECTIONS_ZKNODE + '/' + collectionName + '/' + ZkStateReader.COLLECTION_PROPS_ZKNODE;
-
-    try (IndexInput is = repository.openInput(sourceDir, ZkStateReader.COLLECTION_PROPS_ZKNODE, IOContext.DEFAULT)) {
-      byte[] arr = new byte[(int) is.length()];
-      is.readBytes(arr, 0, (int) is.length());
-      zkStateReader.getZkClient().create(zkPath, arr, CreateMode.PERSISTENT, true);
-    } catch (KeeperException | InterruptedException e) {
-      throw new IOException("Error uploading file to zookeeper path " + source.toString() + " to " + zkPath,
-          SolrZkClient.checkInterrupted(e));
-    }
-  }
-
-  public void downloadCollectionProperties(URI backupLoc, String backupId, String collectionName) throws IOException {
-    URI dest = repository.resolve(backupLoc, backupId, ZK_STATE_DIR, ZkStateReader.COLLECTION_PROPS_ZKNODE);
-    String zkPath = ZkStateReader.COLLECTIONS_ZKNODE + '/' + collectionName + '/' + ZkStateReader.COLLECTION_PROPS_ZKNODE;
-
-
-    try {
-      if (!zkStateReader.getZkClient().exists(zkPath, true)) {
-        // Nothing to back up
-        return;
-      }
-
-      try (OutputStream os = repository.createOutput(dest)) {
-        byte[] data = zkStateReader.getZkClient().getData(zkPath, null, null, true);
-        os.write(data);
-      }
-    } catch (KeeperException | InterruptedException e) {
-      throw new IOException("Error downloading file from zookeeper path " + zkPath + " to " + dest.toString(),
-          SolrZkClient.checkInterrupted(e));
-    }
-  }
-
-  private void downloadFromZK(SolrZkClient zkClient, String zkPath, URI dir) throws IOException {
-    try {
-      if (!repository.exists(dir)) {
-        repository.createDirectory(dir);
-      }
-      List<String> files = zkClient.getChildren(zkPath, null, true);
-      for (String file : files) {
-        List<String> children = zkClient.getChildren(zkPath + "/" + file, null, true);
-        if (children.size() == 0) {
-          log.debug("Writing file {}", file);
-          byte[] data = zkClient.getData(zkPath + "/" + file, null, null, true);
-          try (OutputStream os = repository.createOutput(repository.resolve(dir, file))) {
-            os.write(data);
-          }
-        } else {
-          downloadFromZK(zkClient, zkPath + "/" + file, repository.resolve(dir, file));
-        }
-      }
-    } catch (KeeperException | InterruptedException e) {
-      throw new IOException("Error downloading files from zookeeper path " + zkPath + " to " + dir.toString(),
-          SolrZkClient.checkInterrupted(e));
-    }
-  }
-
-  private void uploadToZk(SolrZkClient zkClient, URI sourceDir, String destZkPath) throws IOException {
-    Preconditions.checkArgument(repository.exists(sourceDir), "Path {} does not exist", sourceDir);
-    Preconditions.checkArgument(repository.getPathType(sourceDir) == PathType.DIRECTORY,
-        "Path {} is not a directory", sourceDir);
-
-    for (String file : repository.listAll(sourceDir)) {
-      String zkNodePath = destZkPath + "/" + file;
-      URI path = repository.resolve(sourceDir, file);
-      PathType t = repository.getPathType(path);
-      switch (t) {
-        case FILE: {
-          try (IndexInput is = repository.openInput(sourceDir, file, IOContext.DEFAULT)) {
-            byte[] arr = new byte[(int) is.length()]; // probably ok since the config file should be small.
-            is.readBytes(arr, 0, (int) is.length());
-            zkClient.makePath(zkNodePath, arr, true);
-          } catch (KeeperException | InterruptedException e) {
-            throw new IOException(SolrZkClient.checkInterrupted(e));
-          }
-          break;
-        }
-
-        case DIRECTORY: {
-          if (!file.startsWith(".")) {
-            uploadToZk(zkClient, path, zkNodePath);
-          }
-          break;
-        }
-        default:
-          throw new IllegalStateException("Unknown path type " + t);
-      }
-    }
-  }
-}

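For orientation, here is a hedged usage sketch of the properties round-trip shown above. It assumes a configured BackupRepository and ZkStateReader are already available from the enclosing Solr runtime; the collection and backup names are illustrative:

import java.io.IOException;
import java.net.URI;
import java.util.Properties;

import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.core.backup.BackupManager;
import org.apache.solr.core.backup.repository.BackupRepository;

public class BackupPropsDemo {
  // repository and zkStateReader come from the enclosing Solr runtime.
  static void roundTrip(BackupRepository repository, ZkStateReader zkStateReader) throws IOException {
    BackupManager mgr = new BackupManager(repository, zkStateReader);
    URI location = repository.createURI("/backups");

    // Write the bookkeeping properties for a new backup...
    Properties props = new Properties();
    props.setProperty(BackupManager.COLLECTION_NAME_PROP, "techproducts");
    props.setProperty(BackupManager.BACKUP_NAME_PROP, "nightly");
    mgr.writeBackupProperties(location, "nightly", props);

    // ...and read them back when restoring.
    Properties restored = mgr.readBackupProperties(location, "nightly");
    System.out.println(restored.getProperty(BackupManager.COLLECTION_NAME_PROP));
  }
}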
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/backup/package-info.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/backup/package-info.java b/solr/core/src/java/org/apache/solr/core/backup/package-info.java
deleted file mode 100644
index defcad6..0000000
--- a/solr/core/src/java/org/apache/solr/core/backup/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-
-/**
- * Core classes for Solr's Backup/Restore functionality
- */
-package org.apache.solr.core.backup;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepository.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepository.java b/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepository.java
deleted file mode 100644
index 875be18..0000000
--- a/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepository.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core.backup.repository;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.URI;
-import java.util.Optional;
-
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexInput;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.util.plugin.NamedListInitializedPlugin;
-
-/**
- * This interface defines the functionality required to backup/restore Solr indexes to an arbitrary storage system.
- */
-public interface BackupRepository extends NamedListInitializedPlugin, Closeable {
-
-  /**
-   * This enumeration defines the type of a given path.
-   */
-  enum PathType {
-    DIRECTORY, FILE
-  }
-
-  /**
-   * This method returns the location where the backup should be stored (or restored from).
-   *
-   * @param override The location parameter supplied by the user.
- *   @return If <code>override</code> is not null, then the same value.
- *         Otherwise the default configuration value for the {@linkplain CoreAdminParams#BACKUP_LOCATION} parameter.
-   */
-  default String getBackupLocation(String override) {
-    return Optional.ofNullable(override).orElse(getConfigProperty(CoreAdminParams.BACKUP_LOCATION));
-  }
-
-  /**
-   * This method returns the value of the specified configuration property.
-   */
-  <T> T getConfigProperty(String name);
-
-  /**
-   * This method returns the URI representation for the specified path.
-   * Note - the specified path could be a fully qualified URI OR a relative path for a file-system.
-   *
-   * @param path The path specified by the user.
-   * @return the URI representation of the user supplied value
-   */
-  URI createURI(String path);
-
-  /**
-   * This method resolves a URI using the specified path components (as method arguments).
-   *
-   * @param baseUri The base URI to use for creating the path
-   * @param pathComponents
-   *          The directory (or file-name) to be included in the URI.
-   * @return A URI containing absolute path
-   */
-  URI resolve(URI baseUri, String... pathComponents);
-
-  /**
-   * This method checks if the specified path exists in this repository.
-   *
-   * @param path
-   *          The path whose existence needs to be checked.
- *   @return true if the specified path exists in this repository.
-   * @throws IOException
-   *           in case of errors
-   */
-  boolean exists(URI path) throws IOException;
-
-  /**
-   * This method returns the type of a specified path
-   *
-   * @param path
-   *          The path whose type needs to be checked.
-   * @return the {@linkplain PathType} for the specified path
-   * @throws IOException
-   *           in case of errors
-   */
-  PathType getPathType(URI path) throws IOException;
-
-  /**
-   * This method returns all the entries (files and directories) in the specified directory.
-   *
-   * @param path
-   *          The directory path
-   * @return an array of strings, one for each entry in the directory
-   * @throws IOException
-   *           in case of errors
-   */
-  String[] listAll(URI path) throws IOException;
-
-  /**
-   * This method returns a Lucene input stream reading an existing file.
-   *
-   * @param dirPath
-   *          The parent directory of the file to be read
-   * @param fileName
-   *          The name of the file to be read
-   * @param ctx
-   *          the Lucene IO context
-   * @return Lucene {@linkplain IndexInput} reference
-   * @throws IOException
-   *           in case of errors
-   */
-  IndexInput openInput(URI dirPath, String fileName, IOContext ctx) throws IOException;
-
-  /**
- *   This method returns an {@linkplain OutputStream} instance for the specified <code>path</code>
-   *
-   * @param path
- *          The path for which an {@linkplain OutputStream} needs to be created
-   * @return {@linkplain OutputStream} instance for the specified <code>path</code>
-   * @throws IOException
-   *           in case of errors
-   */
-  OutputStream createOutput(URI path) throws IOException;
-
-  /**
-   * This method creates a directory at the specified path.
-   *
-   * @param path
-   *          The path where the directory needs to be created.
-   * @throws IOException
-   *           in case of errors
-   */
-  void createDirectory(URI path) throws IOException;
-
-  /**
-   * This method deletes a directory at the specified path.
-   *
-   * @param path
-   *          The path referring to the directory to be deleted.
-   * @throws IOException
-   *           in case of errors
-   */
-  void deleteDirectory(URI path) throws IOException;
-
-  /**
- *   Copy a file from the specified <code>sourceDir</code> to the destination repository (i.e. backup).
-   *
-   * @param sourceDir
-   *          The source directory hosting the file to be copied.
-   * @param fileName
- *          The name of the file to be copied
-   * @param dest
-   *          The destination backup location.
-   * @throws IOException
-   *           in case of errors
-   */
-  void copyFileFrom(Directory sourceDir, String fileName, URI dest) throws IOException;
-
-  /**
- *   Copy a file from the specified <code>sourceRepo</code> to the destination directory (i.e. restore).
-   *
-   * @param sourceRepo
-   *          The source URI hosting the file to be copied.
-   * @param fileName
- *          The name of the file to be copied
-   * @param dest
-   *          The destination where the file should be copied.
-   * @throws IOException
-   *           in case of errors.
-   */
-  void copyFileTo(URI sourceRepo, String fileName, Directory dest) throws IOException;
-}

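A short usage sketch of the interface above, using only the declared methods. The repository instance itself would come from BackupRepositoryFactory (shown next), and the helper names here are hypothetical:

import java.io.IOException;
import java.net.URI;

import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.solr.core.backup.repository.BackupRepository;

public class RepoReadDemo {
  // Read one file out of a repository directory into memory.
  static byte[] readFile(BackupRepository repo, URI dir, String fileName) throws IOException {
    try (IndexInput in = repo.openInput(dir, fileName, IOContext.DEFAULT)) {
      byte[] data = new byte[(int) in.length()];
      in.readBytes(data, 0, data.length);
      return data;
    }
  }

  // List a directory, tagging each entry with its PathType.
  static void listDir(BackupRepository repo, URI dir) throws IOException {
    for (String entry : repo.listAll(dir)) {
      System.out.println(entry + " (" + repo.getPathType(repo.resolve(dir, entry)) + ")");
    }
  }
}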
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepositoryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepositoryFactory.java b/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepositoryFactory.java
deleted file mode 100644
index 9e02b21..0000000
--- a/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepositoryFactory.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.core.backup.repository;
-
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Objects;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.PluginInfo;
-import org.apache.solr.core.SolrResourceLoader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class BackupRepositoryFactory {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final Map<String,PluginInfo> backupRepoPluginByName = new HashMap<>();
-  private PluginInfo defaultBackupRepoPlugin = null;
-
-  public BackupRepositoryFactory(PluginInfo[] backupRepoPlugins) {
-    if (backupRepoPlugins != null) {
-      for (int i = 0; i < backupRepoPlugins.length; i++) {
-        String name = backupRepoPlugins[i].name;
-        boolean isDefault = backupRepoPlugins[i].isDefault();
-
-        if (backupRepoPluginByName.containsKey(name)) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "Duplicate backup repository with name " + name);
-        }
-        if (isDefault) {
-          if (this.defaultBackupRepoPlugin != null) {
-            throw new SolrException(ErrorCode.SERVER_ERROR, "More than one backup repository is configured as default");
-          }
-          this.defaultBackupRepoPlugin = backupRepoPlugins[i];
-        }
-        backupRepoPluginByName.put(name, backupRepoPlugins[i]);
-        log.info("Added backup repository with configuration params {}", backupRepoPlugins[i]);
-      }
-      if (backupRepoPlugins.length == 1) {
-        this.defaultBackupRepoPlugin = backupRepoPlugins[0];
-      }
-
-      if (this.defaultBackupRepoPlugin != null) {
-        log.info("Default configuration for backup repository is with configuration params {}",
-            defaultBackupRepoPlugin);
-      }
-    }
-  }
-
-  public BackupRepository newInstance(SolrResourceLoader loader, String name) {
-    Objects.requireNonNull(loader);
-    Objects.requireNonNull(name);
-    PluginInfo repo = Objects.requireNonNull(backupRepoPluginByName.get(name),
-        "Could not find a backup repository with name " + name);
-
-    BackupRepository result = loader.newInstance(repo.className, BackupRepository.class);
-    result.init(repo.initArgs);
-    return result;
-  }
-
-  public BackupRepository newInstance(SolrResourceLoader loader) {
-    if (defaultBackupRepoPlugin != null) {
-      return newInstance(loader, defaultBackupRepoPlugin.name);
-    }
-
-    LocalFileSystemRepository repo = new LocalFileSystemRepository();
-    repo.init(new NamedList<>());
-    return repo;
-  }
-}

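A usage sketch for the factory above: the PluginInfo array would normally come from parsing solr.xml, and the loader from the CoreContainer. Names are illustrative:

import org.apache.solr.core.PluginInfo;
import org.apache.solr.core.SolrResourceLoader;
import org.apache.solr.core.backup.repository.BackupRepository;
import org.apache.solr.core.backup.repository.BackupRepositoryFactory;

public class RepoFactoryDemo {
  static BackupRepository create(PluginInfo[] plugins, SolrResourceLoader loader) {
    BackupRepositoryFactory factory = new BackupRepositoryFactory(plugins);
    // With no named repository requested, the factory falls back to the
    // default plugin, or to LocalFileSystemRepository when none is configured.
    return factory.newInstance(loader);
  }
}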

[48/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java b/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
deleted file mode 100644
index d4f84f9..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
+++ /dev/null
@@ -1,764 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkCmdExecutor;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.RetryUtil;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.logging.MDCLoggingContext;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.update.PeerSync;
-import org.apache.solr.update.UpdateLog;
-import org.apache.solr.util.RefCounted;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.KeeperException.NoNodeException;
-import org.apache.zookeeper.KeeperException.NodeExistsException;
-import org.apache.zookeeper.Op;
-import org.apache.zookeeper.OpResult;
-import org.apache.zookeeper.OpResult.SetDataResult;
-import org.apache.zookeeper.ZooDefs;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.ID;
-
-public abstract class ElectionContext implements Closeable {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  final String electionPath;
-  final ZkNodeProps leaderProps;
-  final String id;
-  final String leaderPath;
-  volatile String leaderSeqPath;
-  private SolrZkClient zkClient;
-
-  public ElectionContext(final String coreNodeName,
-      final String electionPath, final String leaderPath, final ZkNodeProps leaderProps, final SolrZkClient zkClient) {
-    this.id = coreNodeName;
-    this.electionPath = electionPath;
-    this.leaderPath = leaderPath;
-    this.leaderProps = leaderProps;
-    this.zkClient = zkClient;
-  }
-  
-  public void close() {
-
-  }
-  
-  public void cancelElection() throws InterruptedException, KeeperException {
-    if (leaderSeqPath != null) {
-      try {
-        log.debug("Canceling election {}", leaderSeqPath);
-        zkClient.delete(leaderSeqPath, -1, true);
-      } catch (NoNodeException e) {
-        // fine
-        log.debug("cancelElection did not find election node to remove {}", leaderSeqPath);
-      }
-    } else {
-      log.debug("cancelElection skipped as this context has not been initialized");
-    }
-  }
-
-  abstract void runLeaderProcess(boolean weAreReplacement, int pauseBeforeStartMs) throws KeeperException, InterruptedException, IOException;
-
-  public void checkIfIamLeaderFired() {}
-
-  public void joinedElectionFired() {}
-
-  public ElectionContext copy() {
-    throw new UnsupportedOperationException("copy");
-  }
-}
-
-class ShardLeaderElectionContextBase extends ElectionContext {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  protected final SolrZkClient zkClient;
-  protected String shardId;
-  protected String collection;
-  protected LeaderElector leaderElector;
-  protected ZkStateReader zkStateReader;
-  private Integer leaderZkNodeParentVersion;
-
-  // Prevents a race between cancelling and becoming leader.
-  private final Object lock = new Object();
-
-  public ShardLeaderElectionContextBase(LeaderElector leaderElector,
-      final String shardId, final String collection, final String coreNodeName,
-      ZkNodeProps props, ZkStateReader zkStateReader) {
-    super(coreNodeName, ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection
-        + "/leader_elect/" + shardId, ZkStateReader.getShardLeadersPath(
-        collection, shardId), props, zkStateReader.getZkClient());
-    this.leaderElector = leaderElector;
-    this.zkClient = zkStateReader.getZkClient();
-    this.zkStateReader = zkStateReader;
-    this.shardId = shardId;
-    this.collection = collection;
-  }
-  
-  @Override
-  public void cancelElection() throws InterruptedException, KeeperException {
-    super.cancelElection();
-    synchronized (lock) {
-      if (leaderZkNodeParentVersion != null) {
-        try {
-          // We need to be careful and make sure we *only* delete our own leader registration node.
-          // We do this by using a multi and ensuring the parent znode of the leader registration node
-          // matches the version we expect - there is a setData call that increments the parent's znode
-          // version whenever a leader registers.
-          log.debug("Removing leader registration node on cancel: {} {}", leaderPath, leaderZkNodeParentVersion);
-          List<Op> ops = new ArrayList<>(2);
-          ops.add(Op.check(new Path(leaderPath).getParent().toString(), leaderZkNodeParentVersion));
-          ops.add(Op.delete(leaderPath, -1));
-          zkClient.multi(ops, true);
-        } catch (KeeperException.NoNodeException nne) {
-          // no problem
-          log.debug("No leader registration node found to remove: {}", leaderPath);
-        } catch (KeeperException.BadVersionException bve) {
-          log.info("Cannot remove leader registration node because the current registered node is not ours: {}", leaderPath);
-          // no problem
-        } catch (InterruptedException e) {
-          throw e;
-        } catch (Exception e) {
-          SolrException.log(log, e);
-        }
-        leaderZkNodeParentVersion = null;
-      } else {
-        log.info("No version found for ephemeral leader parent node, won't remove previous leader registration.");
-      }
-    }
-  }
-  
-  @Override
-  void runLeaderProcess(boolean weAreReplacement, int pauseBeforeStartMs)
-      throws KeeperException, InterruptedException, IOException {
-    // register as leader - if an ephemeral is already there, wait to see if it goes away
-    
-    if (!zkClient.exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection, true)) {
-      log.info("Will not register as leader because collection appears to be gone.");
-      return;
-    }
-    
-    String parent = new Path(leaderPath).getParent().toString();
-    ZkCmdExecutor zcmd = new ZkCmdExecutor(30000);
-    // only if /collections/{collection} exists already do we succeed in creating this path
-    zcmd.ensureExists(parent, (byte[])null, CreateMode.PERSISTENT, zkClient, 2);
-
-    try {
-      RetryUtil.retryOnThrowable(NodeExistsException.class, 60000, 5000, () -> {
-        synchronized (lock) {
-          log.debug("Creating leader registration node {} after winning as {}", leaderPath, leaderSeqPath);
-          List<Op> ops = new ArrayList<>(2);
-
-          // We use a multi operation to get the parent nodes version, which will
-          // be used to make sure we only remove our own leader registration node.
-          // The setData call used to get the parent version is also the trigger to
-          // increment the version. We also do a sanity check that our leaderSeqPath exists.
-
-          ops.add(Op.check(leaderSeqPath, -1));
-          ops.add(Op.create(leaderPath, Utils.toJSON(leaderProps), zkClient.getZkACLProvider().getACLsToAdd(leaderPath), CreateMode.EPHEMERAL));
-          ops.add(Op.setData(parent, null, -1));
-          List<OpResult> results;
-
-          results = zkClient.multi(ops, true);
-          for (OpResult result : results) {
-            if (result.getType() == ZooDefs.OpCode.setData) {
-              SetDataResult dresult = (SetDataResult) result;
-              Stat stat = dresult.getStat();
-              leaderZkNodeParentVersion = stat.getVersion();
-              return;
-            }
-          }
-          assert leaderZkNodeParentVersion != null;
-        }
-      });
-    } catch (Throwable t) {
-      if (t instanceof OutOfMemoryError) {
-        throw (OutOfMemoryError) t;
-      }
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Could not register as the leader because creating the ephemeral registration node in ZooKeeper failed", t);
-    } 
-    
-    assert shardId != null;
-    boolean isAlreadyLeader = false;
-    if (zkStateReader.getClusterState() != null &&
-        zkStateReader.getClusterState().getCollection(collection).getSlice(shardId).getReplicas().size() < 2) {
-      Replica leader = zkStateReader.getLeader(collection, shardId);
-      if (leader != null
-          && leader.getBaseUrl().equals(leaderProps.get(ZkStateReader.BASE_URL_PROP))
-          && leader.getCoreName().equals(leaderProps.get(ZkStateReader.CORE_NAME_PROP))) {
-        isAlreadyLeader = true;
-      }
-    }
-    if (!isAlreadyLeader) {
-      ZkNodeProps m = ZkNodeProps.fromKeyVals(Overseer.QUEUE_OPERATION, OverseerAction.LEADER.toLower(),
-          ZkStateReader.SHARD_ID_PROP, shardId,
-          ZkStateReader.COLLECTION_PROP, collection,
-          ZkStateReader.BASE_URL_PROP, leaderProps.get(ZkStateReader.BASE_URL_PROP),
-          ZkStateReader.CORE_NAME_PROP, leaderProps.get(ZkStateReader.CORE_NAME_PROP),
-          ZkStateReader.STATE_PROP, Replica.State.ACTIVE.toString());
-      Overseer.getStateUpdateQueue(zkClient).offer(Utils.toJSON(m));
-    }
-  }
-
-  public LeaderElector getLeaderElector() {
-    return leaderElector;
-  }
-
-  Integer getLeaderZkNodeParentVersion() {
-    synchronized (lock) {
-      return leaderZkNodeParentVersion;
-    }
-  }
-}
-
-// add core container and stop passing core around...
-final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  private final ZkController zkController;
-  private final CoreContainer cc;
-  private final SyncStrategy syncStrategy;
-
-  private volatile boolean isClosed = false;
-  
-  public ShardLeaderElectionContext(LeaderElector leaderElector, 
-      final String shardId, final String collection,
-      final String coreNodeName, ZkNodeProps props, ZkController zkController, CoreContainer cc) {
-    super(leaderElector, shardId, collection, coreNodeName, props,
-        zkController.getZkStateReader());
-    this.zkController = zkController;
-    this.cc = cc;
-    syncStrategy = new SyncStrategy(cc);
-  }
-  
-  @Override
-  public void close() {
-    super.close();
-    this.isClosed  = true;
-    syncStrategy.close();
-  }
-  
-  @Override
-  public void cancelElection() throws InterruptedException, KeeperException {
-    String coreName = leaderProps.getStr(ZkStateReader.CORE_NAME_PROP);
-    try (SolrCore core = cc.getCore(coreName)) {
-      if (core != null) {
-        core.getCoreDescriptor().getCloudDescriptor().setLeader(false);
-      }
-    }
-    
-    super.cancelElection();
-  }
-  
-  @Override
-  public ElectionContext copy() {
-    return new ShardLeaderElectionContext(leaderElector, shardId, collection, id, leaderProps, zkController, cc);
-  }
-  
-  /* 
-   * weAreReplacement: has someone else been the leader already?
-   */
-  @Override
-  void runLeaderProcess(boolean weAreReplacement, int pauseBeforeStart) throws KeeperException,
-      InterruptedException, IOException {
-    String coreName = leaderProps.getStr(ZkStateReader.CORE_NAME_PROP);
-    ActionThrottle lt;
-    try (SolrCore core = cc.getCore(coreName)) {
-      if (core == null ) {
-        if (cc.isShutDown()) {
-          return;
-        } else {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "SolrCore not found:" + coreName + " in " + cc.getLoadedCoreNames());
-        }
-      }
-      MDCLoggingContext.setCore(core);
-      lt = core.getUpdateHandler().getSolrCoreState().getLeaderThrottle();
-    }
-
-    try {
-      lt.minimumWaitBetweenActions();
-      lt.markAttemptingAction();
-      
-      
-      int leaderVoteWait = cc.getZkController().getLeaderVoteWait();
-      
-      log.debug("Running the leader process for shard={} and weAreReplacement={} and leaderVoteWait={}", shardId, weAreReplacement, leaderVoteWait);
-      if (zkController.getClusterState().getCollection(collection).getSlice(shardId).getReplicas().size() > 1) {
-        // Clear the leader in clusterstate. We only need to worry about this if there is actually more than one replica.
-        ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.LEADER.toLower(),
-            ZkStateReader.SHARD_ID_PROP, shardId, ZkStateReader.COLLECTION_PROP, collection);
-        Overseer.getStateUpdateQueue(zkClient).offer(Utils.toJSON(m));
-      }
-
-      boolean allReplicasInLine = false;
-      if (!weAreReplacement) {
-        allReplicasInLine = waitForReplicasToComeUp(leaderVoteWait);
-      } else {
-        allReplicasInLine = areAllReplicasParticipating();
-      }
-      
-      if (isClosed) {
-        // Solr is shutting down or the ZooKeeper session expired while waiting for replicas. If the latter,
-        // we cannot be sure we are still the leader, so we should bail out. The OnReconnect handler will 
-        // re-register the cores and handle a new leadership election.
-        return;
-      }
-      
-      Replica.Type replicaType;
-      String coreNodeName;
-      boolean setTermToMax = false;
-      try (SolrCore core = cc.getCore(coreName)) {
-        
-        if (core == null) {
-          if (!zkController.getCoreContainer().isShutDown())  {
-            cancelElection();
-            throw new SolrException(ErrorCode.SERVER_ERROR,
-                "SolrCore not found:" + coreName + " in " + cc.getLoadedCoreNames());
-          } else  {
-            return;
-          }
-        }
-        
-        replicaType = core.getCoreDescriptor().getCloudDescriptor().getReplicaType();
-        coreNodeName = core.getCoreDescriptor().getCloudDescriptor().getCoreNodeName();
-        // should I be leader?
-        ZkShardTerms zkShardTerms = zkController.getShardTerms(collection, shardId);
-        if (zkShardTerms.registered(coreNodeName) && !zkShardTerms.canBecomeLeader(coreNodeName)) {
-          if (!waitForEligibleBecomeLeaderAfterTimeout(zkShardTerms, coreNodeName, leaderVoteWait)) {
-            rejoinLeaderElection(core);
-            return;
-          } else {
-            // only log an error if this replica wins the election
-            setTermToMax = true;
-          }
-        }
-
-        if (isClosed) {
-          return;
-        }
-        
-        log.info("I may be the new leader - try and sync");
-        
-        // we are going to attempt to be the leader
-        // first cancel any current recovery
-        core.getUpdateHandler().getSolrCoreState().cancelRecovery();
-        
-        if (weAreReplacement) {
-          // wait a moment for any floating updates to finish
-          try {
-            Thread.sleep(2500);
-          } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-            throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, e);
-          }
-        }
-
-        PeerSync.PeerSyncResult result = null;
-        boolean success = false;
-        try {
-          result = syncStrategy.sync(zkController, core, leaderProps, weAreReplacement);
-          success = result.isSuccess();
-        } catch (Exception e) {
-          SolrException.log(log, "Exception while trying to sync", e);
-          result = PeerSync.PeerSyncResult.failure();
-        }
-        
-        UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
-        
-        if (!success) {
-          boolean hasRecentUpdates = false;
-          if (ulog != null) {
-            // TODO: we could optimize this if necessary
-            try (UpdateLog.RecentUpdates recentUpdates = ulog.getRecentUpdates()) {
-              hasRecentUpdates = !recentUpdates.getVersions(1).isEmpty();
-            }
-          }
-          
-          if (!hasRecentUpdates) {
-            // we failed sync, but we have no versions - we can't sync in that case
-            // - we were active
-            // before, so become leader anyway if no one else has any versions either
-            if (result.getOtherHasVersions().orElse(false))  {
-              log.info("We failed sync, but we have no versions - we can't sync in that case. But others have some versions, so we should not become leader");
-              success = false;
-            } else  {
-              log.info(
-                  "We failed sync, but we have no versions - we can't sync in that case - we were active before, so become leader anyway");
-              success = true;
-            }
-          }
-        }
-        
-        // solrcloud_debug
-        if (log.isDebugEnabled()) {
-          try {
-            RefCounted<SolrIndexSearcher> searchHolder = core.getNewestSearcher(false);
-            SolrIndexSearcher searcher = searchHolder.get();
-            try {
-              log.debug(core.getCoreContainer().getZkController().getNodeName() + " synched "
-                  + searcher.count(new MatchAllDocsQuery()));
-            } finally {
-              searchHolder.decref();
-            }
-          } catch (Exception e) {
-            log.error("Error in solrcloud_debug block", e);
-          }
-        }
-        if (!success) {
-          rejoinLeaderElection(core);
-          return;
-        }
-        
-      }
-      
-      boolean isLeader = true;
-      if (!isClosed) {
-        try {
-          if (replicaType == Replica.Type.TLOG) {
-            // stop replicate from old leader
-            zkController.stopReplicationFromLeader(coreName);
-            if (weAreReplacement) {
-              try (SolrCore core = cc.getCore(coreName)) {
-                Future<UpdateLog.RecoveryInfo> future = core.getUpdateHandler().getUpdateLog().recoverFromCurrentLog();
-                if (future != null) {
-                  log.info("Replaying tlog before become new leader");
-                  future.get();
-                } else {
-                  log.info("New leader does not have old tlog to replay");
-                }
-              }
-            }
-          }
-          // in case of leaderVoteWait timeout, a replica with lower term can win the election
-          if (setTermToMax) {
-            log.error("WARNING: Potential data loss -- Replica {} became leader after timeout (leaderVoteWait) " +
-                "without being up-to-date with the previous leader", coreNodeName);
-            zkController.getShardTerms(collection, shardId).setTermEqualsToLeader(coreNodeName);
-          }
-          super.runLeaderProcess(weAreReplacement, 0);
-          try (SolrCore core = cc.getCore(coreName)) {
-            if (core != null) {
-              core.getCoreDescriptor().getCloudDescriptor().setLeader(true);
-              publishActiveIfRegisteredAndNotActive(core);
-            } else {
-              return;
-            }
-          }
-          log.info("I am the new leader: " + ZkCoreNodeProps.getCoreUrl(leaderProps) + " " + shardId);
-          
-          // we made it as leader - send any recovery requests we need to
-          syncStrategy.requestRecoveries();
-
-        } catch (Exception e) {
-          isLeader = false;
-          SolrException.log(log, "There was a problem trying to register as the leader", e);
-          
-          try (SolrCore core = cc.getCore(coreName)) {
-            
-            if (core == null) {
-              log.debug("SolrCore not found:" + coreName + " in " + cc.getLoadedCoreNames());
-              return;
-            }
-            
-            core.getCoreDescriptor().getCloudDescriptor().setLeader(false);
-            
-            // we could not publish ourselves as leader - try and rejoin election
-            rejoinLeaderElection(core);
-          }
-        }
-      } else {
-        cancelElection();
-      }
-    } finally {
-      MDCLoggingContext.clear();
-    }
-  }
-
-  /**
-   * Wait for other replicas with higher terms to participate in the election.
-   * @return true if, after {@code timeout}, no other replicas with a higher term have participated in the election,
-   * false otherwise
-   */
-  private boolean waitForEligibleBecomeLeaderAfterTimeout(ZkShardTerms zkShardTerms, String coreNodeName, int timeout) throws InterruptedException {
-    long timeoutAt = System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeout, TimeUnit.MILLISECONDS);
-    while (!isClosed && !cc.isShutDown()) {
-      if (System.nanoTime() > timeoutAt) {
-        return true;
-      }
-      if (replicasWithHigherTermParticipated(zkShardTerms, coreNodeName)) {
-        log.info("Can't become leader, other replicas with higher term participated in leader election");
-        return false;
-      }
-      Thread.sleep(500L);
-    }
-    return false;
-  }
-
-  /**
-   * Check whether other replicas with a higher term have participated in the election.
-   * @return true if other replicas with a higher term participated in the election, false otherwise
-   */
-  private boolean replicasWithHigherTermParticipated(ZkShardTerms zkShardTerms, String coreNodeName) {
-    ClusterState clusterState = zkController.getClusterState();
-    DocCollection docCollection = clusterState.getCollectionOrNull(collection);
-    Slice slices = (docCollection == null) ? null : docCollection.getSlice(shardId);
-    if (slices == null) return false;
-
-    long replicaTerm = zkShardTerms.getTerm(coreNodeName);
-    boolean isRecovering = zkShardTerms.isRecovering(coreNodeName);
-
-    for (Replica replica : slices.getReplicas()) {
-      if (replica.getName().equals(coreNodeName)) continue;
-
-      if (clusterState.getLiveNodes().contains(replica.getNodeName())) {
-        long otherTerm = zkShardTerms.getTerm(replica.getName());
-        boolean isOtherReplicaRecovering = zkShardTerms.isRecovering(replica.getName());
-
-        if (isRecovering && !isOtherReplicaRecovering) return true;
-        if (otherTerm > replicaTerm) return true;
-      }
-    }
-    return false;
-  }
-
-  public void publishActiveIfRegisteredAndNotActive(SolrCore core) throws Exception {
-      if (core.getCoreDescriptor().getCloudDescriptor().hasRegistered()) {
-        ZkStateReader zkStateReader = zkController.getZkStateReader();
-        zkStateReader.forceUpdateCollection(collection);
-        ClusterState clusterState = zkStateReader.getClusterState();
-        Replica rep = getReplica(clusterState, collection, leaderProps.getStr(ZkStateReader.CORE_NODE_NAME_PROP));
-        if (rep == null) return;
-        if (rep.getState() != Replica.State.ACTIVE || core.getCoreDescriptor().getCloudDescriptor().getLastPublished() != Replica.State.ACTIVE) {
-          log.debug("We have become the leader after core registration but are not in an ACTIVE state - publishing ACTIVE");
-          zkController.publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
-        }
-      }
-  }
-  
-  private Replica getReplica(ClusterState clusterState, String collectionName, String replicaName) {
-    if (clusterState == null) return null;
-    final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
-    if (docCollection == null) return null;
-    return docCollection.getReplica(replicaName);
-  }
-
-  // returns true if all replicas are found to be up, false if not
-  private boolean waitForReplicasToComeUp(int timeoutms) throws InterruptedException {
-    long timeoutAt = System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeoutms, TimeUnit.MILLISECONDS);
-    final String shardsElectZkPath = electionPath + LeaderElector.ELECTION_NODE;
-    
-    DocCollection docCollection = zkController.getClusterState().getCollectionOrNull(collection);
-    Slice slices = (docCollection == null) ? null : docCollection.getSlice(shardId);
-    int cnt = 0;
-    while (!isClosed && !cc.isShutDown()) {
-      // wait for everyone to be up
-      if (slices != null) {
-        int found = 0;
-        try {
-          found = zkClient.getChildren(shardsElectZkPath, null, true).size();
-        } catch (KeeperException e) {
-          if (e instanceof KeeperException.SessionExpiredException) {
-            // if the session has expired, then another election will be launched, so
-            // quit here
-            throw new SolrException(ErrorCode.SERVER_ERROR,
-                                    "ZK session expired - cancelling election for " + collection + " " + shardId);
-          }
-          SolrException.log(log,
-              "Error checking for the number of election participants", e);
-        }
-        
-        // on startup and after connection timeout, wait for all known shards
-        if (found >= slices.getReplicas(EnumSet.of(Replica.Type.TLOG, Replica.Type.NRT)).size()) {
-          log.info("Enough replicas found to continue.");
-          return true;
-        } else {
-          if (cnt % 40 == 0) {
-            log.info("Waiting until we see more replicas up for shard {}: total={}"
-              + " found={}"
-              + " timeoutin={}ms",
-                shardId, slices.getReplicas(EnumSet.of(Replica.Type.TLOG, Replica.Type.NRT)).size(), found,
-                TimeUnit.MILLISECONDS.convert(timeoutAt - System.nanoTime(), TimeUnit.NANOSECONDS));
-          }
-        }
-        
-        if (System.nanoTime() > timeoutAt) {
-          log.info("Was waiting for replicas to come up, but they are taking too long - assuming they won't come back till later");
-          return false;
-        }
-      } else {
-        log.warn("Shard not found: " + shardId + " for collection " + collection);
-
-        return false;
-
-      }
-      
-      Thread.sleep(500);
-      docCollection = zkController.getClusterState().getCollectionOrNull(collection);
-      slices = (docCollection == null) ? null : docCollection.getSlice(shardId);
-      cnt++;
-    }
-    return false;
-  }
-  
-  // returns true if all replicas are found to be up, false if not
-  private boolean areAllReplicasParticipating() throws InterruptedException {
-    final String shardsElectZkPath = electionPath + LeaderElector.ELECTION_NODE;
-    final DocCollection docCollection = zkController.getClusterState().getCollectionOrNull(collection);
-    
-    if (docCollection != null && docCollection.getSlice(shardId) != null) {
-      final Slice slices = docCollection.getSlice(shardId);
-      int found = 0;
-      try {
-        found = zkClient.getChildren(shardsElectZkPath, null, true).size();
-      } catch (KeeperException e) {
-        if (e instanceof KeeperException.SessionExpiredException) {
-          // if the session has expired, then another election will be launched, so
-          // quit here
-          throw new SolrException(ErrorCode.SERVER_ERROR,
-              "ZK session expired - cancelling election for " + collection + " " + shardId);
-        }
-        SolrException.log(log, "Error checking for the number of election participants", e);
-      }
-      
-      if (found >= slices.getReplicasMap().size()) {
-        log.debug("All replicas are ready to participate in election.");
-        return true;
-      }
-      
-    } else {
-      log.warn("Shard not found: " + shardId + " for collection " + collection);
-      
-      return false;
-    }
-    
-    return false;
-  }
-
-  private void rejoinLeaderElection(SolrCore core)
-      throws InterruptedException, KeeperException, IOException {
-    // remove our ephemeral and re join the election
-    if (cc.isShutDown()) {
-      log.debug("Not rejoining election because CoreContainer is closed");
-      return;
-    }
-    
-    log.info("There may be a better leader candidate than us - going back into recovery");
-    
-    cancelElection();
-    
-    core.getUpdateHandler().getSolrCoreState().doRecovery(cc, core.getCoreDescriptor());
-    
-    leaderElector.joinElection(this, true);
-  }
-
-}
-
-final class OverseerElectionContext extends ElectionContext {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final SolrZkClient zkClient;
-  private Overseer overseer;
-
-  public OverseerElectionContext(SolrZkClient zkClient, Overseer overseer, final String zkNodeName) {
-    super(zkNodeName, Overseer.OVERSEER_ELECT, Overseer.OVERSEER_ELECT + "/leader", null, zkClient);
-    this.overseer = overseer;
-    this.zkClient = zkClient;
-    try {
-      new ZkCmdExecutor(zkClient.getZkClientTimeout()).ensureExists(Overseer.OVERSEER_ELECT, zkClient);
-    } catch (KeeperException e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, e);
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      throw new SolrException(ErrorCode.SERVER_ERROR, e);
-    }
-  }
-
-  @Override
-  void runLeaderProcess(boolean weAreReplacement, int pauseBeforeStartMs) throws KeeperException,
-      InterruptedException {
-    log.info("I am going to be the leader {}", id);
-    final String id = leaderSeqPath
-        .substring(leaderSeqPath.lastIndexOf("/") + 1);
-    ZkNodeProps myProps = new ZkNodeProps(ID, id);
-
-    zkClient.makePath(leaderPath, Utils.toJSON(myProps),
-        CreateMode.EPHEMERAL, true);
-    if (pauseBeforeStartMs > 0) {
-      try {
-        Thread.sleep(pauseBeforeStartMs);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        log.warn("Wait interrupted ", e);
-      }
-    }
-    if (!overseer.getZkController().isClosed() && !overseer.getZkController().getCoreContainer().isShutDown()) {
-      overseer.start(id);
-    }
-  }
-  
-  @Override
-  public void cancelElection() throws InterruptedException, KeeperException {
-    super.cancelElection();
-    overseer.close();
-  }
-  
-  @Override
-  public void close() {
-    overseer.close();
-  }
-
-  @Override
-  public ElectionContext copy() {
-    return new OverseerElectionContext(zkClient, overseer, id);
-  }
-  
-  @Override
-  public void joinedElectionFired() {
-    overseer.close();
-  }
-  
-  @Override
-  public void checkIfIamLeaderFired() {
-    // leader changed - close the overseer
-    overseer.close();
-  }
-
-}
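
A note on the removal/registration dance in ShardLeaderElectionContextBase
above: registering bumps the parent znode's version in the same atomic multi
that creates the ephemeral leader node, and cancellation checks that version
before deleting, so a context can never delete a newer leader's registration.
Here is a hedged sketch of the same trick against the raw ZooKeeper client;
the paths and the zk handle are illustrative assumptions, not Solr code.

import java.util.Arrays;
import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.Op;
import org.apache.zookeeper.OpResult;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

final class LeaderRegistrationSketch {

  // Creates the ephemeral leader node and bumps the parent version in one
  // atomic transaction; returns the parent's new version for later cleanup.
  static int register(ZooKeeper zk, String parent, String leaderPath, byte[] props)
      throws KeeperException, InterruptedException {
    List<OpResult> results = zk.multi(Arrays.asList(
        Op.create(leaderPath, props, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL),
        Op.setData(parent, null, -1)));
    return ((OpResult.SetDataResult) results.get(1)).getStat().getVersion();
  }

  // Deletes the leader node only if the parent version is still the one we
  // recorded; if a newer leader has registered since, the check op fails the
  // whole multi with a BadVersionException and nothing is deleted.
  static void unregister(ZooKeeper zk, String parent, String leaderPath, int parentVersion)
      throws KeeperException, InterruptedException {
    zk.multi(Arrays.asList(
        Op.check(parent, parentVersion),
        Op.delete(leaderPath, -1)));
  }
}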

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java b/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java
deleted file mode 100644
index 953023f..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.ListIterator;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
-import org.apache.solr.cloud.overseer.ClusterStateMutator;
-import org.apache.solr.cloud.overseer.CollectionMutator;
-import org.apache.solr.cloud.overseer.SliceMutator;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-
-import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ONLY_ACTIVE_NODES;
-import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SHARD_UNIQUE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.BALANCESHARDUNIQUE;
-
-// Class to encapsulate balancing a replica property where at most one replica per slice may host it.
-class ExclusiveSliceProperty {
-  private ClusterState clusterState;
-  private final boolean onlyActiveNodes;
-  private final String property;
-  private final DocCollection collection;
-  private final String collectionName;
-
-  // Key structure. For each node, list all replicas on it regardless of whether they have the property or not.
-  private final Map<String, List<SliceReplica>> nodesHostingReplicas = new HashMap<>();
-  // Key structure. For each node, a list of the replicas _currently_ hosting the property.
-  private final Map<String, List<SliceReplica>> nodesHostingProp = new HashMap<>();
-  Set<String> shardsNeedingHosts = new HashSet<>();
-  Map<String, Slice> changedSlices = new HashMap<>(); // Work on copies rather than the underlying cluster state.
-
-  private int origMaxPropPerNode = 0;
-  private int origModulo = 0;
-  private int tmpMaxPropPerNode = 0;
-  private int tmpModulo = 0;
-  Random rand = new Random();
-
-  private int assigned = 0;
-
-  ExclusiveSliceProperty(ClusterState clusterState, ZkNodeProps message) {
-    this.clusterState = clusterState;
-    String tmp = message.getStr(ZkStateReader.PROPERTY_PROP);
-    if (StringUtils.startsWith(tmp, OverseerCollectionMessageHandler.COLL_PROP_PREFIX) == false) {
-      tmp = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + tmp;
-    }
-    this.property = tmp.toLowerCase(Locale.ROOT);
-    collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
-
-    if (StringUtils.isBlank(collectionName) || StringUtils.isBlank(property)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "Overseer '" + message.getStr(Overseer.QUEUE_OPERATION) + "'  requires both the '" + ZkStateReader.COLLECTION_PROP + "' and '" +
-              ZkStateReader.PROPERTY_PROP + "' parameters. No action taken ");
-    }
-
-    Boolean shardUnique = Boolean.parseBoolean(message.getStr(SHARD_UNIQUE));
-    if (shardUnique == false &&
-        SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(this.property) == false) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Balancing properties amongst replicas in a slice requires that"
-          + " the property be a pre-defined property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true' " +
-          " Property: " + this.property + " shardUnique: " + Boolean.toString(shardUnique));
-    }
-
-    collection = clusterState.getCollection(collectionName);
-    if (collection == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "Could not find collection ' " + collectionName + "' for overseer operation '" +
-              message.getStr(Overseer.QUEUE_OPERATION) + "'. No action taken.");
-    }
-    onlyActiveNodes = Boolean.parseBoolean(message.getStr(ONLY_ACTIVE_NODES, "true"));
-  }
-
-
-  DocCollection getDocCollection() {
-    return collection;
-  }
-
-  private boolean isActive(Replica replica) {
-    return replica.getState() == Replica.State.ACTIVE;
-  }
-
-  // Collect a list of all the nodes that _can_ host the indicated property. Along the way, also collect any of
-  // the replicas on that node that _already_ host the property as well as any slices that do _not_ have the
-  // property hosted.
-  //
-  // Return true if any node needs its property reassigned. False if the property is already balanced for
-  // the collection.
-
-  private boolean collectCurrentPropStats() {
-    int maxAssigned = 0;
-    // Get a list of potential replicas that can host the property _and_ their counts
-    // Move any obvious entries to a list of replicas to change the property on
-    Set<String> allHosts = new HashSet<>();
-    for (Slice slice : collection.getSlices()) {
-      boolean sliceHasProp = false;
-      for (Replica replica : slice.getReplicas()) {
-        if (onlyActiveNodes && isActive(replica) == false) {
-          if (StringUtils.isNotBlank(replica.getStr(property))) {
-            removeProp(slice, replica.getName()); // Note, we won't be committing this to ZK until later.
-          }
-          continue;
-        }
-        allHosts.add(replica.getNodeName());
-        String nodeName = replica.getNodeName();
-        if (StringUtils.isNotBlank(replica.getStr(property))) {
-          if (sliceHasProp) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-                "'" + BALANCESHARDUNIQUE + "' should only be called for properties that have at most one member " +
-                    "in any slice with the property set. No action taken.");
-          }
-          if (nodesHostingProp.containsKey(nodeName) == false) {
-            nodesHostingProp.put(nodeName, new ArrayList<>());
-          }
-          nodesHostingProp.get(nodeName).add(new SliceReplica(slice, replica));
-          ++assigned;
-          maxAssigned = Math.max(maxAssigned, nodesHostingProp.get(nodeName).size());
-          sliceHasProp = true;
-        }
-        if (nodesHostingReplicas.containsKey(nodeName) == false) {
-          nodesHostingReplicas.put(nodeName, new ArrayList<>());
-        }
-        nodesHostingReplicas.get(nodeName).add(new SliceReplica(slice, replica));
-      }
-    }
-
-    // If the total number of already-hosted properties assigned to nodes
-    // that have potential to host leaders is equal to the slice count _AND_ none of the current nodes has more than
-    // the max number of properties, there's nothing to do.
-    origMaxPropPerNode = collection.getSlices().size() / allHosts.size();
-
-    // Some nodes can have one more of the property if the numbers aren't exactly even.
-    origModulo = collection.getSlices().size() % allHosts.size();
-    if (origModulo > 0) {
-      origMaxPropPerNode++;  // have to have some nodes with 1 more property.
-    }
-
-    // We can say for sure that we need to rebalance if we don't have as many assigned properties as slices.
-    if (assigned != collection.getSlices().size()) {
-      return true;
-    }
-
-    // Make sure there are no more slices at the limit than the "leftovers"
-    // Let's say there are 7 slices and 3 nodes. We need to distribute the property as 3 on node1, 2 on node2 and 2 on node3
-    // (3, 2, 2). We need to be careful not to distribute them as (3, 3, 1); that's what this check is all about.
-    int counter = origModulo;
-    for (List<SliceReplica> list : nodesHostingProp.values()) {
-      if (list.size() == origMaxPropPerNode) --counter;
-    }
-    if (counter == 0) return false; // nodes with 1 extra leader are exactly the needed number
-
-    return true;
-  }
-
-  private void removeSliceAlreadyHostedFromPossibles(String sliceName) {
-    for (Map.Entry<String, List<SliceReplica>> entReplica : nodesHostingReplicas.entrySet()) {
-
-      ListIterator<SliceReplica> iter = entReplica.getValue().listIterator();
-      while (iter.hasNext()) {
-        SliceReplica sr = iter.next();
-        if (sr.slice.getName().equals(sliceName))
-          iter.remove();
-      }
-    }
-  }
-
-  private void balanceUnassignedReplicas() {
-    tmpMaxPropPerNode = origMaxPropPerNode; // A bit clumsy, but don't want to duplicate code.
-    tmpModulo = origModulo;
-
-    // Get the nodeName and shardName for the node that has the least room for this
-
-    while (shardsNeedingHosts.size() > 0) {
-      String nodeName = "";
-      int minSize = Integer.MAX_VALUE;
-      SliceReplica srToChange = null;
-      for (String slice : shardsNeedingHosts) {
-        for (Map.Entry<String, List<SliceReplica>> ent : nodesHostingReplicas.entrySet()) {
-          // A little tricky. If we don't set this to something below, then it means all possible places to
-          // put this property are full up, so just put it somewhere.
-          if (srToChange == null && ent.getValue().size() > 0) {
-            srToChange = ent.getValue().get(0);
-          }
-          ListIterator<SliceReplica> iter = ent.getValue().listIterator();
-          while (iter.hasNext()) {
-            SliceReplica sr = iter.next();
-            if (StringUtils.equals(slice, sr.slice.getName()) == false) {
-              continue;
-            }
-            if (nodesHostingProp.containsKey(ent.getKey()) == false) {
-              nodesHostingProp.put(ent.getKey(), new ArrayList<SliceReplica>());
-            }
-            if (minSize > nodesHostingReplicas.get(ent.getKey()).size() && nodesHostingProp.get(ent.getKey()).size() < tmpMaxPropPerNode) {
-              minSize = nodesHostingReplicas.get(ent.getKey()).size();
-              srToChange = sr;
-              nodeName = ent.getKey();
-            }
-          }
-        }
-      }
-      // Now, you have a slice and node to put it on
-      shardsNeedingHosts.remove(srToChange.slice.getName());
-      if (nodesHostingProp.containsKey(nodeName) == false) {
-        nodesHostingProp.put(nodeName, new ArrayList<SliceReplica>());
-      }
-      nodesHostingProp.get(nodeName).add(srToChange);
-      adjustLimits(nodesHostingProp.get(nodeName));
-      removeSliceAlreadyHostedFromPossibles(srToChange.slice.getName());
-      addProp(srToChange.slice, srToChange.replica.getName());
-    }
-  }
-
-  // Adjust the min/max counts allowed per node. Special handling here for dealing with the fact
-  // that no node should have more than 1 more replica with this property than any other.
-  private void adjustLimits(List<SliceReplica> changeList) {
-    if (changeList.size() == tmpMaxPropPerNode) {
-      if (tmpModulo < 0) return;
-
-      --tmpModulo;
-      if (tmpModulo == 0) {
-        --tmpMaxPropPerNode;
-        --tmpModulo;  // Prevent dropping tmpMaxPropPerNode again.
-      }
-    }
-  }
-
-  // Go through the list of presently-hosted properties and remove any that have too many replicas that host the property
-  private void removeOverallocatedReplicas() {
-    tmpMaxPropPerNode = origMaxPropPerNode; // A bit clumsy, but don't want to duplicate code.
-    tmpModulo = origModulo;
-
-    for (Map.Entry<String, List<SliceReplica>> ent : nodesHostingProp.entrySet()) {
-      while (ent.getValue().size() > tmpMaxPropPerNode) { // remove delta nodes
-        ent.getValue().remove(rand.nextInt(ent.getValue().size()));
-      }
-      adjustLimits(ent.getValue());
-    }
-  }
-
-  private void removeProp(Slice origSlice, String replicaName) {
-    getReplicaFromChanged(origSlice, replicaName).getProperties().remove(property);
-  }
-
-  private void addProp(Slice origSlice, String replicaName) {
-    getReplicaFromChanged(origSlice, replicaName).getProperties().put(property, "true");
-  }
-
-  // Just a place to encapsulate the fact that we need to have new slices (copy) to update before we
-  // put this all in the cluster state.
-  private Replica getReplicaFromChanged(Slice origSlice, String replicaName) {
-    Slice newSlice = changedSlices.get(origSlice.getName());
-    Replica replica;
-    if (newSlice != null) {
-      replica = newSlice.getReplica(replicaName);
-    } else {
-      newSlice = new Slice(origSlice.getName(), origSlice.getReplicasCopy(), origSlice.shallowCopy());
-      changedSlices.put(origSlice.getName(), newSlice);
-      replica = newSlice.getReplica(replicaName);
-    }
-    if (replica == null) {
-      throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Should have been able to find replica '" +
-          replicaName + "' in slice '" + origSlice.getName() + "'. No action taken");
-    }
-    return replica;
-
-  }
-  // Main entry point for carrying out the action. Returns "true" if we have actually moved properties around.
-
-  boolean balanceProperty() {
-    if (collectCurrentPropStats() == false) {
-      return false;
-    }
-
-    // we have two lists based on nodeName
-    // 1> all the nodes that _could_ host a property for the slice
-    // 2> all the nodes that _currently_ host a property for the slice.
-
-    // So, remove a replica from the nodes that have too many
-    removeOverallocatedReplicas();
-
-    // prune replicas belonging to a slice that have the property currently assigned from the list of replicas
-    // that could host the property.
-    for (Map.Entry<String, List<SliceReplica>> entProp : nodesHostingProp.entrySet()) {
-      for (SliceReplica srHosting : entProp.getValue()) {
-        removeSliceAlreadyHostedFromPossibles(srHosting.slice.getName());
-      }
-    }
-
-    // Assemble the list of slices that do not have any replica hosting the property:
-    for (Map.Entry<String, List<SliceReplica>> ent : nodesHostingReplicas.entrySet()) {
-      ListIterator<SliceReplica> iter = ent.getValue().listIterator();
-      while (iter.hasNext()) {
-        SliceReplica sr = iter.next();
-        shardsNeedingHosts.add(sr.slice.getName());
-      }
-    }
-
-    // At this point, nodesHostingProp should contain _only_ lists of replicas that belong to slices that do _not_
-    // have any replica hosting the property. So let's assign them.
-
-    balanceUnassignedReplicas();
-    for (Slice newSlice : changedSlices.values()) {
-      DocCollection docCollection = CollectionMutator.updateSlice(collectionName, clusterState.getCollection(collectionName), newSlice);
-      clusterState = ClusterStateMutator.newState(clusterState, collectionName, docCollection);
-    }
-    return true;
-  }
-
-  private static class SliceReplica {
-    Slice slice;
-    Replica replica;
-
-    SliceReplica(Slice slice, Replica replica) {
-      this.slice = slice;
-      this.replica = replica;
-    }
-  }
-}
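
The distribution arithmetic in collectCurrentPropStats() above (slice count
divided by host count, rounded up when there is a remainder) is the crux of
the (3, 2, 2)-not-(3, 3, 1) invariant. A tiny self-contained illustration,
using the 7-slices/3-nodes figures from the comment in that method:

public class PropertySpread {
  public static void main(String[] args) {
    int slices = 7, nodes = 3;        // sample figures from the comment above
    int maxPerNode = slices / nodes;  // 2
    int modulo = slices % nodes;      // 1 node is allowed one extra property
    if (modulo > 0) {
      maxPerNode++;                   // so exactly one node may host 3
    }
    // Prints: max per node = 3, nodes at the max = 1  ->  spread (3, 2, 2)
    System.out.printf("max per node = %d, nodes at the max = %d%n",
        maxPerNode, modulo == 0 ? nodes : modulo);
  }
}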

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/LeaderElector.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/LeaderElector.java b/solr/core/src/java/org/apache/solr/cloud/LeaderElector.java
deleted file mode 100644
index 46f3c88..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/LeaderElector.java
+++ /dev/null
@@ -1,396 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.solr.cloud.ZkController.ContextKey;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkCmdExecutor;
-import org.apache.solr.common.cloud.ZooKeeperException;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.KeeperException.ConnectionLossException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.Watcher.Event.EventType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Leader Election process. This class contains the logic by which a
- * leader is chosen. First call {@link #setup(ElectionContext)} to ensure
- * the election process is initialized. Next call
- * {@link #joinElection(ElectionContext, boolean)} to start the leader election.
- * 
- * The implementation follows the classic ZooKeeper recipe of creating an
- * ephemeral, sequential node for each candidate and then looking at the set
- * of such nodes - if the created node is the lowest sequential node, the
- * candidate that created the node is the leader. If not, the candidate puts
- * a watch on the next lowest node it finds, and if that node goes down, 
- * starts the whole process over by checking if it's the lowest sequential node, etc.
- * 
- */
-public class LeaderElector {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  static final String ELECTION_NODE = "/election";
-  
-  public final static Pattern LEADER_SEQ = Pattern.compile(".*?/?.*?-n_(\\d+)");
-  private final static Pattern SESSION_ID = Pattern.compile(".*?/?(.*?-.*?)-n_\\d+");
-  private final static Pattern  NODE_NAME = Pattern.compile(".*?/?(.*?-)(.*?)-n_\\d+");
-
-  protected SolrZkClient zkClient;
-  
-  private ZkCmdExecutor zkCmdExecutor;
-
-  private volatile ElectionContext context;
-
-  private ElectionWatcher watcher;
-
-  private Map<ContextKey,ElectionContext> electionContexts;
-  private ContextKey contextKey;
-
-  public LeaderElector(SolrZkClient zkClient) {
-    this.zkClient = zkClient;
-    zkCmdExecutor = new ZkCmdExecutor(zkClient.getZkClientTimeout());
-  }
-  
-  public LeaderElector(SolrZkClient zkClient, ContextKey key, Map<ContextKey,ElectionContext> electionContexts) {
-    this.zkClient = zkClient;
-    zkCmdExecutor = new ZkCmdExecutor(zkClient.getZkClientTimeout());
-    this.electionContexts = electionContexts;
-    this.contextKey = key;
-  }
-
-  public ElectionContext getContext() {
-    return context;
-  }
-
-  /**
-   * Check if the candidate with the given n_* sequence number is the leader.
-   * If it is, set the leaderId on the leader zk node. If it is not, start
-   * watching the candidate that is in line before this one - if it goes down, check
-   * if this candidate is the leader again.
-   *
-   * @param replacement has someone else been the leader already?
-   */
-  private void checkIfIamLeader(final ElectionContext context, boolean replacement) throws KeeperException,
-      InterruptedException, IOException {
-    context.checkIfIamLeaderFired();
-    // get all other numbers...
-    final String holdElectionPath = context.electionPath + ELECTION_NODE;
-    List<String> seqs = zkClient.getChildren(holdElectionPath, null, true);
-    sortSeqs(seqs);
-
-    String leaderSeqNodeName = context.leaderSeqPath.substring(context.leaderSeqPath.lastIndexOf('/') + 1);
-    if (!seqs.contains(leaderSeqNodeName)) {
-      log.warn("Our node is no longer in line to be leader");
-      return;
-    }
-
-    // If any double-registrations exist for me, remove all but this latest one!
-    // TODO: can we even get into this state?
-    String prefix = zkClient.getSolrZooKeeper().getSessionId() + "-" + context.id + "-";
-    Iterator<String> it = seqs.iterator();
-    while (it.hasNext()) {
-      String elec = it.next();
-      if (!elec.equals(leaderSeqNodeName) && elec.startsWith(prefix)) {
-        try {
-          String toDelete = holdElectionPath + "/" + elec;
-          log.warn("Deleting duplicate registration: {}", toDelete);
-          zkClient.delete(toDelete, -1, true);
-        } catch (KeeperException.NoNodeException e) {
-          // ignore
-        }
-        it.remove();
-      }
-    }
-
-    if (leaderSeqNodeName.equals(seqs.get(0))) {
-      // I am the leader
-      try {
-        runIamLeaderProcess(context, replacement);
-      } catch (KeeperException.NodeExistsException e) {
-        log.error("node exists",e);
-        retryElection(context, false);
-        return;
-      }
-    } else {
-      // I am not the leader - watch the node below me
-      String toWatch = seqs.get(0);
-      for (String node : seqs) {
-        if (leaderSeqNodeName.equals(node)) {
-          break;
-        }
-        toWatch = node;
-      }
-      try {
-        String watchedNode = holdElectionPath + "/" + toWatch;
-        zkClient.getData(watchedNode, watcher = new ElectionWatcher(context.leaderSeqPath, watchedNode, getSeq(context.leaderSeqPath), context), null, true);
-        log.debug("Watching path {} to know if I could be the leader", watchedNode);
-      } catch (KeeperException.SessionExpiredException e) {
-        throw e;
-      } catch (KeeperException.NoNodeException e) {
-        // the previous node disappeared, check if we are the leader again
-        checkIfIamLeader(context, true);
-      } catch (KeeperException e) {
-        // we couldn't set our watch for some other reason, retry
-        log.warn("Failed setting watch", e);
-        checkIfIamLeader(context, true);
-      }
-    }
-  }
-
-  // TODO: get this core param out of here
-  protected void runIamLeaderProcess(final ElectionContext context, boolean weAreReplacement) throws KeeperException,
-      InterruptedException, IOException {
-    context.runLeaderProcess(weAreReplacement,0);
-  }
-  
-  /**
-   * Returns int given String of form n_0000000001 or n_0000000003, etc.
-   * 
-   * @return sequence number
-   */
-  public static int getSeq(String nStringSequence) {
-    int seq = 0;
-    Matcher m = LEADER_SEQ.matcher(nStringSequence);
-    if (m.matches()) {
-      seq = Integer.parseInt(m.group(1));
-    } else {
-      throw new IllegalStateException("Could not find regex match in:"
-          + nStringSequence);
-    }
-    return seq;
-  }
-  
-  private String getNodeId(String nStringSequence) {
-    String id;
-    Matcher m = SESSION_ID.matcher(nStringSequence);
-    if (m.matches()) {
-      id = m.group(1);
-    } else {
-      throw new IllegalStateException("Could not find regex match in:"
-          + nStringSequence);
-    }
-    return id;
-  }
-
-  public static String getNodeName(String nStringSequence){
-    String result;
-    Matcher m = NODE_NAME.matcher(nStringSequence);
-    if (m.matches()) {
-      result = m.group(2);
-    } else {
-      throw new IllegalStateException("Could not find regex match in:"
-          + nStringSequence);
-    }
-    return result;
-
-  }
-  
-  public int joinElection(ElectionContext context, boolean replacement) throws KeeperException, InterruptedException, IOException {
-    return joinElection(context,replacement, false);
-  }
-
-    /**
-     * Begin participating in the election process. Gets a new sequential number
-     * and begins watching the node with the sequence number before it, unless it
-     * is the lowest number, in which case, initiates the leader process. If the
-     * node that is watched goes down, check if we are the new lowest node, else
-     * watch the next lowest numbered node.
-     *
-     * @return sequential node number
-     */
-  public int joinElection(ElectionContext context, boolean replacement,boolean joinAtHead) throws KeeperException, InterruptedException, IOException {
-    context.joinedElectionFired();
-    
-    final String shardsElectZkPath = context.electionPath + LeaderElector.ELECTION_NODE;
-    
-    long sessionId = zkClient.getSolrZooKeeper().getSessionId();
-    String id = sessionId + "-" + context.id;
-    String leaderSeqPath = null;
-    boolean cont = true;
-    int tries = 0;
-    while (cont) {
-      try {
-        if (joinAtHead) {
-          log.debug("Node {} trying to join election at the head", id);
-          List<String> nodes = OverseerTaskProcessor.getSortedElectionNodes(zkClient, shardsElectZkPath);
-          if (nodes.size() < 2) {
-            leaderSeqPath = zkClient.create(shardsElectZkPath + "/" + id + "-n_", null,
-                CreateMode.EPHEMERAL_SEQUENTIAL, false);
-          } else {
-            String firstInLine = nodes.get(1);
-            log.debug("The current head: {}", firstInLine);
-            Matcher m = LEADER_SEQ.matcher(firstInLine);
-            if (!m.matches()) {
-              throw new IllegalStateException("Could not find regex match in:"
-                  + firstInLine);
-            }
-            leaderSeqPath = shardsElectZkPath + "/" + id + "-n_" + m.group(1);
-            zkClient.create(leaderSeqPath, null, CreateMode.EPHEMERAL, false);
-          }
-        } else {
-          leaderSeqPath = zkClient.create(shardsElectZkPath + "/" + id + "-n_", null,
-              CreateMode.EPHEMERAL_SEQUENTIAL, false);
-        }
-
-        log.debug("Joined leadership election with path: {}", leaderSeqPath);
-        context.leaderSeqPath = leaderSeqPath;
-        cont = false;
-      } catch (ConnectionLossException e) {
-        // we don't know if we made our node or not...
-        List<String> entries = zkClient.getChildren(shardsElectZkPath, null, true);
-        
-        boolean foundId = false;
-        for (String entry : entries) {
-          String nodeId = getNodeId(entry);
-          if (id.equals(nodeId)) {
-            // we did create our node...
-            foundId  = true;
-            break;
-          }
-        }
-        if (!foundId) {
-          cont = true;
-          if (tries++ > 20) {
-            throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
-                "", e);
-          }
-          try {
-            Thread.sleep(50);
-          } catch (InterruptedException e2) {
-            Thread.currentThread().interrupt();
-          }
-        }
-
-      } catch (KeeperException.NoNodeException e) {
-        // we must have failed in creating the election node - someone else must
-        // be working on it; let's try again
-        if (tries++ > 20) {
-          context = null;
-          throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
-              "", e);
-        }
-        cont = true;
-        try {
-          Thread.sleep(50);
-        } catch (InterruptedException e2) {
-          Thread.currentThread().interrupt();
-        }
-      }
-    }
-    checkIfIamLeader(context, replacement);
-
-    return getSeq(context.leaderSeqPath);
-  }
-
-  private class ElectionWatcher implements Watcher {
-    final String myNode, watchedNode;
-    final ElectionContext context;
-
-    private boolean canceled = false;
-
-    private ElectionWatcher(String myNode, String watchedNode, int seq, ElectionContext context) {
-      this.myNode = myNode;
-      this.watchedNode = watchedNode;
-      this.context = context;
-    }
-
-    void cancel() {
-      canceled = true;
-    }
-
-    @Override
-    public void process(WatchedEvent event) {
-      // session events are not change events, and do not remove the watcher
-      if (EventType.None.equals(event.getType())) {
-        return;
-      }
-      if (canceled) {
-        log.debug("This watcher is not active anymore {}", myNode);
-        try {
-          zkClient.delete(myNode, -1, true);
-        } catch (KeeperException.NoNodeException nne) {
-          // expected; don't do anything
-        } catch (Exception e) {
-          log.warn("My watched node still exists and can't be removed: " + myNode, e);
-        }
-        return;
-      }
-      try {
-        // am I the next leader?
-        checkIfIamLeader(context, true);
-      } catch (Exception e) {
-        if (!zkClient.isClosed()) {
-          log.warn("", e);
-        }
-      }
-    }
-  }
-
-  /**
-   * Set up any ZooKeeper nodes needed for leader election.
-   */
-  public void setup(final ElectionContext context) throws InterruptedException,
-      KeeperException {
-    String electZKPath = context.electionPath + LeaderElector.ELECTION_NODE;
-    if (context instanceof OverseerElectionContext) {
-      zkCmdExecutor.ensureExists(electZKPath, zkClient);
-    } else {
-      // we pass 2 so that a replica won't create /collection/{collection} if it doesn't exist
-      zkCmdExecutor.ensureExists(electZKPath, (byte[])null, CreateMode.PERSISTENT, zkClient, 2);
-    }
-
-    this.context = context;
-  }
-  
-  /**
-   * Sort a list of election node names by their trailing sequence numbers.
-   */
-  public static void sortSeqs(List<String> seqs) {
-    Collections.sort(seqs, (o1, o2) -> {
-      int i = getSeq(o1) - getSeq(o2);
-      return i == 0 ? o1.compareTo(o2) : i;
-    });
-  }
-
-  void retryElection(ElectionContext context, boolean joinAtHead) throws KeeperException, InterruptedException, IOException {
-    ElectionWatcher watcher = this.watcher;
-    ElectionContext ctx = context.copy();
-    if (electionContexts != null) {
-      electionContexts.put(contextKey, ctx);
-    }
-    if (watcher != null) watcher.cancel();
-    this.context.cancelElection();
-    this.context.close();
-    this.context = ctx;
-    joinElection(ctx, true, joinAtHead);
-  }
-
-}
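
For context on the election code above: each candidate registers an ephemeral sequential znode whose name ends in n_<zero-padded sequence>. getSeq() extracts that suffix, the candidate holding the lowest sequence runs the leader process, and every other candidate watches only its immediate predecessor, which avoids a thundering herd when the leader goes away. The standalone sketch below illustrates just the parse-and-sort step; the node-name layout, regex, and class name are illustrative assumptions, not Solr's actual constants.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Illustrative only: mirrors the getSeq()/sortSeqs() idea above,
    // with an assumed name layout <sessionId>-<coreNodeName>-n_<seq>.
    public class ElectionSeqDemo {
      private static final Pattern SEQ = Pattern.compile(".*-n_(\\d+)");

      static int getSeq(String node) {
        Matcher m = SEQ.matcher(node);
        if (!m.matches()) {
          throw new IllegalStateException("Could not find regex match in: " + node);
        }
        return Integer.parseInt(m.group(1));
      }

      public static void main(String[] args) {
        List<String> nodes = new ArrayList<>(List.of(
            "72057-core_node3-n_0000000002",
            "72057-core_node1-n_0000000000",
            "72057-core_node2-n_0000000001"));
        // Lowest sequence wins; ties fall back to lexicographic order,
        // matching sortSeqs() above.
        nodes.sort((a, b) -> {
          int d = getSeq(a) - getSeq(b);
          return d == 0 ? a.compareTo(b) : d;
        });
        System.out.println("leader: " + nodes.get(0)); // ...-n_0000000000
        System.out.println("order:  " + nodes);
      }
    }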

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/LockTree.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/LockTree.java b/solr/core/src/java/org/apache/solr/cloud/LockTree.java
deleted file mode 100644
index af0d30e..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/LockTree.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.solr.cloud.OverseerMessageHandler.Lock;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.CollectionParams.LockLevel;
-import org.apache.solr.common.util.StrUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This is a utility class that offers fine-grained locking for various collection operations.
- * It is designed for single-threaded operation, but it is safe for multiple threads to use:
- * internally it is synchronized so that only one thread can perform any operation at a time.
- */
-public class LockTree {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final Node root = new Node(null, LockLevel.CLUSTER, null);
-
-  public void clear() {
-    synchronized (this) {
-      root.clear();
-    }
-  }
-
-  private class LockImpl implements Lock {
-    final Node node;
-
-    LockImpl(Node node) {
-      this.node = node;
-    }
-
-    @Override
-    public void unlock() {
-      synchronized (LockTree.this) {
-        node.unlock(this);
-      }
-    }
-
-    @Override
-    public String toString() {
-      return StrUtils.join(node.constructPath(new LinkedList<>()), '/');
-    }
-  }
-
-
-  public class Session {
-    private SessionNode root = new SessionNode(LockLevel.CLUSTER);
-
-    public Lock lock(CollectionParams.CollectionAction action, List<String> path) {
-      synchronized (LockTree.this) {
-        if (action.lockLevel == LockLevel.NONE) return FREELOCK;
-        if (root.isBusy(action.lockLevel, path)) return null;
-        Lock lockObject = LockTree.this.root.lock(action.lockLevel, path);
-        if (lockObject == null) root.markBusy(path, 0);
-        return lockObject;
-      }
-    }
-  }
-
-  private static class SessionNode {
-    final LockLevel level;
-    Map<String, SessionNode> kids;
-    boolean busy = false;
-
-    SessionNode(LockLevel level) {
-      this.level = level;
-    }
-
-    void markBusy(List<String> path, int depth) {
-      if (path.size() == depth) {
-        busy = true;
-      } else {
-        String s = path.get(depth);
-        if (kids == null) kids = new HashMap<>();
-        SessionNode node = kids.get(s);
-        if (node == null) kids.put(s, node = new SessionNode(level.getChild()));
-        node.markBusy(path, depth + 1);
-      }
-    }
-
-    boolean isBusy(LockLevel lockLevel, List<String> path) {
-      if (lockLevel.isHigherOrEqual(level)) {
-        if (busy) return true;
-        String s = path.get(level.level);
-        if (kids == null || kids.get(s) == null) return false;
-        return kids.get(s).isBusy(lockLevel, path);
-      } else {
-        return false;
-      }
-    }
-  }
-
-  public Session getSession() {
-    return new Session();
-  }
-
-  private class Node {
-    final String name;
-    final Node mom;
-    final LockLevel level;
-    HashMap<String, Node> children = new HashMap<>();
-    LockImpl myLock;
-
-    Node(String name, LockLevel level, Node mom) {
-      this.name = name;
-      this.level = level;
-      this.mom = mom;
-    }
-
-    // true if this node or any of its children are locked
-    boolean isLocked() {
-      if (myLock != null) return true;
-      for (Node node : children.values()) if (node.isLocked()) return true;
-      return false;
-    }
-
-
-    void unlock(LockImpl lockObject) {
-      if (myLock == lockObject) myLock = null;
-      else {
-        log.info("Unlocked multiple times : {}", lockObject.toString());
-      }
-    }
-
-
-    Lock lock(LockLevel lockLevel, List<String> path) {
-      if (myLock != null) return null; // I'm already locked, no need to go any further
-      if (lockLevel == level) {
-        //lock is supposed to be acquired at this level
-        //If I am locked or any of my children or grandchildren are locked
-        // it is not possible to acquire a lock
-        if (isLocked()) return null;
-        return myLock = new LockImpl(this);
-      } else {
-        String childName = path.get(level.level);
-        Node child = children.get(childName);
-        if (child == null)
-          children.put(childName, child = new Node(childName, LockLevel.getLevel(level.level + 1), this));
-        return child.lock(lockLevel, path);
-      }
-    }
-
-    LinkedList<String> constructPath(LinkedList<String> collect) {
-      if (name != null) collect.addFirst(name);
-      if (mom != null) mom.constructPath(collect);
-      return collect;
-    }
-
-    void clear() {
-      if (myLock != null) {
-        log.warn("lock_is_leaked at" + constructPath(new LinkedList<>()));
-        myLock = null;
-      }
-      for (Node node : children.values()) node.clear();
-    }
-  }
-  static final Lock FREELOCK = () -> {};
-
-}
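
The LockTree removed above serializes overseer collection operations at different granularities (cluster, collection, shard, replica): a lock at one node blocks new locks anywhere in its subtree, and cannot be taken while anything below it is already locked. Here is a minimal sketch of that invariant; the class and method names (MiniLockTree, tryLock) are assumptions for illustration, not Solr's API.

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Minimal hierarchical lock tree: a path is lockable only if no
    // ancestor holds a lock and nothing in its own subtree is locked.
    public class MiniLockTree {
      static final class Node {
        final Map<String, Node> kids = new HashMap<>();
        boolean locked;

        boolean subtreeLocked() {
          if (locked) return true;
          for (Node n : kids.values()) if (n.subtreeLocked()) return true;
          return false;
        }
      }

      private final Node root = new Node();

      public synchronized boolean tryLock(List<String> path) {
        Node cur = root;
        for (String part : path) {
          if (cur.locked) return false; // an ancestor holds the lock
          cur = cur.kids.computeIfAbsent(part, k -> new Node());
        }
        if (cur.subtreeLocked()) return false; // target or a descendant is busy
        cur.locked = true;
        return true;
      }

      public synchronized void unlock(List<String> path) {
        Node cur = root;
        for (String part : path) {
          cur = cur.kids.get(part);
          if (cur == null) return; // nothing to unlock
        }
        cur.locked = false;
      }

      public static void main(String[] args) {
        MiniLockTree tree = new MiniLockTree();
        System.out.println(tree.tryLock(List.of("coll1")));           // true
        System.out.println(tree.tryLock(List.of("coll1", "shard1"))); // false: ancestor locked
        tree.unlock(List.of("coll1"));
        System.out.println(tree.tryLock(List.of("coll1", "shard1"))); // true
        System.out.println(tree.tryLock(List.of("coll1")));           // false: descendant locked
      }
    }

The Session/SessionNode pair in the real class adds one refinement on top of this: paths that failed to lock are remembered as busy within the session, so later operations queued in the same session cannot jump ahead of earlier ones that are still waiting.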


[44/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
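
The removal below covers ZkController, the class that mediates all of a Solr node's ZooKeeper interaction. One step worth calling out in advance is createEphemeralLiveNode(), which registers the node under /live_nodes (and optionally creates a nodeAdded marker for autoscaling) in a single atomic multi() call. A raw-ZooKeeper sketch of that step follows; the connect string, paths, and node name are illustrative assumptions, and the parent znodes are assumed to exist already (the real code ensures them in createClusterZkNodes()).

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.Op;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    // Sketch of the "register as live" step: both creates succeed or neither does.
    public class LiveNodeDemo {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("localhost:2181", 15000, event -> {});
        String nodeName = "127.0.0.1:8983_solr"; // assumed node name
        List<Op> ops = new ArrayList<>(2);
        // EPHEMERAL: the znode vanishes when this session dies, which is
        // exactly what makes /live_nodes a reliable liveness signal.
        ops.add(Op.create("/live_nodes/" + nodeName, null,
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL));
        ops.add(Op.create("/autoscaling/nodeAdded/" + nodeName, null,
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL));
        try {
          zk.multi(ops); // atomic: one transaction for both znodes
        } catch (KeeperException e) {
          System.err.println("live-node registration failed: " + e);
        } finally {
          zk.close();
        }
      }
    }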
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/ZkController.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkController.java b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
deleted file mode 100644
index 5caad81..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/ZkController.java
+++ /dev/null
@@ -1,2590 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.lang.invoke.MethodHandles;
-import java.net.InetAddress;
-import java.net.NetworkInterface;
-import java.net.URISyntaxException;
-import java.net.URL;
-import java.net.URLEncoder;
-import java.net.UnknownHostException;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Enumeration;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicReference;
-
-import com.google.common.base.Strings;
-import org.apache.commons.lang.StringUtils;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient.Builder;
-import org.apache.solr.client.solrj.impl.SolrClientCloudManager;
-import org.apache.solr.client.solrj.request.CoreAdminRequest.WaitForState;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
-import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.cloud.overseer.SliceMutator;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.BeforeReconnect;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.CollectionStateWatcher;
-import org.apache.solr.common.cloud.DefaultConnectionStrategy;
-import org.apache.solr.common.cloud.DefaultZkACLProvider;
-import org.apache.solr.common.cloud.DefaultZkCredentialsProvider;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.LiveNodesListener;
-import org.apache.solr.common.cloud.OnReconnect;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Replica.Type;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkACLProvider;
-import org.apache.solr.common.cloud.ZkCmdExecutor;
-import org.apache.solr.common.cloud.ZkConfigManager;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkCredentialsProvider;
-import org.apache.solr.common.cloud.ZkMaintenanceUtils;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.cloud.ZooKeeperException;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.IOUtils;
-import org.apache.solr.common.util.ObjectReleaseTracker;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.URLUtil;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.CloseHook;
-import org.apache.solr.core.CloudConfig;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.CoreDescriptor;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.SolrCoreInitializationException;
-import org.apache.solr.handler.admin.ConfigSetsHandlerApi;
-import org.apache.solr.logging.MDCLoggingContext;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.servlet.SolrDispatchFilter;
-import org.apache.solr.update.UpdateLog;
-import org.apache.solr.util.RTimer;
-import org.apache.solr.util.RefCounted;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.KeeperException.NoNodeException;
-import org.apache.zookeeper.KeeperException.SessionExpiredException;
-import org.apache.zookeeper.Op;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NODE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.ELECTION_NODE_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REJOIN_AT_HEAD_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-
-/**
- * Handle ZooKeeper interactions.
- * <p>
- * Notes: loads everything on init and creates what's not there; further updates
- * are prompted by Watches.
- * <p>
- * TODO: exceptions during close on attempts to update cloud state
- */
-public class ZkController {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  static final int WAIT_DOWN_STATES_TIMEOUT_SECONDS = 60;
-
-  private final boolean SKIP_AUTO_RECOVERY = Boolean.getBoolean("solrcloud.skip.autorecovery");
-
-  private final ZkDistributedQueue overseerJobQueue;
-  private final OverseerTaskQueue overseerCollectionQueue;
-  private final OverseerTaskQueue overseerConfigSetQueue;
-
-  private final DistributedMap overseerRunningMap;
-  private final DistributedMap overseerCompletedMap;
-  private final DistributedMap overseerFailureMap;
-  private final DistributedMap asyncIdsMap;
-
-  public final static String COLLECTION_PARAM_PREFIX = "collection.";
-  public final static String CONFIGNAME_PROP = "configName";
-
-  static class ContextKey {
-
-    private String collection;
-    private String coreNodeName;
-
-    public ContextKey(String collection, String coreNodeName) {
-      this.collection = collection;
-      this.coreNodeName = coreNodeName;
-    }
-
-    @Override
-    public int hashCode() {
-      final int prime = 31;
-      int result = 1;
-      result = prime * result
-          + ((collection == null) ? 0 : collection.hashCode());
-      result = prime * result
-          + ((coreNodeName == null) ? 0 : coreNodeName.hashCode());
-      return result;
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      if (this == obj) return true;
-      if (obj == null) return false;
-      if (getClass() != obj.getClass()) return false;
-      ContextKey other = (ContextKey) obj;
-      if (collection == null) {
-        if (other.collection != null) return false;
-      } else if (!collection.equals(other.collection)) return false;
-      if (coreNodeName == null) {
-        if (other.coreNodeName != null) return false;
-      } else if (!coreNodeName.equals(other.coreNodeName)) return false;
-      return true;
-    }
-  }
-
-  private final Map<ContextKey, ElectionContext> electionContexts = Collections.synchronizedMap(new HashMap<>());
-
-  private final SolrZkClient zkClient;
-  public final ZkStateReader zkStateReader;
-  private SolrCloudManager cloudManager;
-  private CloudSolrClient cloudSolrClient;
-
-  private final String zkServerAddress;          // example: 127.0.0.1:54062/solr
-
-  private final int localHostPort;      // example: 54065
-  private final String hostName;           // example: 127.0.0.1
-  private final String nodeName;           // example: 127.0.0.1:54065_solr
-  private String baseURL;            // example: http://127.0.0.1:54065/solr
-
-  private final CloudConfig cloudConfig;
-
-  private LeaderElector overseerElector;
-
-  private Map<String, ReplicateFromLeader> replicateFromLeaders = new ConcurrentHashMap<>();
-  private final Map<String, ZkCollectionTerms> collectionToTerms = new HashMap<>();
-
-  // for now, this can be null in tests, in which case recovery will be inactive, and other features
-  // may accept defaults or use mocks rather than pulling things from a CoreContainer
-  private CoreContainer cc;
-
-  protected volatile Overseer overseer;
-
-  private int leaderVoteWait;
-  private int leaderConflictResolveWait;
-
-  private boolean genericCoreNodeNames;
-
-  private int clientTimeout;
-
-  private volatile boolean isClosed;
-
-  private final ConcurrentHashMap<String, Throwable> replicasMetTragicEvent = new ConcurrentHashMap<>();
-
-  @Deprecated
-  // keeps track of replicas that have been asked to recover by leaders running on this node
-  private final Map<String, String> replicasInLeaderInitiatedRecovery = new HashMap<String, String>();
-
-  // This is an expert and unsupported development mode that does not create
-  // an Overseer or register a /live node. This lets you monitor the cluster
-  // and interact with zookeeper via the Solr admin UI on a node outside the cluster,
-  // and so one that will not be killed or stopped when testing. See developer cloud-scripts.
-  private boolean zkRunOnly = Boolean.getBoolean("zkRunOnly"); // expert
-
-  // keeps track of a list of objects that need to know a new ZooKeeper session was created after expiration occurred
-  // ref is held as a HashSet since we clone the set before notifying to avoid synchronizing too long
-  private HashSet<OnReconnect> reconnectListeners = new HashSet<OnReconnect>();
-
-  private class RegisterCoreAsync implements Callable {
-
-    CoreDescriptor descriptor;
-    boolean recoverReloadedCores;
-    boolean afterExpiration;
-
-    RegisterCoreAsync(CoreDescriptor descriptor, boolean recoverReloadedCores, boolean afterExpiration) {
-      this.descriptor = descriptor;
-      this.recoverReloadedCores = recoverReloadedCores;
-      this.afterExpiration = afterExpiration;
-    }
-
-    public Object call() throws Exception {
-      log.info("Registering core {} afterExpiration? {}", descriptor.getName(), afterExpiration);
-      register(descriptor.getName(), descriptor, recoverReloadedCores, afterExpiration, false);
-      return descriptor;
-    }
-  }
-
-  // notifies registered listeners after the ZK reconnect in the background
-  private static class OnReconnectNotifyAsync implements Callable {
-
-    private final OnReconnect listener;
-
-    OnReconnectNotifyAsync(OnReconnect listener) {
-      this.listener = listener;
-    }
-
-    @Override
-    public Object call() throws Exception {
-      listener.command();
-      return null;
-    }
-  }
-
-  public ZkController(final CoreContainer cc, String zkServerAddress, int zkClientConnectTimeout, CloudConfig cloudConfig, final CurrentCoreDescriptorProvider registerOnReconnect)
-      throws InterruptedException, TimeoutException, IOException {
-
-    if (cc == null) throw new IllegalArgumentException("CoreContainer cannot be null.");
-    this.cc = cc;
-
-    this.cloudConfig = cloudConfig;
-
-    this.genericCoreNodeNames = cloudConfig.getGenericCoreNodeNames();
-
-    // be forgiving and strip off leading/trailing slashes
-    // this allows us to support users specifying hostContext="/" in
-    // solr.xml to indicate the root context, instead of hostContext=""
-    // which means the default of "solr"
-    String localHostContext = trimLeadingAndTrailingSlashes(cloudConfig.getSolrHostContext());
-
-    this.zkServerAddress = zkServerAddress;
-    this.localHostPort = cloudConfig.getSolrHostPort();
-    this.hostName = normalizeHostName(cloudConfig.getHost());
-    this.nodeName = generateNodeName(this.hostName, Integer.toString(this.localHostPort), localHostContext);
-    MDCLoggingContext.setNode(nodeName);
-    this.leaderVoteWait = cloudConfig.getLeaderVoteWait();
-    this.leaderConflictResolveWait = cloudConfig.getLeaderConflictResolveWait();
-
-    this.clientTimeout = cloudConfig.getZkClientTimeout();
-    DefaultConnectionStrategy strat = new DefaultConnectionStrategy();
-    String zkACLProviderClass = cloudConfig.getZkACLProviderClass();
-    ZkACLProvider zkACLProvider = null;
-    if (zkACLProviderClass != null && zkACLProviderClass.trim().length() > 0) {
-      zkACLProvider = cc.getResourceLoader().newInstance(zkACLProviderClass, ZkACLProvider.class);
-    } else {
-      zkACLProvider = new DefaultZkACLProvider();
-    }
-
-    String zkCredentialsProviderClass = cloudConfig.getZkCredentialsProviderClass();
-    if (zkCredentialsProviderClass != null && zkCredentialsProviderClass.trim().length() > 0) {
-      strat.setZkCredentialsToAddAutomatically(cc.getResourceLoader().newInstance(zkCredentialsProviderClass, ZkCredentialsProvider.class));
-    } else {
-      strat.setZkCredentialsToAddAutomatically(new DefaultZkCredentialsProvider());
-    }
-    addOnReconnectListener(getConfigDirListener());
-
-    zkClient = new SolrZkClient(zkServerAddress, clientTimeout, zkClientConnectTimeout, strat,
-        // on reconnect, reload cloud info
-        new OnReconnect() {
-
-          @Override
-          public void command() {
-            log.info("ZooKeeper session re-connected ... refreshing core states after session expiration.");
-            clearZkCollectionTerms();
-            try {
-              zkStateReader.createClusterStateWatchersAndUpdate();
-
-              // this is troublesome - we don't want to kill anything the old
-              // leader accepted, though I guess sync will likely get those
-              // updates back? But only if the old leader is involved in the
-              // sync, and it certainly may not be
-              // ExecutorUtil.shutdownAndAwaitTermination(cc.getCmdDistribExecutor());
-              // we need to create all of our lost watches
-
-              // it seems we don't need to do this again...
-              // Overseer.createClientNodes(zkClient, getNodeName());
-              
-              // start the overseer first, as the following code may need its processing
-              if (!zkRunOnly) {
-                ElectionContext context = new OverseerElectionContext(zkClient,
-                    overseer, getNodeName());
-
-                ElectionContext prevContext = overseerElector.getContext();
-                if (prevContext != null) {
-                  prevContext.cancelElection();
-                  prevContext.close();
-                }
-
-                overseerElector.setup(context);
-                overseerElector.joinElection(context, true);
-              }
-
-              cc.cancelCoreRecoveries();
-
-              registerAllCoresAsDown(registerOnReconnect, false);
-
-              // we have to register as live first to pick up docs in the buffer
-              createEphemeralLiveNode();
-
-              List<CoreDescriptor> descriptors = registerOnReconnect.getCurrentDescriptors();
-              // re-register all descriptors
-              ExecutorService executorService = (cc != null) ? cc.getCoreZkRegisterExecutorService() : null;
-              if (descriptors != null) {
-                for (CoreDescriptor descriptor : descriptors) {
-                  // TODO: we need to think carefully about what happens when it
-                  // was a leader that was expired - as well as what to do about
-                  // leaders/overseers with connection loss
-                  try {
-                    // unload solrcores that have been 'failed over'
-                    throwErrorIfReplicaReplaced(descriptor);
-
-                    if (executorService != null) {
-                      executorService.submit(new RegisterCoreAsync(descriptor, true, true));
-                    } else {
-                      register(descriptor.getName(), descriptor, true, true, false);
-                    }
-                  } catch (Exception e) {
-                    SolrException.log(log, "Error registering SolrCore", e);
-                  }
-                }
-              }
-
-              // notify any other objects that need to know when the session was re-connected
-              HashSet<OnReconnect> clonedListeners;
-              synchronized (reconnectListeners) {
-                clonedListeners = (HashSet<OnReconnect>)reconnectListeners.clone();
-              }
-              // the OnReconnect operation can be expensive per listener, so do that async in the background
-              for (OnReconnect listener : clonedListeners) {
-                try {
-                  if (executorService != null) {
-                    executorService.submit(new OnReconnectNotifyAsync(listener));
-                  } else {
-                    listener.command();
-                  }
-                } catch (Exception exc) {
-                  // not much we can do here other than warn in the log
-                  log.warn("Error when notifying OnReconnect listener " + listener + " after session re-connected.", exc);
-                }
-              }
-            } catch (InterruptedException e) {
-              // Restore the interrupted status
-              Thread.currentThread().interrupt();
-              throw new ZooKeeperException(
-                  SolrException.ErrorCode.SERVER_ERROR, "", e);
-            } catch (Exception e) {
-              SolrException.log(log, "", e);
-              throw new ZooKeeperException(
-                  SolrException.ErrorCode.SERVER_ERROR, "", e);
-            }
-          }
-
-        }, new BeforeReconnect() {
-
-      @Override
-      public void command() {
-        try {
-          ZkController.this.overseer.close();
-        } catch (Exception e) {
-          log.error("Error trying to stop any Overseer threads", e);
-        }
-        closeOutstandingElections(registerOnReconnect);
-        markAllAsNotLeader(registerOnReconnect);
-      }
-    }, zkACLProvider);
-
-    this.overseerJobQueue = Overseer.getStateUpdateQueue(zkClient);
-    this.overseerCollectionQueue = Overseer.getCollectionQueue(zkClient);
-    this.overseerConfigSetQueue = Overseer.getConfigSetQueue(zkClient);
-    this.overseerRunningMap = Overseer.getRunningMap(zkClient);
-    this.overseerCompletedMap = Overseer.getCompletedMap(zkClient);
-    this.overseerFailureMap = Overseer.getFailureMap(zkClient);
-    this.asyncIdsMap = Overseer.getAsyncIdsMap(zkClient);
-
-    zkStateReader = new ZkStateReader(zkClient, () -> {
-      if (cc != null) cc.securityNodeChanged();
-    });
-
-    init(registerOnReconnect);
-
-    assert ObjectReleaseTracker.track(this);
-  }
-
-  public int getLeaderVoteWait() {
-    return leaderVoteWait;
-  }
-
-  public int getLeaderConflictResolveWait() {
-    return leaderConflictResolveWait;
-  }
-
-  private void registerAllCoresAsDown(
-      final CurrentCoreDescriptorProvider registerOnReconnect, boolean updateLastPublished) {
-    List<CoreDescriptor> descriptors = registerOnReconnect
-        .getCurrentDescriptors();
-    if (isClosed) return;
-    if (descriptors != null) {
-      // before registering as live, make sure everyone is in a
-      // down state
-      publishNodeAsDown(getNodeName()); 
-      for (CoreDescriptor descriptor : descriptors) {
-        // if it looks like we are going to be the leader, we don't
-        // want to wait for the following stuff
-        CloudDescriptor cloudDesc = descriptor.getCloudDescriptor();
-        String collection = cloudDesc.getCollectionName();
-        String slice = cloudDesc.getShardId();
-        try {
-
-          int children = zkStateReader
-              .getZkClient()
-              .getChildren(
-                  ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection
-                      + "/leader_elect/" + slice + "/election", null, true).size();
-          if (children == 0) {
-            log.debug("looks like we are going to be the leader for collection {} shard {}", collection, slice);
-            continue;
-          }
-
-        } catch (NoNodeException e) {
-          log.debug("looks like we are going to be the leader for collection {} shard {}", collection, slice);
-          continue;
-        } catch (InterruptedException e2) {
-          Thread.currentThread().interrupt();
-        } catch (KeeperException e) {
-          log.warn("", e);
-          Thread.currentThread().interrupt();
-        }
-
-        final String coreZkNodeName = descriptor.getCloudDescriptor().getCoreNodeName();
-        try {
-          log.debug("calling waitForLeaderToSeeDownState for coreZkNodeName={} collection={} shard={}", new Object[]{coreZkNodeName, collection, slice});
-          waitForLeaderToSeeDownState(descriptor, coreZkNodeName);
-        } catch (Exception e) {
-          SolrException.log(log, "", e);
-          if (isClosed) {
-            return;
-          }
-        }
-      }
-    }
-  }
-  
-  private void closeOutstandingElections(final CurrentCoreDescriptorProvider registerOnReconnect) {
-    
-    List<CoreDescriptor> descriptors = registerOnReconnect.getCurrentDescriptors();
-    if (descriptors != null) {
-      for (CoreDescriptor descriptor : descriptors) {
-        closeExistingElectionContext(descriptor);
-      }
-    }
-  }
-  
-  private ContextKey closeExistingElectionContext(CoreDescriptor cd) {
-    // look for old context - if we find it, cancel it
-    String collection = cd.getCloudDescriptor().getCollectionName();
-    final String coreNodeName = cd.getCloudDescriptor().getCoreNodeName();
-    
-    ContextKey contextKey = new ContextKey(collection, coreNodeName);
-    ElectionContext prevContext = electionContexts.get(contextKey);
-    
-    if (prevContext != null) {
-      prevContext.close();
-      electionContexts.remove(contextKey);
-    }
-    
-    return contextKey;
-  }
-
-  private void markAllAsNotLeader(
-      final CurrentCoreDescriptorProvider registerOnReconnect) {
-    List<CoreDescriptor> descriptors = registerOnReconnect
-        .getCurrentDescriptors();
-    if (descriptors != null) {
-      for (CoreDescriptor descriptor : descriptors) {
-        descriptor.getCloudDescriptor().setLeader(false);
-        descriptor.getCloudDescriptor().setHasRegistered(false);
-      }
-    }
-  }
-
-  /**
-   * Closes the underlying ZooKeeper client.
-   */
-  public void close() {
-    this.isClosed = true;
-    synchronized (collectionToTerms) {
-      collectionToTerms.values().forEach(ZkCollectionTerms::close);
-    }
-    try {
-      for (ElectionContext context : electionContexts.values()) {
-        try {
-          context.close();
-        } catch (Exception e) {
-          log.error("Error closing overseer", e);
-        }
-      }
-    } finally {
-      try {
-        IOUtils.closeQuietly(overseerElector.getContext());
-        IOUtils.closeQuietly(overseer);
-      } finally {
-        if (cloudSolrClient != null) {
-          IOUtils.closeQuietly(cloudSolrClient);
-        }
-        if (cloudManager != null) {
-          IOUtils.closeQuietly(cloudManager);
-        }
-        try {
-          try {
-            zkStateReader.close();
-          } catch (Exception e) {
-            log.error("Error closing zkStateReader", e);
-          }
-        } finally {
-          try {
-            zkClient.close();
-          } catch (Exception e) {
-            log.error("Error closing zkClient", e);
-          }
-        }
-      }
-    }
-    assert ObjectReleaseTracker.release(this);
-  }
-
-  public void giveupLeadership(CoreDescriptor cd, Throwable tragicException) {
-    DocCollection dc = getClusterState().getCollectionOrNull(cd.getCollectionName());
-    if (dc == null) return;
-
-    Slice shard = dc.getSlice(cd.getCloudDescriptor().getShardId());
-    if (shard == null) return;
-
-    // if this replica is not a leader, it will be put in recovery state by the leader
-    if (shard.getReplica(cd.getCloudDescriptor().getCoreNodeName()) != shard.getLeader()) return;
-
-    int numActiveReplicas = shard.getReplicas(
-        rep -> rep.getState() == Replica.State.ACTIVE
-            && rep.getType() != Type.PULL
-            && getClusterState().getLiveNodes().contains(rep.getNodeName())
-    ).size();
-
-    // the leader can at least still serve searches; we should only give up
-    // leadership if other replicas can take over
-    if (numActiveReplicas >= 2) {
-      String key = cd.getCollectionName() + ":" + cd.getCloudDescriptor().getCoreNodeName();
-      // TODO: better handle the case where deleting the replica fails
-      if (replicasMetTragicEvent.putIfAbsent(key, tragicException) == null) {
-        log.warn("Leader {} met tragic exception, give up its leadership", key, tragicException);
-        try {
-          // by using Overseer to remove and add replica back, we can do the task in an async/robust manner
-          Map<String,Object> props = new HashMap<>();
-          props.put(Overseer.QUEUE_OPERATION, "deletereplica");
-          props.put(COLLECTION_PROP, cd.getCollectionName());
-          props.put(SHARD_ID_PROP, shard.getName());
-          props.put(REPLICA_PROP, cd.getCloudDescriptor().getCoreNodeName());
-          getOverseerCollectionQueue().offer(Utils.toJSON(new ZkNodeProps(props)));
-
-          props.clear();
-          props.put(Overseer.QUEUE_OPERATION, "addreplica");
-          props.put(COLLECTION_PROP, cd.getCollectionName());
-          props.put(SHARD_ID_PROP, shard.getName());
-          props.put(ZkStateReader.REPLICA_TYPE, cd.getCloudDescriptor().getReplicaType().name().toUpperCase(Locale.ROOT));
-          props.put(CoreAdminParams.NODE, getNodeName());
-          getOverseerCollectionQueue().offer(Utils.toJSON(new ZkNodeProps(props)));
-        } catch (KeeperException e) {
-          log.info("Met exception on give up leadership for {}", key, e);
-          replicasMetTragicEvent.remove(key);
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-          log.info("Met exception on give up leadership for {}", key, e);
-          replicasMetTragicEvent.remove(key);
-        }
-      }
-    }
-  }
-
-
-  /**
-   * Returns true if config file exists
-   */
-  public boolean configFileExists(String collection, String fileName)
-      throws KeeperException, InterruptedException {
-    Stat stat = zkClient.exists(ZkConfigManager.CONFIGS_ZKNODE + "/" + collection + "/" + fileName, null, true);
-    return stat != null;
-  }
-
-  /**
-   * @return information about the cluster from ZooKeeper
-   */
-  public ClusterState getClusterState() {
-    return zkStateReader.getClusterState();
-  }
-
-  public SolrCloudManager getSolrCloudManager() {
-    if (cloudManager != null) {
-      return cloudManager;
-    }
-    synchronized(this) {
-      if (cloudManager != null) {
-        return cloudManager;
-      }
-      cloudSolrClient = new CloudSolrClient.Builder(Collections.singletonList(zkServerAddress), Optional.empty())
-          .withHttpClient(cc.getUpdateShardHandler().getDefaultHttpClient()).build();
-      cloudManager = new SolrClientCloudManager(new ZkDistributedQueueFactory(zkClient), cloudSolrClient);
-    }
-    return cloudManager;
-  }
-
-  /**
-   * Returns config file data (in bytes)
-   */
-  public byte[] getConfigFileData(String zkConfigName, String fileName)
-      throws KeeperException, InterruptedException {
-    String zkPath = ZkConfigManager.CONFIGS_ZKNODE + "/" + zkConfigName + "/" + fileName;
-    byte[] bytes = zkClient.getData(zkPath, null, null, true);
-    if (bytes == null) {
-      log.error("Config file contains no data:" + zkPath);
-      throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
-          "Config file contains no data:" + zkPath);
-    }
-
-    return bytes;
-  }
-
-  // normalize host removing any url scheme.
-  // input can be null, host, or url_prefix://host
-  private String normalizeHostName(String host) throws IOException {
-
-    if (host == null || host.length() == 0) {
-      String hostaddress;
-      try {
-        hostaddress = InetAddress.getLocalHost().getHostAddress();
-      } catch (UnknownHostException e) {
-        hostaddress = "127.0.0.1"; // cannot resolve system hostname, fall through
-      }
-      // Re-resolve the IP when it comes back as "127.0.0.1"; in the other case
-      // we trust that the hosts file is right.
-      if ("127.0.0.1".equals(hostaddress)) {
-        Enumeration<NetworkInterface> netInterfaces = null;
-        try {
-          netInterfaces = NetworkInterface.getNetworkInterfaces();
-          while (netInterfaces.hasMoreElements()) {
-            NetworkInterface ni = netInterfaces.nextElement();
-            Enumeration<InetAddress> ips = ni.getInetAddresses();
-            while (ips.hasMoreElements()) {
-              InetAddress ip = ips.nextElement();
-              if (ip.isSiteLocalAddress()) {
-                hostaddress = ip.getHostAddress();
-              }
-            }
-          }
-        } catch (Exception e) {
-          SolrException.log(log,
-              "Error while looking for a better host name than 127.0.0.1", e);
-        }
-      }
-      host = hostaddress;
-    } else {
-      if (URLUtil.hasScheme(host)) {
-        host = URLUtil.removeScheme(host);
-      }
-    }
-
-    return host;
-  }
-
-  public String getHostName() {
-    return hostName;
-  }
-
-  public int getHostPort() {
-    return localHostPort;
-  }
-
-  public SolrZkClient getZkClient() {
-    return zkClient;
-  }
-
-  /**
-   * @return zookeeper server address
-   */
-  public String getZkServerAddress() {
-    return zkServerAddress;
-  }
-
-  boolean isClosed() {
-    return isClosed;
-  }
-
-  /**
-   * Create the zknodes necessary for a cluster to operate
-   *
-   * @param zkClient a SolrZkClient
-   * @throws KeeperException      if there is a Zookeeper error
-   * @throws InterruptedException on interrupt
-   */
-  public static void createClusterZkNodes(SolrZkClient zkClient) throws KeeperException, InterruptedException, IOException {
-    ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zkClient.getZkClientTimeout());
-    cmdExecutor.ensureExists(ZkStateReader.LIVE_NODES_ZKNODE, zkClient);
-    cmdExecutor.ensureExists(ZkStateReader.COLLECTIONS_ZKNODE, zkClient);
-    cmdExecutor.ensureExists(ZkStateReader.ALIASES, zkClient);
-    cmdExecutor.ensureExists(ZkStateReader.SOLR_AUTOSCALING_EVENTS_PATH, zkClient);
-    cmdExecutor.ensureExists(ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH, zkClient);
-    cmdExecutor.ensureExists(ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH, zkClient);
-    cmdExecutor.ensureExists(ZkStateReader.SOLR_AUTOSCALING_NODE_LOST_PATH, zkClient);
-    byte[] emptyJson = "{}".getBytes(StandardCharsets.UTF_8);
-    cmdExecutor.ensureExists(ZkStateReader.CLUSTER_STATE, emptyJson, CreateMode.PERSISTENT, zkClient);
-    cmdExecutor.ensureExists(ZkStateReader.SOLR_SECURITY_CONF_PATH, emptyJson, CreateMode.PERSISTENT, zkClient);
-    cmdExecutor.ensureExists(ZkStateReader.SOLR_AUTOSCALING_CONF_PATH, emptyJson, CreateMode.PERSISTENT, zkClient);
-    bootstrapDefaultConfigSet(zkClient);
-  }
-
-  private static void bootstrapDefaultConfigSet(SolrZkClient zkClient) throws KeeperException, InterruptedException, IOException {
-    if (zkClient.exists("/configs/_default", true) == false) {
-      String configDirPath = getDefaultConfigDirPath();
-      if (configDirPath == null) {
-        log.warn("The _default configset could not be uploaded. Please provide 'solr.default.confdir' parameter that points to a configset" +
-            " intended to be the default. Current 'solr.default.confdir' value: {}", System.getProperty(SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE));
-      } else {
-        ZkMaintenanceUtils.upConfig(zkClient, Paths.get(configDirPath), ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME);
-      }
-    }
-  }
-
-  /**
-   * Gets the absolute filesystem path of the _default configset to bootstrap from.
-   * First tries the sysprop "solr.default.confdir". If not found, tries to find
-   * the _default dir relative to the sysprop "solr.install.dir".
-   * If that fails as well (usually for unit tests), tries to get the _default from the
-   * classpath. Returns null if not found anywhere.
-   */
-  private static String getDefaultConfigDirPath() {
-    String configDirPath = null;
-    String serverSubPath = "solr" + File.separator +
-        "configsets" + File.separator + "_default" +
-        File.separator + "conf";
-    String subPath = File.separator + "server" + File.separator + serverSubPath;
-    if (System.getProperty(SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE) != null && new File(System.getProperty(SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE)).exists()) {
-      configDirPath = new File(System.getProperty(SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE)).getAbsolutePath();
-    } else if (System.getProperty(SolrDispatchFilter.SOLR_INSTALL_DIR_ATTRIBUTE) != null &&
-        new File(System.getProperty(SolrDispatchFilter.SOLR_INSTALL_DIR_ATTRIBUTE) + subPath).exists()) {
-      configDirPath = new File(System.getProperty(SolrDispatchFilter.SOLR_INSTALL_DIR_ATTRIBUTE) + subPath).getAbsolutePath();
-    } else { // find "_default" in the classpath. This one is used for tests
-      configDirPath = getDefaultConfigDirFromClasspath(serverSubPath);
-    }
-    return configDirPath;
-  }
-
-  private static String getDefaultConfigDirFromClasspath(String serverSubPath) {
-    URL classpathUrl = ZkController.class.getClassLoader().getResource(serverSubPath);
-    try {
-      if (classpathUrl != null && new File(classpathUrl.toURI()).exists()) {
-        return new File(classpathUrl.toURI()).getAbsolutePath();
-      }
-    } catch (URISyntaxException ex) {}
-    return null;
-  }
-
-  private void init(CurrentCoreDescriptorProvider registerOnReconnect) {
-
-    try {
-      createClusterZkNodes(zkClient);
-      zkStateReader.createClusterStateWatchersAndUpdate();
-      this.baseURL = zkStateReader.getBaseUrlForNodeName(this.nodeName);
-
-      checkForExistingEphemeralNode();
-      registerLiveNodesListener();
-
-      // start the overseer first, as the following code may need its processing
-      if (!zkRunOnly) {
-        overseerElector = new LeaderElector(zkClient);
-        this.overseer = new Overseer(cc.getShardHandlerFactory().getShardHandler(), cc.getUpdateShardHandler(),
-            CommonParams.CORES_HANDLER_PATH, zkStateReader, this, cloudConfig);
-        ElectionContext context = new OverseerElectionContext(zkClient,
-            overseer, getNodeName());
-        overseerElector.setup(context);
-        overseerElector.joinElection(context, false);
-      }
-
-      Stat stat = zkClient.exists(ZkStateReader.LIVE_NODES_ZKNODE, null, true);
-      if (stat != null && stat.getNumChildren() > 0) {
-        publishAndWaitForDownStates();
-      }
-
-      // Do this last to signal we're up.
-      createEphemeralLiveNode();
-    } catch (IOException e) {
-      log.error("", e);
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Can't create ZooKeeperController", e);
-    } catch (InterruptedException e) {
-      // Restore the interrupted status
-      Thread.currentThread().interrupt();
-      log.error("", e);
-      throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
-          "", e);
-    } catch (KeeperException e) {
-      log.error("", e);
-      throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
-          "", e);
-    }
-
-  }
-
-  private void checkForExistingEphemeralNode() throws KeeperException, InterruptedException {
-    if (zkRunOnly) {
-      return;
-    }
-    String nodeName = getNodeName();
-    String nodePath = ZkStateReader.LIVE_NODES_ZKNODE + "/" + nodeName;
-
-    if (!zkClient.exists(nodePath, true)) {
-      return;
-    }
-
-    final CountDownLatch deletedLatch = new CountDownLatch(1);
-    Stat stat = zkClient.exists(nodePath, event -> {
-      if (Watcher.Event.EventType.None.equals(event.getType())) {
-        return;
-      }
-      if (Watcher.Event.EventType.NodeDeleted.equals(event.getType())) {
-        deletedLatch.countDown();
-      }
-    }, true);
-
-    if (stat == null) {
-      // znode suddenly disappeared but that's okay
-      return;
-    }
-
-    boolean deleted = deletedLatch.await(zkClient.getSolrZooKeeper().getSessionTimeout() * 2, TimeUnit.MILLISECONDS);
-    if (!deleted) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, "A previous ephemeral live node still exists. " +
-          "Solr cannot continue. Please ensure that no other Solr process using the same port is running already.");
-    }
-  }
-
-  private void registerLiveNodesListener() {
-    // this listener is used for generating nodeLost events, so we check only if
-    // some nodes went missing compared to last state
-    LiveNodesListener listener = (oldNodes, newNodes) -> {
-      oldNodes.removeAll(newNodes);
-      if (oldNodes.isEmpty()) { // only added nodes
-        return;
-      }
-      if (isClosed) {
-        return;
-      }
-      // if this node is in the top three then attempt to create nodeLost message
-      int i = 0;
-      for (String n : newNodes) {
-        if (n.equals(getNodeName())) {
-          break;
-        }
-        if (i > 2) {
-          return; // this node is not in the top three
-        }
-        i++;
-      }
-
-      // retrieve current trigger config - if there are no nodeLost triggers
-      // then don't create markers
-      boolean createNodes = false;
-      try {
-        createNodes = zkStateReader.getAutoScalingConfig().hasTriggerForEvents(TriggerEventType.NODELOST);
-      } catch (KeeperException | InterruptedException e1) {
-        log.warn("Unable to read autoscaling.json", e1);
-      }
-      if (createNodes) {
-        for (String n : oldNodes) {
-          String path = ZkStateReader.SOLR_AUTOSCALING_NODE_LOST_PATH + "/" + n;
-          try {
-            zkClient.create(path, null, CreateMode.PERSISTENT, true);
-          } catch (KeeperException.NodeExistsException e) {
-            // someone else already created this node - ignore
-          } catch (KeeperException | InterruptedException e1) {
-            log.warn("Unable to register nodeLost path for " + n, e1);
-          }
-        }
-      }
-    };
-    zkStateReader.registerLiveNodesListener(listener);
-  }
-
-  public void publishAndWaitForDownStates() throws KeeperException,
-      InterruptedException {
-
-    publishNodeAsDown(getNodeName());
-
-    Set<String> collectionsWithLocalReplica = ConcurrentHashMap.newKeySet();
-    for (CoreDescriptor descriptor : cc.getCoreDescriptors()) {
-      collectionsWithLocalReplica.add(descriptor.getCloudDescriptor().getCollectionName());
-    }
-
-    CountDownLatch latch = new CountDownLatch(collectionsWithLocalReplica.size());
-    for (String collectionWithLocalReplica : collectionsWithLocalReplica) {
-      zkStateReader.registerCollectionStateWatcher(collectionWithLocalReplica, (liveNodes, collectionState) -> {
-        if (collectionState == null)  return false;
-        boolean foundStates = true;
-        for (CoreDescriptor coreDescriptor : cc.getCoreDescriptors()) {
-          if (coreDescriptor.getCloudDescriptor().getCollectionName().equals(collectionWithLocalReplica))  {
-            Replica replica = collectionState.getReplica(coreDescriptor.getCloudDescriptor().getCoreNodeName());
-            if (replica == null || replica.getState() != Replica.State.DOWN) {
-              foundStates = false;
-            }
-          }
-        }
-
-        if (foundStates && collectionsWithLocalReplica.remove(collectionWithLocalReplica))  {
-          latch.countDown();
-        }
-        return foundStates;
-      });
-    }
-
-    boolean allPublishedDown = latch.await(WAIT_DOWN_STATES_TIMEOUT_SECONDS, TimeUnit.SECONDS);
-    if (!allPublishedDown) {
-      log.warn("Timed out waiting to see all nodes published as DOWN in our cluster state.");
-    }
-  }
-
-  /**
-   * Validates that the chroot exists in ZooKeeper, optionally creating it if
-   * 'create' is true and it doesn't already exist.
-   *
-   * @return true if the path exists or was created; false if the path doesn't
-   * exist and 'create' == false
-   */
-  public static boolean checkChrootPath(String zkHost, boolean create)
-      throws KeeperException, InterruptedException {
-    if (!SolrZkClient.containsChroot(zkHost)) {
-      return true;
-    }
-    log.trace("zkHost includes chroot");
-    String chrootPath = zkHost.substring(zkHost.indexOf("/"), zkHost.length());
-
-    SolrZkClient tmpClient = new SolrZkClient(zkHost.substring(0,
-        zkHost.indexOf("/")), 60000, 30000, null, null, null);
-    boolean exists = tmpClient.exists(chrootPath, true);
-    if (!exists && create) {
-      tmpClient.makePath(chrootPath, false, true);
-      exists = true;
-    }
-    tmpClient.close();
-    return exists;
-  }
-
-  public boolean isConnected() {
-    return zkClient.isConnected();
-  }
-
-  private void createEphemeralLiveNode() throws KeeperException,
-      InterruptedException {
-    if (zkRunOnly) {
-      return;
-    }
-    String nodeName = getNodeName();
-    String nodePath = ZkStateReader.LIVE_NODES_ZKNODE + "/" + nodeName;
-    String nodeAddedPath = ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH + "/" + nodeName;
-    log.info("Register node as live in ZooKeeper:" + nodePath);
-    List<Op> ops = new ArrayList<>(2);
-    ops.add(Op.create(nodePath, null, zkClient.getZkACLProvider().getACLsToAdd(nodePath), CreateMode.EPHEMERAL));
-    // only create a nodeAdded marker if there are nodeAdded triggers to consume it
-    boolean createMarkerNode = zkStateReader.getAutoScalingConfig().hasTriggerForEvents(TriggerEventType.NODEADDED);
-    if (createMarkerNode && !zkClient.exists(nodeAddedPath, true)) {
-      // use EPHEMERAL so that it disappears if this node goes down
-      // and no other action is taken
-      ops.add(Op.create(nodeAddedPath, null, zkClient.getZkACLProvider().getACLsToAdd(nodeAddedPath), CreateMode.EPHEMERAL));
-    }
-    zkClient.multi(ops, true);
-  }
-
-  public void removeEphemeralLiveNode() throws KeeperException, InterruptedException {
-    if (zkRunOnly) {
-      return;
-    }
-    String nodeName = getNodeName();
-    String nodePath = ZkStateReader.LIVE_NODES_ZKNODE + "/" + nodeName;
-    String nodeAddedPath = ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH + "/" + nodeName;
-    log.info("Remove node as live in ZooKeeper:" + nodePath);
-    List<Op> ops = new ArrayList<>(2);
-    ops.add(Op.delete(nodePath, -1));
-    if (zkClient.exists(nodeAddedPath, true)) {
-      ops.add(Op.delete(nodeAddedPath, -1));
-    }
-    zkClient.multi(ops, true);
-  }
-
-  public String getNodeName() {
-    return nodeName;
-  }
-
-  /**
-   * Returns true if the path exists
-   */
-  public boolean pathExists(String path) throws KeeperException,
-      InterruptedException {
-    return zkClient.exists(path, true);
-  }
-
-
-  /**
-   * Register shard with ZooKeeper.
-   *
-   * @return the shardId for the SolrCore
-   */
-  public String register(String coreName, final CoreDescriptor desc, boolean skipRecovery) throws Exception {
-    return register(coreName, desc, false, false, skipRecovery);
-  }
-
-
-  /**
-   * Register shard with ZooKeeper.
-   *
-   * @return the shardId for the SolrCore
-   */
-  public String register(String coreName, final CoreDescriptor desc, boolean recoverReloadedCores,
-                         boolean afterExpiration, boolean skipRecovery) throws Exception {
-    try (SolrCore core = cc.getCore(desc.getName())) {
-      MDCLoggingContext.setCore(core);
-    }
-    try {
-      // pre register has published our down state
-      final String baseUrl = getBaseUrl();
-      final CloudDescriptor cloudDesc = desc.getCloudDescriptor();
-      final String collection = cloudDesc.getCollectionName();
-      final String shardId = cloudDesc.getShardId();
-      final String coreZkNodeName = cloudDesc.getCoreNodeName();
-      assert coreZkNodeName != null : "we should have a coreNodeName by now";
-
-      // check replica's existence in clusterstate first
-      try {
-        zkStateReader.waitForState(collection, Overseer.isLegacy(zkStateReader) ? 60000 : 100,
-            TimeUnit.MILLISECONDS, (liveNodes, collectionState) -> getReplicaOrNull(collectionState, shardId, coreZkNodeName) != null);
-      } catch (TimeoutException e) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Error registering SolrCore, timeout waiting for replica present in clusterstate");
-      }
-      Replica replica = getReplicaOrNull(zkStateReader.getClusterState().getCollectionOrNull(collection), shardId, coreZkNodeName);
-      if (replica == null) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Error registering SolrCore, replica is removed from clusterstate");
-      }
-
-      ZkShardTerms shardTerms = getShardTerms(collection, cloudDesc.getShardId());
-
-      if (replica.getType() != Type.PULL) {
-        shardTerms.registerTerm(coreZkNodeName);
-      }
-
-      log.debug("Register replica - core:{} address:{} collection:{} shard:{}",
-          coreName, baseUrl, collection, shardId);
-
-      try {
-        // If we're a preferred leader, insert ourselves at the head of the queue
-        boolean joinAtHead = replica.getBool(SliceMutator.PREFERRED_LEADER_PROP, false);
-        if (replica.getType() != Type.PULL) {
-          joinElection(desc, afterExpiration, joinAtHead);
-        } else {
-          if (joinAtHead) {
-            log.warn("Replica {} was designated as preferred leader but its type is {}; it won't join the election", coreZkNodeName, Type.PULL);
-          }
-          log.debug("Replica {} is skipping the election because its type is {}", coreZkNodeName, Type.PULL);
-          startReplicationFromLeader(coreName, false);
-        }
-      } catch (InterruptedException e) {
-        // Restore the interrupted status
-        Thread.currentThread().interrupt();
-        throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
-      } catch (KeeperException | IOException e) {
-        throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
-      }
-      
-      // in this case, we want to wait for the leader at least as long as the leader
-      // might wait for a vote, but also long enough that a large cluster has
-      // time to get its act together
-      String leaderUrl = getLeader(cloudDesc, leaderVoteWait + 600000);
-      
-      String ourUrl = ZkCoreNodeProps.getCoreUrl(baseUrl, coreName);
-      log.debug("We are " + ourUrl + " and leader is " + leaderUrl);
-      boolean isLeader = leaderUrl.equals(ourUrl);
-      assert !(isLeader && replica.getType() == Type.PULL) : "Pull replica became leader!";
-
-      try (SolrCore core = cc.getCore(desc.getName())) {
-        
-        // recover from local transaction log and wait for it to complete before
-        // going active
-        // TODO: should this be moved to another thread? To recoveryStrat?
-        // TODO: should this actually be done earlier, before (or as part of)
-        // leader election perhaps?
-        
-        UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
-        boolean isTlogReplicaAndNotLeader = replica.getType() == Replica.Type.TLOG && !isLeader;
-        if (isTlogReplicaAndNotLeader) {
-          String commitVersion = ReplicateFromLeader.getCommitVersion(core);
-          if (commitVersion != null) {
-            ulog.copyOverOldUpdates(Long.parseLong(commitVersion));
-          }
-        }
-        // we will call register again after zk expiration and on reload
-        if (!afterExpiration && !core.isReloaded() && ulog != null && !isTlogReplicaAndNotLeader) {
-          // disable recovery in case shard is in construction state (for shard splits)
-          Slice slice = getClusterState().getCollection(collection).getSlice(shardId);
-          if (slice.getState() != Slice.State.CONSTRUCTION || !isLeader) {
-            Future<UpdateLog.RecoveryInfo> recoveryFuture = core.getUpdateHandler().getUpdateLog().recoverFromLog();
-            if (recoveryFuture != null) {
-              log.info("Replaying tlog for " + ourUrl + " during startup... NOTE: This can take a while.");
-              recoveryFuture.get(); // NOTE: this could potentially block for
-              // minutes or more!
-              // TODO: publish as recovering in the meantime?
-              // TODO: in the future we could do peersync in parallel with recoverFromLog
-            } else {
-              log.debug("No LogReplay needed for core={} baseURL={}", core.getName(), baseUrl);
-            }
-          }
-        }
-        boolean didRecovery
-            = checkRecovery(recoverReloadedCores, isLeader, skipRecovery, collection, coreZkNodeName, shardId, core, cc, afterExpiration);
-        if (!didRecovery) {
-          if (isTlogReplicaAndNotLeader) {
-            startReplicationFromLeader(coreName, true);
-          }
-          publish(desc, Replica.State.ACTIVE);
-        }
-
-        if (replica.getType() != Type.PULL) {
-          // the watcher is added to a set, so repeated calls of this method leave only one watcher registered
-          shardTerms.addListener(new RecoveringCoreTermWatcher(core.getCoreDescriptor(), getCoreContainer()));
-        }
-        core.getCoreDescriptor().getCloudDescriptor().setHasRegistered(true);
-      } catch (Exception e) {
-        unregister(coreName, desc, false);
-        throw e;
-      }
-      
-      // make sure we have an update cluster state right away
-      zkStateReader.forceUpdateCollection(collection);
-      // the watcher is added to a set, so repeated calls of this method leave only one watcher registered
-      zkStateReader.registerCollectionStateWatcher(cloudDesc.getCollectionName(),
-          new UnloadCoreOnDeletedWatcher(coreZkNodeName, shardId, desc.getName()));
-      return shardId;
-    } finally {
-      MDCLoggingContext.clear();
-    }
-  }
-
-  private Replica getReplicaOrNull(DocCollection docCollection, String shard, String coreNodeName) {
-    if (docCollection == null) return null;
-
-    Slice slice = docCollection.getSlice(shard);
-    if (slice == null) return null;
-
-    Replica replica = slice.getReplica(coreNodeName);
-    if (replica == null) return null;
-    if (!getNodeName().equals(replica.getNodeName())) return null;
-
-    return replica;
-  }
-
-  public void startReplicationFromLeader(String coreName, boolean switchTransactionLog) throws InterruptedException {
-    log.info("{} starting background replication from leader", coreName);
-    ReplicateFromLeader replicateFromLeader = new ReplicateFromLeader(cc, coreName);
-    synchronized (replicateFromLeader) { // synchronize to prevent any stop before we finish the start
-      if (replicateFromLeaders.putIfAbsent(coreName, replicateFromLeader) == null) {
-        replicateFromLeader.startReplication(switchTransactionLog);
-      } else {
-        log.warn("A replicate from leader instance already exists for core {}", coreName);
-      }
-    }
-  }
-
-  public void stopReplicationFromLeader(String coreName) {
-    log.info("{} stopping background replication from leader", coreName);
-    ReplicateFromLeader replicateFromLeader = replicateFromLeaders.remove(coreName);
-    if (replicateFromLeader != null) {
-      synchronized (replicateFromLeader) {
-        replicateFromLeader.stopReplication();
-      }
-    }
-  }
-
-  // timeoutms is the timeout for the first call to get the leader; there is then
-  // a longer wait to make sure that the leader matches our local state
-  private String getLeader(final CloudDescriptor cloudDesc, int timeoutms) {
-
-    String collection = cloudDesc.getCollectionName();
-    String shardId = cloudDesc.getShardId();
-    // rather than look in the cluster state file, we go straight to the zknodes
-    // here, because on cluster restart there could be stale leader info in the
-    // cluster state node that won't be updated for a moment
-    String leaderUrl;
-    try {
-      leaderUrl = getLeaderProps(collection, cloudDesc.getShardId(), timeoutms)
-          .getCoreUrl();
-
-      // now wait until our current cloud state contains the latest leader
-      String clusterStateLeaderUrl = zkStateReader.getLeaderUrl(collection,
-          shardId, timeoutms * 2); // since we found it in zk, we are willing to
-      // wait a while to find it in state
-      int tries = 0;
-      final long msInSec = 1000L;
-      int maxTries = (int) Math.floor(leaderConflictResolveWait / msInSec);
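-      // e.g. with a leaderConflictResolveWait of 180000 ms this allows 180 one-second retries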
-      while (!leaderUrl.equals(clusterStateLeaderUrl)) {
-        if (tries > maxTries) {
-          throw new SolrException(ErrorCode.SERVER_ERROR,
-              "There is conflicting information about the leader of shard: "
-                  + cloudDesc.getShardId() + " our state says:"
-                  + clusterStateLeaderUrl + " but zookeeper says:" + leaderUrl);
-        }
-        tries++;
-        if (tries % 30 == 0) {
-          String warnMsg = String.format(Locale.ENGLISH, "Still seeing conflicting information about the leader "
-                  + "of shard %s for collection %s after %d seconds; our state says %s, but ZooKeeper says %s",
-              cloudDesc.getShardId(), collection, tries, clusterStateLeaderUrl, leaderUrl);
-          log.warn(warnMsg);
-        }
-        Thread.sleep(msInSec);
-        clusterStateLeaderUrl = zkStateReader.getLeaderUrl(collection, shardId,
-            timeoutms);
-        leaderUrl = getLeaderProps(collection, cloudDesc.getShardId(), timeoutms)
-            .getCoreUrl();
-      }
-
-    } catch (Exception e) {
-      log.error("Error getting leader from zk", e);
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Error getting leader from zk for shard " + shardId, e);
-    }
-    return leaderUrl;
-  }
-
-  /**
-   * Get leader props directly from zk nodes.
-   */
-  public ZkCoreNodeProps getLeaderProps(final String collection,
-                                        final String slice, int timeoutms) throws InterruptedException {
-    return getLeaderProps(collection, slice, timeoutms, false);
-  }
-
-  /**
-   * Get leader props directly from zk nodes.
-   *
-   * @return leader props
-   */
-  public ZkCoreNodeProps getLeaderProps(final String collection,
-                                        final String slice, int timeoutms, boolean failImmediatelyOnExpiration) throws InterruptedException {
-    int iterCount = timeoutms / 1000;
-    Exception exp = null;
-    while (iterCount-- > 0) {
-      try {
-        byte[] data = zkClient.getData(
-            ZkStateReader.getShardLeadersPath(collection, slice), null, null,
-            true);
-        ZkCoreNodeProps leaderProps = new ZkCoreNodeProps(
-            ZkNodeProps.load(data));
-        return leaderProps;
-      } catch (InterruptedException e) {
-        throw e;
-      } catch (SessionExpiredException e) {
-        if (failImmediatelyOnExpiration) {
-          throw new RuntimeException("Session has expired - could not get leader props", exp);
-        }
-        exp = e;
-        Thread.sleep(1000);
-      } catch (Exception e) {
-        exp = e;
-        Thread.sleep(1000);
-      }
-      if (cc.isShutDown()) {
-        throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "CoreContainer is closed");
-      }
-    }
-    throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Could not get leader props", exp);
-  }
-
-
-  private void joinElection(CoreDescriptor cd, boolean afterExpiration, boolean joinAtHead)
-      throws InterruptedException, KeeperException, IOException {
-    // look for old context - if we find it, cancel it
-    String collection = cd.getCloudDescriptor().getCollectionName();
-    final String coreNodeName = cd.getCloudDescriptor().getCoreNodeName();
-
-    ContextKey contextKey = new ContextKey(collection, coreNodeName);
-
-    ElectionContext prevContext = electionContexts.get(contextKey);
-
-    if (prevContext != null) {
-      prevContext.cancelElection();
-    }
-
-    String shardId = cd.getCloudDescriptor().getShardId();
-
-    Map<String, Object> props = new HashMap<>();
-    // we only put a subset of props into the leader node
-    props.put(ZkStateReader.BASE_URL_PROP, getBaseUrl());
-    props.put(ZkStateReader.CORE_NAME_PROP, cd.getName());
-    props.put(ZkStateReader.NODE_NAME_PROP, getNodeName());
-    props.put(ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
-
-
-    ZkNodeProps ourProps = new ZkNodeProps(props);
-
-    LeaderElector leaderElector = new LeaderElector(zkClient, contextKey, electionContexts);
-    ElectionContext context = new ShardLeaderElectionContext(leaderElector, shardId,
-        collection, coreNodeName, ourProps, this, cc);
-
-    leaderElector.setup(context);
-    electionContexts.put(contextKey, context);
-    leaderElector.joinElection(context, false, joinAtHead);
-  }
-
-
-  /**
-   * Returns whether or not a recovery was started
-   */
-  private boolean checkRecovery(boolean recoverReloadedCores, final boolean isLeader, boolean skipRecovery,
-                                final String collection, String coreZkNodeName, String shardId,
-                                SolrCore core, CoreContainer cc, boolean afterExpiration) {
-    if (SKIP_AUTO_RECOVERY) {
-      log.warn("Skipping recovery according to sys prop solrcloud.skip.autorecovery");
-      return false;
-    }
-    boolean doRecovery = true;
-    if (!isLeader) {
-
-      if (skipRecovery || (!afterExpiration && core.isReloaded() && !recoverReloadedCores)) {
-        doRecovery = false;
-      }
-
-      if (doRecovery) {
-        log.info("Core needs to recover:" + core.getName());
-        core.getUpdateHandler().getSolrCoreState().doRecovery(cc, core.getCoreDescriptor());
-        return true;
-      }
-
-      ZkShardTerms zkShardTerms = getShardTerms(collection, shardId);
-      if (zkShardTerms.registered(coreZkNodeName) && !zkShardTerms.canBecomeLeader(coreZkNodeName)) {
-        log.info("Leader's term larger than core " + core.getName() + "; starting recovery process");
-        core.getUpdateHandler().getSolrCoreState().doRecovery(cc, core.getCoreDescriptor());
-        return true;
-      }
-    } else {
-      log.info("I am the leader, no recovery necessary");
-    }
-
-    return false;
-  }
-
-
-  public String getBaseUrl() {
-    return baseURL;
-  }
-
-  public void publish(final CoreDescriptor cd, final Replica.State state) throws Exception {
-    publish(cd, state, true, false);
-  }
-
-  /**
-   * Publish core state to overseer.
-   */
-  public void publish(final CoreDescriptor cd, final Replica.State state, boolean updateLastState, boolean forcePublish) throws Exception {
-    if (!forcePublish) {
-      try (SolrCore core = cc.getCore(cd.getName())) {
-        if (core == null || core.isClosed()) {
-          return;
-        }
-        MDCLoggingContext.setCore(core);
-      }
-    } else {
-      MDCLoggingContext.setCoreDescriptor(cc, cd);
-    }
-    try {
-      String collection = cd.getCloudDescriptor().getCollectionName();
-      
-      log.debug("publishing state={}", state.toString());
-      // System.out.println(Thread.currentThread().getStackTrace()[3]);
-      Integer numShards = cd.getCloudDescriptor().getNumShards();
-      if (numShards == null) { // XXX sys prop hack
-        log.debug("numShards not found on descriptor - reading it from system property");
-        numShards = Integer.getInteger(ZkStateReader.NUM_SHARDS_PROP);
-      }
-      
-      assert collection != null && collection.length() > 0;
-      
-      String shardId = cd.getCloudDescriptor().getShardId();
-      
-      String coreNodeName = cd.getCloudDescriptor().getCoreNodeName();
-
-      Map<String,Object> props = new HashMap<>();
-      props.put(Overseer.QUEUE_OPERATION, "state");
-      props.put(ZkStateReader.STATE_PROP, state.toString());
-      props.put(ZkStateReader.BASE_URL_PROP, getBaseUrl());
-      props.put(ZkStateReader.CORE_NAME_PROP, cd.getName());
-      props.put(ZkStateReader.ROLES_PROP, cd.getCloudDescriptor().getRoles());
-      props.put(ZkStateReader.NODE_NAME_PROP, getNodeName());
-      props.put(ZkStateReader.SHARD_ID_PROP, cd.getCloudDescriptor().getShardId());
-      props.put(ZkStateReader.COLLECTION_PROP, collection);
-      props.put(ZkStateReader.REPLICA_TYPE, cd.getCloudDescriptor().getReplicaType().toString());
-      if (!Overseer.isLegacy(zkStateReader)) {
-        props.put(ZkStateReader.FORCE_SET_STATE_PROP, "false");
-      }
-      if (numShards != null) {
-        props.put(ZkStateReader.NUM_SHARDS_PROP, numShards.toString());
-      }
-      if (coreNodeName != null) {
-        props.put(ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
-      }
-      try (SolrCore core = cc.getCore(cd.getName())) {
-        if (core != null && state == Replica.State.ACTIVE) {
-          ensureRegisteredSearcher(core);
-        }
-        if (core != null && core.getDirectoryFactory().isSharedStorage()) {
-          if (core.getDirectoryFactory().isSharedStorage()) {
-            props.put(ZkStateReader.SHARED_STORAGE_PROP, "true");
-            props.put("dataDir", core.getDataDir());
-            UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
-            if (ulog != null) {
-              props.put("ulogDir", ulog.getLogDir());
-            }
-          }
-        }
-      } catch (SolrCoreInitializationException ex) {
-        // The core had failed to initialize (in a previous request, not this one), hence nothing to do here.
-        log.info("The core '{}' had failed to initialize before.", cd.getName());
-      }
-
-      // pull replicas are excluded because their terms are not considered
-      if (state == Replica.State.RECOVERING && cd.getCloudDescriptor().getReplicaType() != Type.PULL) {
-        // state is used by clients, and a replica's state can change from RECOVERING to DOWN without needing to finish recovery;
-        // by calling this we will know whether a replica actually finished recovery or not
-        getShardTerms(collection, shardId).startRecovering(coreNodeName);
-      }
-      if (state == Replica.State.ACTIVE && cd.getCloudDescriptor().getReplicaType() != Type.PULL) {
-        getShardTerms(collection, shardId).doneRecovering(coreNodeName);
-      }
-
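-      // an illustrative "state" message as it is queued for the Overseer (values below are examples only):
-      //   {"operation":"state", "state":"active", "base_url":"http://127.0.0.1:8983/solr",
-      //    "core":"mycore", "node_name":"127.0.0.1:8983_solr", "shard":"shard1",
-      //    "collection":"mycollection", "type":"NRT"}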
-      ZkNodeProps m = new ZkNodeProps(props);
-      
-      if (updateLastState) {
-        cd.getCloudDescriptor().setLastPublished(state);
-      }
-      overseerJobQueue.offer(Utils.toJSON(m));
-    } finally {
-      MDCLoggingContext.clear();
-    }
-  }
-
-  public ZkShardTerms getShardTerms(String collection, String shardId) {
-    return getCollectionTerms(collection).getShard(shardId);
-  }
-
-  private ZkCollectionTerms getCollectionTerms(String collection) {
-    synchronized (collectionToTerms) {
-      return collectionToTerms.computeIfAbsent(collection, coll -> new ZkCollectionTerms(coll, zkClient));
-    }
-  }
-
-  public void clearZkCollectionTerms() {
-    synchronized (collectionToTerms) {
-      collectionToTerms.values().forEach(ZkCollectionTerms::close);
-      collectionToTerms.clear();
-    }
-  }
-
-  public void unregister(String coreName, CoreDescriptor cd) throws Exception {
-    unregister(coreName, cd, true);
-  }
-
-  public void unregister(String coreName, CoreDescriptor cd, boolean removeCoreFromZk) throws Exception {
-    final String coreNodeName = cd.getCloudDescriptor().getCoreNodeName();
-    final String collection = cd.getCloudDescriptor().getCollectionName();
-    getCollectionTerms(collection).remove(cd.getCloudDescriptor().getShardId(), cd);
-    replicasMetTragicEvent.remove(collection+":"+coreNodeName);
-
-    if (Strings.isNullOrEmpty(collection)) {
-      log.error("No collection was specified.");
-      assert false : "No collection was specified [" + collection + "]";
-      return;
-    }
-    final DocCollection docCollection = zkStateReader.getClusterState().getCollectionOrNull(collection);
-    Replica replica = (docCollection == null) ? null : docCollection.getReplica(coreNodeName);
-
-    if (replica == null || replica.getType() != Type.PULL) {
-      ElectionContext context = electionContexts.remove(new ContextKey(collection, coreNodeName));
-
-      if (context != null) {
-        context.cancelElection();
-      }
-    }
-    CloudDescriptor cloudDescriptor = cd.getCloudDescriptor();
-    if (removeCoreFromZk) {
-      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION,
-          OverseerAction.DELETECORE.toLower(), ZkStateReader.CORE_NAME_PROP, coreName,
-          ZkStateReader.NODE_NAME_PROP, getNodeName(),
-          ZkStateReader.COLLECTION_PROP, cloudDescriptor.getCollectionName(),
-          ZkStateReader.BASE_URL_PROP, getBaseUrl(),
-          ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
-      overseerJobQueue.offer(Utils.toJSON(m));
-    }
-  }
-
-  public void createCollection(String collection) throws Exception {
-    ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION,
-        CollectionParams.CollectionAction.CREATE.toLower(), ZkStateReader.NODE_NAME_PROP, getNodeName(),
-        ZkStateReader.COLLECTION_PROP, collection);
-    overseerJobQueue.offer(Utils.toJSON(m));
-  }
-
-  // convenience for testing
-  void printLayoutToStdOut() throws KeeperException, InterruptedException {
-    zkClient.printLayoutToStdOut();
-  }
-
-  public ZkStateReader getZkStateReader() {
-    return zkStateReader;
-  }
-
-  private void doGetShardIdAndNodeNameProcess(CoreDescriptor cd) {
-    final String coreNodeName = cd.getCloudDescriptor().getCoreNodeName();
-
-    if (coreNodeName != null) {
-      waitForShardId(cd);
-    } else {
-      // if no explicit coreNodeName, we want to match by base url and core name
-      waitForCoreNodeName(cd);
-      waitForShardId(cd);
-    }
-  }
-
-  private void waitForCoreNodeName(CoreDescriptor descriptor) {
-    int retryCount = 320;
-    log.debug("look for our core node name");
-    while (retryCount-- > 0) {
-      final DocCollection docCollection = zkStateReader.getClusterState()
-          .getCollectionOrNull(descriptor.getCloudDescriptor().getCollectionName());
-      if (docCollection != null && docCollection.getSlicesMap() != null) {
-        final Map<String, Slice> slicesMap = docCollection.getSlicesMap();
-        for (Slice slice : slicesMap.values()) {
-          for (Replica replica : slice.getReplicas()) {
-            // TODO: for really large clusters, we could 'index' on this
-
-            String nodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP);
-            String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
-
-            String msgNodeName = getNodeName();
-            String msgCore = descriptor.getName();
-
-            if (msgNodeName.equals(nodeName) && core.equals(msgCore)) {
-              descriptor.getCloudDescriptor()
-                  .setCoreNodeName(replica.getName());
-              getCoreContainer().getCoresLocator().persist(getCoreContainer(), descriptor);
-              return;
-            }
-          }
-        }
-      }
-      try {
-        Thread.sleep(1000);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-      }
-    }
-  }
-
-  private void waitForShardId(CoreDescriptor cd) {
-    log.debug("waiting to find shard id in clusterstate for " + cd.getName());
-    int retryCount = 320;
-    while (retryCount-- > 0) {
-      final String shardId = zkStateReader.getClusterState().getShardId(cd.getCollectionName(), getNodeName(), cd.getName());
-      if (shardId != null) {
-        cd.getCloudDescriptor().setShardId(shardId);
-        return;
-      }
-      try {
-        Thread.sleep(1000);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-      }
-    }
-
-    throw new SolrException(ErrorCode.SERVER_ERROR,
-        "Could not get shard id for core: " + cd.getName());
-  }
-
-
-  public String getCoreNodeName(CoreDescriptor descriptor) {
-    String coreNodeName = descriptor.getCloudDescriptor().getCoreNodeName();
-    if (coreNodeName == null && !genericCoreNodeNames) {
-      // it's the default
-      return getNodeName() + "_" + descriptor.getName();
-    }
-
-    return coreNodeName;
-  }
-
-  public void preRegister(CoreDescriptor cd, boolean publishState) {
-
-    String coreNodeName = getCoreNodeName(cd);
-
-    // before becoming available, make sure we are not live and active
-    // this also gets us our assigned shard id if it was not specified
-    try {
-      checkStateInZk(cd);
-
-      CloudDescriptor cloudDesc = cd.getCloudDescriptor();
-
-      // make sure the node name is set on the descriptor
-      if (cloudDesc.getCoreNodeName() == null) {
-        cloudDesc.setCoreNodeName(coreNodeName);
-      }
-
-      // publishState == false on startup
-      if (publishState || isPublishAsDownOnStartup(cloudDesc)) {
-        publish(cd, Replica.State.DOWN, false, true);
-      }
-      String collectionName = cd.getCloudDescriptor().getCollectionName();
-      DocCollection collection = zkStateReader.getClusterState().getCollectionOrNull(collectionName);
-      log.debug(collection == null ?
-              "Collection {} not visible yet, but flagging it so a watch is registered when it becomes visible" :
-              "Registering watch for collection {}",
-          collectionName);
-    } catch (KeeperException e) {
-      log.error("", e);
-      throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      log.error("", e);
-      throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
-    } catch (NotInClusterStateException e) {
-      // make the stack trace less verbose
-      throw e;
-    } catch (Exception e) {
-      log.error("", e);
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "", e);
-    }
-
-    doGetShardIdAndNodeNameProcess(cd);
-
-  }
-
-  /**
-   * On startup, the node has already published all of its replicas as DOWN,
-   * so when legacyCloud=false (the replica must already be present in ZK)
-   * we can skip publishing the replica as down.
-   * @return whether the replica should be published as down on startup
-   */
-  private boolean isPublishAsDownOnStartup(CloudDescriptor cloudDesc) {
-    if (!Overseer.isLegacy(zkStateReader)) {
-      Replica replica = zkStateReader.getClusterState().getCollection(cloudDesc.getCollectionName())
-          .getSlice(cloudDesc.getShardId())
-          .getReplica(cloudDesc.getCoreNodeName());
-      if (replica.getNodeName().equals(getNodeName())) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  private void checkStateInZk(CoreDescriptor cd) throws InterruptedException, NotInClusterStateException {
-    if (!Overseer.isLegacy(zkStateReader)) {
-      CloudDescriptor cloudDesc = cd.getCloudDescriptor();
-      String nodeName = cloudDesc.getCoreNodeName();
-      if (nodeName == null) {
-        if (cc.repairCoreProperty(cd, CoreDescriptor.CORE_NODE_NAME) == false) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "No coreNodeName for " + cd);
-        }
-        nodeName = cloudDesc.getCoreNodeName();
-        // verify that the repair worked.
-        if (nodeName == null) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "No coreNodeName for " + cd);
-        }
-      }
-      final String coreNodeName = nodeName;
-
-      if (cloudDesc.getShardId() == null) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "No shard id for " + cd);
-      }
-
-      AtomicReference<String> errorMessage = new AtomicReference<>();
-      AtomicReference<DocCollection> collectionState = new AtomicReference<>();
-      try {
-        zkStateReader.waitForState(cd.getCollectionName(), 10, TimeUnit.SECONDS, (n, c) -> {
-          collectionState.set(c);
-          if (c == null)
-            return false;
-          Slice slice = c.getSlice(cloudDesc.getShardId());
-          if (slice == null) {
-            errorMessage.set("Invalid shard: " + cloudDesc.getShardId());
-            return false;
-          }
-          Replica replica = slice.getReplica(coreNodeName);
-          if (replica == null) {
-            errorMessage.set("coreNodeName " + coreNodeName + " does not exist in shard " + cloudDesc.getShardId() +
-                ", ignore the exception if the replica was deleted");
-            return false;
-          }
-          return true;
-        });
-      } catch (TimeoutException e) {
-        String error = errorMessage.get();
-        if (error == null)
-          error = "coreNodeName " + coreNodeName + " does not exist in shard " + cloudDesc.getShardId() +
-              ", ignore the exception if the replica was deleted";
-        throw new NotInClusterStateException(ErrorCode.SERVER_ERROR, error);
-      }
-    }
-  }
-
-  private ZkCoreNodeProps waitForLeaderToSeeDownState(
-      CoreDescriptor descriptor, final String coreZkNodeName) {
-    // try not to wait too long here - if we are waiting too long, we should probably
-    // move along and join the election
-    
-    CloudDescriptor cloudDesc = descriptor.getCloudDescriptor();
-    String collection = cloudDesc.getCollectionName();
-    String shard = cloudDesc.getShardId();
-    ZkCoreNodeProps leaderProps = null;
-
-    int retries = 2;
-    for (int i = 0; i < retries; i++) {
-      try {
-        if (isClosed) {
-          throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
-              "We have been closed");
-        }
-
-        // go straight to zk, not the cloud state - we want current info
-        leaderProps = getLeaderProps(collection, shard, 5000);
-        break;
-      } catch (Exception e) {
-        SolrException.log(log, "There was a problem finding the leader in zk", e);
-        try {
-          Thread.sleep(2000);
-        } catch (InterruptedException e1) {
-          Thread.currentThread().interrupt();
-        }
-        if (i == retries - 1) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "There was a problem finding the leader in zk");
-        }
-      }
-    }
-
-    String leaderBaseUrl = leaderProps.getBaseUrl();
-    String leaderCoreName = leaderProps.getCoreName();
-
-    String myCoreNodeName = cloudDesc.getCoreNodeName();
-    String myCoreName = descriptor.getName();
-    String ourUrl = ZkCoreNodeProps.getCoreUrl(getBaseUrl(), myCoreName);
-
-    boolean isLeader = leaderProps.getCoreUrl().equals(ourUrl);
-    if (!isLeader && !SKIP_AUTO_RECOVERY) {
-      if (!getShardTerms(collection, shard).canBecomeLeader(myCoreNodeName)) {
-        log.debug("Term of replica " + myCoreNodeName +
-            " is already less than leader, so not waiting for leader to see down state.");
-      } else {
-
-        log.info("Replica need to wait for leader to see down state.");
-
-        try (HttpSolrClient client = new Builder(leaderBaseUrl)
-            .withConnectionTimeout(15000)
-            .withSocketTimeout(120000)
-            .build()) {
-          WaitForState prepCmd = new WaitForState();
-          prepCmd.setCoreName(leaderCoreName);
-          prepCmd.setNodeName(getNodeName());
-          prepCmd.setCoreNodeName(coreZkNodeName);
-          prepCmd.setState(Replica.State.DOWN);
-
-          // let's retry a couple of times; perhaps the leader just went down,
-          // or perhaps it is just not quite ready for us yet
-          retries = 2;
-          for (int i = 0; i < retries; i++) {
-            if (isClosed) {
-              throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
-                  "We have been closed");
-            }
-            try {
-              client.request(prepCmd);
-              break;
-            } catch (Exception e) {
-
-              // if the core container is shutdown, don't wait
-              if (cc.isShutDown()) {
-                throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
-                    "Core container is shutdown.");
-              }
-
-              Throwable rootCause = SolrException.getRootCause(e);
-              if (rootCause instanceof IOException) {
-                // if there was a communication error talking to the leader, see if the leader is even alive
-                if (!zkStateReader.getClusterState().liveNodesContain(leaderProps.getNodeName())) {
-                  throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
-                      "Node " + leaderProps.getNodeName() + " hosting leader for " +
-                          shard + " in " + collection + " is not live!");
-                }
-              }
-
-              SolrException.log(log,
-                  "There was a problem making a request to the leader", e);
-              try {
-                Thread.sleep(2000);
-              } catch (InterruptedException e1) {
-                Thread.currentThread().interrupt();
-              }
-              if (i == retries - 1) {
-                throw new SolrException(ErrorCode.SERVER_ERROR,
-                    "There was a problem making a request to the leader");
-              }
-            }
-          }
-        } catch (IOException e) {
-          SolrException.log(log, "Error closing HttpSolrClient", e);
-        }
-      }
-    }
-    return leaderProps;
-  }
-
-  public static void linkConfSet(SolrZkClient zkClient, String collection, String confSetName) throws KeeperException, InterruptedException {
-    String path = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection;
-    log.debug("Load collection config from:" + path);
-    byte[] data;
-    try {
-      data = zkClient.getData(path, null, null, true);
-    } catch (NoNodeException e) {
-      // if there is no node, we will try to create it;
-      // attempt the create first in case we are pre-configuring
-      ZkNodeProps props = new ZkNodeProps(CONFIGNAME_PROP, confSetName);
-      try {
-
-        zkClient.makePath(path, Utils.toJSON(props),
-            CreateMode.PERSISTENT, null, true);
-      } catch (KeeperException e2) {
-        // it's okay if the node already exists
-        if (e2.code() != KeeperException.Code.NODEEXISTS) {
-          throw e;
-        }
-        // if the create fails, fall back to setData
-        // TODO: we should consider using version
-        zkClient.setData(path, Utils.toJSON(props), true);
-      }
-      return;
-    }
-    // we found existing data, let's update it
-    ZkNodeProps props = null;
-    if (data != null) {
-      props = ZkNodeProps.load(data);
-      Map<String, Object> newProps = new HashMap<>();
-      newProps.putAll(props.getProperties());
-      newProps.put(CONFIGNAME_PROP, confSetName);
-      props = new ZkNodeProps(newProps);
-    } else {
-      props = new ZkNodeProps(CONFIGNAME_PROP, confSetName);
-    }
-
-    // TODO: we should consider using version
-    zkClient.setData(path, Utils.toJSON(props), true);
-
-  }
-
-  /**
-   * If in SolrCloud mode, upload config sets for each SolrCore in solr.xml.
-   */
-  public static void bootstrapConf(SolrZkClient zkClient, CoreContainer cc, String solrHome) throws IOException {
-
-    ZkConfigManager configManager = new ZkConfigManager(zkClient);
-
-    //List<String> allCoreNames = cfg.getAllCoreNames();
-    List<CoreDescriptor> cds = cc.getCoresLocator().discover(cc);
-
-    log.info("bootstrapping config for " + cds.size() + " cores into ZooKeeper using solr.xml from " + solrHome);
-
-    for (CoreDescriptor cd : cds) {
-      String coreName = cd.getName();
-      String confName = cd.getCollectionName();
-      if (StringUtils.isEmpty(confName))
-        confName = coreName;
-      Path udir = cd.getInstanceDir().resolve("conf");
-      log.info("Uploading directory " + udir + " with name " + confName + " for SolrCore " + coreName);
-      configManager.uploadConfigDir(udir, confName);
-    }
-  }
-
-  public ZkDistributedQueue getOverseerJobQueue() {
-    return overseerJobQueue;
-  }
-
-  public OverseerTaskQueue getOverseerCollectionQueue() {
-    return overseerCollectionQueue;
-  }
-
-  public OverseerTaskQueue getOverseerConfigSetQueue() {
-    return overseerConfigSetQueue;
-  }
-
-  public DistributedMap getOverseerRunningMap() {
-    return overseerRunningMap;
-  }
-
-  public DistributedMap getOverseerCompletedMap() {
-    return overseerCompletedMap;
-  }
-
-  public DistributedMap getOverseerFailureMap() {
-    return overseerFailureMap;
-  }
-
-  /**
-   * When an operation needs to be performed in an asynchronous mode, the asyncId needs
-   * to be claimed by calling this method, to make sure it is not a duplicate (i.e. hasn't
-   * been claimed by another request). If this method returns true, the asyncId in the
-   * parameter has been reserved for the operation, meaning that no other thread/operation
-   * can claim it. If, for whatever reason, the operation is not scheduled, the asyncId
-   * needs to be cleared using {@link #clearAsyncId(String)}.
-   * If this method returns false, no reservation has been made, and this asyncId can't
-   * be used, since it's being used by another operation (currently or in the past).
-   * @param asyncId A string representing the asyncId of an operation. Can't be null.
-   * @return True if the reservation succeeds.
-   *         False if this ID is already in use.
-   */
-  public boolean claimAsyncId(String asyncId) throws KeeperException {
-    try {
-      return asyncIdsMap.putIfAbsent(asyncId, new byte[0]);
-    } catch (InterruptedException e) {
-      log.error("Could not claim asyncId=" + asyncId, e);
-      Thread.currentThread().interrupt();
-      throw new RuntimeException(e);
-    }
-  }
-  
-  /**
-   * Clears an asyncId previously claimed by calling {@link #claimAsyncId(String)}
-   * @param asyncId A string representing the asyncId of an operation. Can't be null.
-   * @return True if the asyncId existed and was cleared.
-   *         False if the asyncId didn't exist before.
-   */
-  public boolean clearAsyncId(String asyncId) throws KeeperException {
-    try {
-      return asyncIdsMap.remove(asyncId);
-    } catch (InterruptedException e) {
-      log.error("Could not release asyncId=" + asyncId, e);
-      Thread.currentThread().interrupt();
-      throw new RuntimeException(e);
-    }
-  }
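-  // A minimal usage sketch for the claim/clear protocol above; the surrounding
-  // handler and the submit call are hypothetical:
-  //
-  //   if (zkController.claimAsyncId(asyncId)) {
-  //     try {
-  //       submitAsyncOperation(asyncId);        // hypothetical scheduling call
-  //     } catch (Exception e) {
-  //       zkController.clearAsyncId(asyncId);   // release the id if scheduling failed
-  //       throw e;
-  //     }
-  //   } // else: the asyncId is already in use and must be rejected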
-
-  public int getClientTimeout() {
-    return clientTimeout;
-  }
-
-  public Overseer getOverseer() {
-    return overseer;
-  }
-
-  public LeaderElector getOverseerElector() {
-    return overseerElector;
-  }
-
-  /**
-   * Returns the nodeName that should be used based on the specified properties.
-   *
-   * @param hostName    - must not be null or the empty string
-   * @param hostPort    - must consist only of digits, must not be null or the empty string
-   * @param hostContext - should not begin or end with a slash (leading/trailing slashes will be ignored), must not be null, may be the empty string to denote the root context
-   * @lucene.experimental
-   * @see ZkStateReader#getBaseUrlForNodeName
-   */
-  static String generateNodeName(final String hostName,
-                                 final String hostPort,
-                                 final String hostContext) {
-    try {
-      return hostName + ':' + hostPort + '_' +
-          URLEncoder.encode(trimLeadingAndTrailingSlashes(hostContext), "UTF-8");
-    } catch (UnsupportedEncodingException e) {
-      throw new Error("JVM Does not seem to support UTF-8", e);
-    }
-  }
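-  // e.g. generateNodeName("127.0.0.1", "8983", "/solr/") yields "127.0.0.1:8983_solr",
-  // while a nested context such as "solr/sub" is URL-encoded to "solr%2Fsub"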
-
-  /**
-   * Utility method for trimming leading and/or trailing slashes from
-   * its input.  May return the empty string.  May return null if and only
-   * if the input is null.
-   */
-  public static String trimLeadingAndTrailingSlashes(final String in) {
-    if (null == in) return in;
-
-    String out = in;
-    if (out.startsWith("/")) {
-      out = out.substring(1);
-    }
-    if (out.endsWith("/")) {
-      out = out.substring(0, out.length() - 1);
-    }
-    return out;
-  }
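-  // e.g. "/solr/" -> "solr" and "/" -> ""; note that only a single slash is
-  // trimmed from each end, so "//solr//" -> "/solr/"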
-
-  public void rejoinOverseerElection(String electionNode, boolean joinAtHead) {
-    try {
-      if (electionNode != null) {
-        // this call is from inside the JVM, not from CoreAdminHandler
-        if (overseerElector.getContext() == null || overseerElector.getContext().leaderSeqPath == null) {
-          overseerElector.retryElection(new OverseerElectionContext(zkClient,
-              overseer, getNodeName()), joinAtHead);
-          return;
-        }
-        if (!overseerElector.getContext().leaderSeqPath.endsWith(electionNode)) {
-          log.warn("Asked to rejoin with wrong election node : {}, current node is {}", electionNode, overseerElector.getContext().leaderSeqPath);
-          // delete it anyway; this is possible when the last attempt at deleting the election node failed
-          if (electionNode.startsWith(getNodeName())) {
-            try {
-              zkClient.delete(Overseer.OVERSEER_ELECT + LeaderElector.ELECTION_NODE + "/" + electionNode, -1, true);
-            } catch (NoNodeException e) {
-              //no problem
-            } catch (InterruptedException e) {
-              Thread.currentThread().interrupt();
-            } catch (Exception e) {
-              log.warn("Old election node exists , could not be removed ", e);
-            }
-          }
-        }
-      } else {
-        overseerElector.retryElection(overseerElector.getContext(), joinAtHead);
-      }
-    } catch (Exception e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to rejoin election", e);
-    }
-
-  }
-
-  public void rejoinShardLeaderElection(SolrParams params) {
-    try {
-      
-      String collectionName = params.get(COLLECTION_PROP);
-      String shardId = params.get(SHARD_ID_PROP);
-      String coreNodeName = params.get(CORE_NODE_NAME_PROP);
-      String coreName = params.get(CORE_NAME_PROP);
-      String electionNode = params.get(ELECTION_NODE_PROP);
-      String baseUrl = params.get(BASE_URL_PROP);
-
-      try (SolrCore core = cc.getCore(coreName)) {
-        MDCLoggingContext.setCore(core);
-        
-        log.info("Rejoin the shard leader election.");
-        
-        ContextKey contextKey = new ContextKey(collectionName, coreNodeName);
-        
-        ElectionContext prevContext = electionContexts.get(contextKey);
-        if (prevContext != null) prevContext.cancelElection();
-        
-        ZkNodeProps zkProps = new ZkNodeProps(BASE_URL_PROP, baseUrl, CORE_NAME_PROP, coreName, NODE_NAME_PROP, getNodeName(), CORE_NODE_NAME_PROP, coreNodeName);
-            
-        LeaderElector elect = ((ShardLeaderElectionContextBase) prevContext).getLeaderElector();
-        ShardLeaderElectionContext context = new ShardLeaderElectionContext(elect, shardId, collectionName,
-            coreNodeName, zkProps, this, getCoreContainer());
-            
-        context.leaderSeqPath = context.electionPath + LeaderElector.ELECTION_NODE + "/" + electionNode;
-        elect.setup(context);
-        electionContexts.put(contextKey, context);
-        
-        elect.retryElection(context, params.getBool(REJOIN_AT_HEAD_PROP, false));
-      }
-    } catch (Exception e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to rejoin election", e);
-    }
-
-  }
-
-  public void checkOverseerDesignate() {
-    try {
-      byte[] data = zkClient.getData(ZkStateReader.ROLES, null, new Stat(), 

<TRUNCATED>

[46/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/OverseerTaskProcessor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerTaskProcessor.java b/solr/core/src/java/org/apache/solr/cloud/OverseerTaskProcessor.java
deleted file mode 100644
index febeec0..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerTaskProcessor.java
+++ /dev/null
@@ -1,628 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.Closeable;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Predicate;
-
-import com.codahale.metrics.Timer;
-import com.google.common.collect.ImmutableSet;
-import org.apache.commons.io.IOUtils;
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.cloud.Overseer.LeaderStatus;
-import org.apache.solr.cloud.OverseerTaskQueue.QueueEvent;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.logging.MDCLoggingContext;
-import org.apache.solr.util.DefaultSolrThreadFactory;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.ID;
-
-/**
- * A generic processor run in the Overseer, used for handling items added
- * to a distributed work queue.  Has support for handling exclusive tasks
- * (i.e. tasks that should not run in parallel with each other).
- *
- * An {@link OverseerMessageHandlerSelector} determines which
- * {@link OverseerMessageHandler} handles specific messages in the
- * queue.
- */
-public class OverseerTaskProcessor implements Runnable, Closeable {
-
-  /**
-   * Maximum number of overseer collection operations which can be
-   * executed concurrently
-   */
-  public static final int MAX_PARALLEL_TASKS = 100;
-  public static final int MAX_BLOCKED_TASKS = 1000;
-
-  public ExecutorService tpe;
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private OverseerTaskQueue workQueue;
-  private DistributedMap runningMap;
-  private DistributedMap completedMap;
-  private DistributedMap failureMap;
-
-  // Set that tracks all currently running tasks; its elements are the zk ids of the tasks.
-  final private Set<String> runningTasks;
-
-  // Map of completed tasks, used to clean up the workQueue in zk.
-  final private HashMap<String, QueueEvent> completedTasks;
-
-  private String myId;
-
-  private ZkStateReader zkStateReader;
-
-  private boolean isClosed;
-
-  private Stats stats;
-
-  // Set of tasks that have been picked up for processing but not cleaned up from zk work-queue.
-  // It may contain tasks that have completed execution, have been entered into the completed/failed map in zk but not
-  // deleted from the work-queue as that is a batched operation.
-  final private Set<String> runningZKTasks;
-  // This map may contain tasks which were read from the work queue but could not
-  // be executed because they are blocked or the execution queue is full.
-  // This is an optimization to ensure that we do not read the same tasks
-  // again and again from ZK.
-  final private Map<String, QueueEvent> blockedTasks = new LinkedHashMap<>();
-  final private Predicate<String> excludedTasks = new Predicate<String>() {
-    @Override
-    public boolean test(String s) {
-      return runningTasks.contains(s) || blockedTasks.containsKey(s);
-    }
-
-    @Override
-    public String toString() {
-      return StrUtils.join(ImmutableSet.of(runningTasks, blockedTasks.keySet()), ',');
-    }
-
-  };
-
-  private final Object waitLock = new Object();
-
-  protected OverseerMessageHandlerSelector selector;
-
-  private OverseerNodePrioritizer prioritizer;
-
-  private String thisNode;
-
-  public OverseerTaskProcessor(ZkStateReader zkStateReader, String myId,
-                                        Stats stats,
-                                        OverseerMessageHandlerSelector selector,
-                                        OverseerNodePrioritizer prioritizer,
-                                        OverseerTaskQueue workQueue,
-                                        DistributedMap runningMap,
-                                        DistributedMap completedMap,
-                                        DistributedMap failureMap) {
-    this.zkStateReader = zkStateReader;
-    this.myId = myId;
-    this.stats = stats;
-    this.selector = selector;
-    this.prioritizer = prioritizer;
-    this.workQueue = workQueue;
-    this.runningMap = runningMap;
-    this.completedMap = completedMap;
-    this.failureMap = failureMap;
-    this.runningZKTasks = new HashSet<>();
-    this.runningTasks = new HashSet<>();
-    this.completedTasks = new HashMap<>();
-    thisNode = Utils.getMDCNode();
-  }
-
-  @Override
-  public void run() {
-    MDCLoggingContext.setNode(thisNode);
-    log.debug("Process current queue of overseer operations");
-    LeaderStatus isLeader = amILeader();
-    while (isLeader == LeaderStatus.DONT_KNOW) {
-      log.debug("am_i_leader unclear {}", isLeader);
-      isLeader = amILeader();  // not a no, not a yes, try asking again
-    }
-
-    String oldestItemInWorkQueue = null;
-    // hasLeftOverItems - used for avoiding re-execution of async tasks that were processed by a previous Overseer.
-    // This variable is set in case there's any task found on the workQueue when the OCP starts up and
-    // the id for the queue tail is used as a marker to check for the task in completed/failed map in zk.
-    // Beyond the marker, all tasks can safely be assumed to have never been executed.
-    boolean hasLeftOverItems = true;
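-    // e.g. if the tail of the work-queue at startup is the (illustrative) id "qn-0000000042",
-    // every task up to and including it is checked against the completed/failed maps before running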
-
-    try {
-      oldestItemInWorkQueue = workQueue.getTailId();
-    } catch (KeeperException e) {
-      // We don't need to handle this. This is just a fail-safe which comes in handy in skipping already processed
-      // async calls.
-      SolrException.log(log, "", e);
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-    }
-
-    if (oldestItemInWorkQueue == null)
-      hasLeftOverItems = false;
-    else
-      log.debug("Found already existing elements in the work-queue. Last element: {}", oldestItemInWorkQueue);
-
-    try {
-      prioritizer.prioritizeOverseerNodes(myId);
-    } catch (Exception e) {
-      if (!zkStateReader.getZkClient().isClosed()) {
-        log.error("Unable to prioritize overseer ", e);
-      }
-    }
-
-    // TODO: Make maxThreads configurable.
-
-    this.tpe = new ExecutorUtil.MDCAwareThreadPoolExecutor(5, MAX_PARALLEL_TASKS, 0L, TimeUnit.MILLISECONDS,
-        new SynchronousQueue<Runnable>(),
-        new DefaultSolrThreadFactory("OverseerThreadFactory"));
-    try {
-      while (!this.isClosed) {
-        try {
-          isLeader = amILeader();
-          if (LeaderStatus.NO == isLeader) {
-            break;
-          } else if (LeaderStatus.YES != isLeader) {
-            log.debug("am_i_leader unclear {}", isLeader);
-            continue; // not a no, not a yes, try asking again
-          }
-
-          log.debug("Cleaning up work-queue. #Running tasks: {}", runningTasks.size());
-          cleanUpWorkQueue();
-
-          printTrackingMaps();
-
-          boolean waited = false;
-
-          while (runningTasks.size() > MAX_PARALLEL_TASKS) {
-            synchronized (waitLock) {
-              waitLock.wait(100); // wait 100 ms or until a task completes
-            }
-            waited = true;
-          }
-
-          if (waited)
-            cleanUpWorkQueue();
-
-
-          ArrayList<QueueEvent> heads = new ArrayList<>(blockedTasks.size() + MAX_PARALLEL_TASKS);
-          heads.addAll(blockedTasks.values());
-
-          // If we already have enough items in the blocked tasks, it makes no sense
-          // to read more from the work queue; it makes sense to clear out at least
-          // a few items in the queue before we read more
-          if (heads.size() < MAX_BLOCKED_TASKS) {
-            // instead of always reading MAX_PARALLEL_TASKS items, we should only fetch as many as we can execute
-            int toFetch = Math.min(MAX_BLOCKED_TASKS - heads.size(), MAX_PARALLEL_TASKS - runningTasks.size());
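-            // e.g. 990 blocked heads and 95 running tasks give toFetch = min(1000 - 990, 100 - 95) = 5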
-            List<QueueEvent> newTasks = workQueue.peekTopN(toFetch, excludedTasks, 2000L);
-            log.debug("Got {} tasks from work-queue : [{}]", newTasks.size(), newTasks);
-            heads.addAll(newTasks);
-          } else {
-            // Prevent free-spinning this loop.
-            Thread.sleep(1000);
-          }
-
-          if (isClosed) break;
-
-          if (heads.isEmpty()) {
-            continue;
-          }
-
-          blockedTasks.clear(); // clear it now; may get refilled below.
-
-          taskBatch.batchId++;
-          boolean tooManyTasks = false;
-          for (QueueEvent head : heads) {
-            if (!tooManyTasks) {
-              synchronized (runningTasks) {
-                tooManyTasks = runningTasks.size() >= MAX_PARALLEL_TASKS;
-              }
-            }
-            if (tooManyTasks) {
-              // Too many tasks are running, just shove the rest into the "blocked" queue.
-              if (blockedTasks.size() < MAX_BLOCKED_TASKS)
-                blockedTasks.put(head.getId(), head);
-              continue;
-            }
-            if (runningZKTasks.contains(head.getId())) continue;
-            final ZkNodeProps message = ZkNodeProps.load(head.getBytes());
-            final String asyncId = message.getStr(ASYNC);
-            if (hasLeftOverItems) {
-              if (head.getId().equals(oldestItemInWorkQueue))
-                hasLeftOverItems = false;
-              if (asyncId != null && (completedMap.contains(asyncId) || failureMap.contains(asyncId))) {
-                log.debug("Found already processed task in workQueue, cleaning up. AsyncId [{}]",asyncId );
-                workQueue.remove(head);
-                continue;
-              }
-            }
-            String operation = message.getStr(Overseer.QUEUE_OPERATION);
-            if (operation == null) {
-              log.error("Msg does not have required " + Overseer.QUEUE_OPERATION + ": {}", message);
-              workQueue.remove(head);
-              continue;
-            }
-            OverseerMessageHandler messageHandler = selector.selectOverseerMessageHandler(message);
-            OverseerMessageHandler.Lock lock = messageHandler.lockTask(message, taskBatch);
-            if (lock == null) {
-              log.debug("Exclusivity check failed for [{}]", message.toString());
-              // we may end up exceeding MAX_BLOCKED_TASKS here; that is fine
-              if (blockedTasks.size() < MAX_BLOCKED_TASKS)
-                blockedTasks.put(head.getId(), head);
-              continue;
-            }
-            try {
-              markTaskAsRunning(head, asyncId);
-              log.debug("Marked task [{}] as running", head.getId());
-            } catch (KeeperException.NodeExistsException e) {
-              lock.unlock();
-              // This should never happen
-              log.error("Tried to pick up task [{}] when it was already running!", head.getId());
-              continue;
-            } catch (InterruptedException e) {
-              lock.unlock();
-              log.error("Thread interrupted while trying to pick task for execution.", head.getId());
-              Thread.currentThread().interrupt();
-              continue;
-            }
-            log.debug(messageHandler.getName() + ": Got message id: " + head.getId() + ", message: " + message);
-            Runner runner = new Runner(messageHandler, message,
-                operation, head, lock);
-            tpe.execute(runner);
-          }
-
-        } catch (KeeperException e) {
-          if (e.code() == KeeperException.Code.SESSIONEXPIRED) {
-            log.warn("Overseer cannot talk to ZK");
-            return;
-          }
-          SolrException.log(log, "", e);
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-          return;
-        } catch (Exception e) {
-          SolrException.log(log, "", e);
-        }
-      }
-    } finally {
-      this.close();
-    }
-  }
-
-  private void cleanUpWorkQueue() throws KeeperException, InterruptedException {
-    synchronized (completedTasks) {
-      for (String id : completedTasks.keySet()) {
-        workQueue.remove(completedTasks.get(id));
-        runningZKTasks.remove(id);
-      }
-      completedTasks.clear();
-    }
-  }
-
-  public void close() {
-    isClosed = true;
-    if (tpe != null) {
-      if (!tpe.isShutdown()) {
-        ExecutorUtil.shutdownAndAwaitTermination(tpe);
-      }
-    }
-    IOUtils.closeQuietly(selector);
-  }
-
-  public static List<String> getSortedOverseerNodeNames(SolrZkClient zk) throws KeeperException, InterruptedException {
-    List<String> children = null;
-    try {
-      children = zk.getChildren(Overseer.OVERSEER_ELECT + LeaderElector.ELECTION_NODE, null, true);
-    } catch (Exception e) {
-      log.warn("Error fetching overseer election nodes", e);
-      return new ArrayList<>();
-    }
-    LeaderElector.sortSeqs(children);
-    ArrayList<String> nodeNames = new ArrayList<>(children.size());
-    for (String c : children) nodeNames.add(LeaderElector.getNodeName(c));
-    return nodeNames;
-  }
-
-  public static List<String> getSortedElectionNodes(SolrZkClient zk, String path) throws KeeperException, InterruptedException {
-    List<String> children = zk.getChildren(path, null, true);
-    LeaderElector.sortSeqs(children);
-    return children;
-  }
-
-  public static String getLeaderNode(SolrZkClient zkClient) throws KeeperException, InterruptedException {
-    String id = getLeaderId(zkClient);
-    return id == null ? null : LeaderElector.getNodeName(id);
-  }
-
-  public static String getLeaderId(SolrZkClient zkClient) throws KeeperException, InterruptedException {
-    byte[] data = null;
-    try {
-      data = zkClient.getData(Overseer.OVERSEER_ELECT + "/leader", null, new Stat(), true);
-    } catch (KeeperException.NoNodeException e) {
-      return null;
-    }
-    Map m = (Map) Utils.fromJSON(data);
-    return (String) m.get(ID);
-  }
-
-  protected LeaderStatus amILeader() {
-    String statsName = "collection_am_i_leader";
-    Timer.Context timerContext = stats.time(statsName);
-    boolean success = true;
-    String propsId = null;
-    try {
-      ZkNodeProps props = ZkNodeProps.load(zkStateReader.getZkClient().getData(
-          Overseer.OVERSEER_ELECT + "/leader", null, null, true));
-      propsId = props.getStr(ID);
-      if (myId.equals(propsId)) {
-        return LeaderStatus.YES;
-      }
-    } catch (KeeperException e) {
-      success = false;
-      if (e.code() == KeeperException.Code.CONNECTIONLOSS) {
-        log.error("Connection loss while checking overseer leadership", e);
-        return LeaderStatus.DONT_KNOW;
-      } else if (e.code() != KeeperException.Code.SESSIONEXPIRED) {
-        log.warn("Error while checking overseer leadership", e);
-      } else {
-        log.debug("Session expired while checking overseer leadership", e);
-      }
-    } catch (InterruptedException e) {
-      success = false;
-      Thread.currentThread().interrupt();
-    } finally {
-      timerContext.stop();
-      if (success)  {
-        stats.success(statsName);
-      } else  {
-        stats.error(statsName);
-      }
-    }
-    log.info("According to ZK I (id={}) am no longer a leader. propsId={}", myId, propsId);
-    return LeaderStatus.NO;
-  }
-
-  public boolean isClosed() {
-    return isClosed;
-  }
-
-  @SuppressWarnings("unchecked")
-  private void markTaskAsRunning(QueueEvent head, String asyncId)
-      throws KeeperException, InterruptedException {
-    synchronized (runningZKTasks) {
-      runningZKTasks.add(head.getId());
-    }
-
-    synchronized (runningTasks) {
-      runningTasks.add(head.getId());
-    }
-
-
-    if (asyncId != null)
-      runningMap.put(asyncId, null);
-  }
-  
-  protected class Runner implements Runnable {
-    ZkNodeProps message;
-    String operation;
-    SolrResponse response;
-    QueueEvent head;
-    OverseerMessageHandler messageHandler;
-    private final OverseerMessageHandler.Lock lock;
-
-    public Runner(OverseerMessageHandler messageHandler, ZkNodeProps message, String operation, QueueEvent head, OverseerMessageHandler.Lock lock) {
-      this.message = message;
-      this.operation = operation;
-      this.head = head;
-      this.messageHandler = messageHandler;
-      this.lock = lock;
-      response = null;
-    }
-
-
-    @Override
-    public void run() {
-      String statsName = messageHandler.getTimerName(operation);
-      final Timer.Context timerContext = stats.time(statsName);
-
-      boolean success = false;
-      final String asyncId = message.getStr(ASYNC);
-      String taskKey = messageHandler.getTaskKey(message);
-
-      try {
-        try {
-          log.debug("Runner processing {}", head.getId());
-          response = messageHandler.processMessage(message, operation);
-        } finally {
-          timerContext.stop();
-          updateStats(statsName);
-        }
-
-        if (asyncId != null) {
-          if (response != null && (response.getResponse().get("failure") != null 
-              || response.getResponse().get("exception") != null)) {
-            failureMap.put(asyncId, SolrResponse.serializable(response));
-            log.debug("Updated failed map for task with zkid:[{}]", head.getId());
-          } else {
-            completedMap.put(asyncId, SolrResponse.serializable(response));
-            log.debug("Updated completed map for task with zkid:[{}]", head.getId());
-          }
-        } else {
-          head.setBytes(SolrResponse.serializable(response));
-          log.debug("Completed task:[{}]", head.getId());
-        }
-
-        markTaskComplete(head.getId(), asyncId);
-        log.debug("Marked task [{}] as completed.", head.getId());
-        printTrackingMaps();
-
-        log.debug("{}: Message id: {} complete, response: {}",
-            messageHandler.getName(), head.getId(), response.getResponse());
-        success = true;
-      } catch (KeeperException e) {
-        SolrException.log(log, "", e);
-      } catch (InterruptedException e) {
-        // Reset task from tracking data structures so that it can be retried.
-        resetTaskWithException(messageHandler, head.getId(), asyncId, taskKey, message);
-        log.warn("Resetting task {} as the thread was interrupted.", head.getId());
-        Thread.currentThread().interrupt();
-      } finally {
-        lock.unlock();
-        if (!success) {
-          // Reset task from tracking data structures so that it can be retried.
-          resetTaskWithException(messageHandler, head.getId(), asyncId, taskKey, message);
-        }
-        synchronized (waitLock){
-          waitLock.notifyAll();
-        }
-      }
-    }
-
-    private void markTaskComplete(String id, String asyncId)
-        throws KeeperException, InterruptedException {
-      synchronized (completedTasks) {
-        completedTasks.put(id, head);
-      }
-
-      synchronized (runningTasks) {
-        runningTasks.remove(id);
-      }
-
-      if (asyncId != null) {
-        if (!runningMap.remove(asyncId)) {
-          log.warn("Could not find and remove async call [{}] from the running map.", asyncId);
-        }
-      }
-
-      workQueue.remove(head);
-    }
-
-    private void resetTaskWithException(OverseerMessageHandler messageHandler, String id, String asyncId, String taskKey, ZkNodeProps message) {
-      log.warn("Resetting task: {}, requestid: {}, taskKey: {}", id, asyncId, taskKey);
-      try {
-        if (asyncId != null) {
-          if (!runningMap.remove(asyncId)) {
-            log.warn("Could not find and remove async call [{}] from the running map.", asyncId);
-          }
-        }
-
-        synchronized (runningTasks) {
-          runningTasks.remove(id);
-        }
-
-      } catch (KeeperException e) {
-        SolrException.log(log, "", e);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-      }
-
-    }
-
-    private void updateStats(String statsName) {
-      if (isSuccessful()) {
-        stats.success(statsName);
-      } else {
-        stats.error(statsName);
-        stats.storeFailureDetails(statsName, message, response);
-      }
-    }
-
-    private boolean isSuccessful() {
-      if (response == null)
-        return false;
-      return !(response.getResponse().get("failure") != null || response.getResponse().get("exception") != null);
-    }
-  }
-
-  private void printTrackingMaps() {
-    if (log.isDebugEnabled()) {
-      synchronized (runningTasks) {
-        log.debug("RunningTasks: {}", runningTasks.toString());
-      }
-      log.debug("BlockedTasks: {}", blockedTasks.keySet().toString());
-      synchronized (completedTasks) {
-        log.debug("CompletedTasks: {}", completedTasks.keySet().toString());
-      }
-      synchronized (runningZKTasks) {
-        log.debug("RunningZKTasks: {}", runningZKTasks.toString());
-      }
-    }
-  }
-
-
-
-  String getId(){
-    return myId;
-  }
-
-  /**
-   * An interface to determine which {@link OverseerMessageHandler}
-   * handles a given message.  This could be a single OverseerMessageHandler
-   * for the case where a single type of message is handled (e.g. collection
-   * messages only) , or a different handler could be selected based on the
-   * contents of the message.
-   */
-  public interface OverseerMessageHandlerSelector extends Closeable {
-    OverseerMessageHandler selectOverseerMessageHandler(ZkNodeProps message);
-  }
-
-  final private TaskBatch taskBatch = new TaskBatch();
-
-  public class TaskBatch {
-    private long batchId = 0;
-
-    public long getId() {
-      return batchId;
-    }
-
-    public int getRunningTasks() {
-      synchronized (runningTasks) {
-        return runningTasks.size();
-      }
-    }
-  }
-
-}

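For context on the OverseerMessageHandlerSelector interface deleted above, here is a
minimal sketch of an implementation. The enclosing class name (OverseerTaskProcessor),
the handler instances, and the "collection:" operation-prefix convention are
assumptions for illustration; this is not the selector that shipped.

    import java.io.IOException;
    import org.apache.solr.common.cloud.ZkNodeProps;

    // Hypothetical selector: route operations carrying a "collection:" prefix
    // to a dedicated handler, and everything else to a default handler.
    class PrefixSelector implements OverseerTaskProcessor.OverseerMessageHandlerSelector {
      private final OverseerMessageHandler collectionHandler; // supplied by the caller
      private final OverseerMessageHandler defaultHandler;    // supplied by the caller

      PrefixSelector(OverseerMessageHandler collectionHandler, OverseerMessageHandler defaultHandler) {
        this.collectionHandler = collectionHandler;
        this.defaultHandler = defaultHandler;
      }

      @Override
      public OverseerMessageHandler selectOverseerMessageHandler(ZkNodeProps message) {
        String op = message.getStr(Overseer.QUEUE_OPERATION);
        return (op != null && op.startsWith("collection:")) ? collectionHandler : defaultHandler;
      }

      @Override
      public void close() throws IOException {
        // nothing to release in this sketch
      }
    }
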
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/OverseerTaskQueue.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerTaskQueue.java b/solr/core/src/java/org/apache/solr/cloud/OverseerTaskQueue.java
deleted file mode 100644
index 66a31c5..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerTaskQueue.java
+++ /dev/null
@@ -1,339 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import com.codahale.metrics.Timer;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.TreeSet;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.function.Predicate;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.util.Pair;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A {@link ZkDistributedQueue} augmented with helper methods specific to the overseer task queues.
- * Methods specific to this subclass ignore superclass internal state and hit ZK directly.
- * This is inefficient!  But the API on this class is kind of muddy.
- */
-public class OverseerTaskQueue extends ZkDistributedQueue {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  private static final String RESPONSE_PREFIX = "qnr-";
-
-  public OverseerTaskQueue(SolrZkClient zookeeper, String dir) {
-    this(zookeeper, dir, new Stats());
-  }
-
-  public OverseerTaskQueue(SolrZkClient zookeeper, String dir, Stats stats) {
-    super(zookeeper, dir, stats);
-  }
-  
-  /**
-   * Returns true if the queue contains a task with the specified async id.
-   */
-  public boolean containsTaskWithRequestId(String requestIdKey, String requestId)
-      throws KeeperException, InterruptedException {
-
-    List<String> childNames = zookeeper.getChildren(dir, null, true);
-    stats.setQueueLength(childNames.size());
-    for (String childName : childNames) {
-      if (childName != null && childName.startsWith(PREFIX)) {
-        try {
-          byte[] data = zookeeper.getData(dir + "/" + childName, null, null, true);
-          if (data != null) {
-            ZkNodeProps message = ZkNodeProps.load(data);
-            if (message.containsKey(requestIdKey)) {
-              log.debug("Looking for {}, found {}", requestId, message.get(requestIdKey));
-              if (message.get(requestIdKey).equals(requestId)) return true;
-            }
-          }
-        } catch (KeeperException.NoNodeException e) {
-          // Another client removed the node first, try next
-        }
-      }
-    }
-
-    return false;
-  }
-
-  /**
-   * Remove the event and save the response into the corresponding response (qnr-) node.
-   */
-  public void remove(QueueEvent event) throws KeeperException,
-      InterruptedException {
-    Timer.Context time = stats.time(dir + "_remove_event");
-    try {
-      String path = event.getId();
-      String responsePath = dir + "/" + RESPONSE_PREFIX
-          + path.substring(path.lastIndexOf("-") + 1);
-      if (zookeeper.exists(responsePath, true)) {
-        zookeeper.setData(responsePath, event.getBytes(), true);
-      } else {
-        log.info("Response ZK path: {} doesn't exist. Requestor may have disconnected from ZooKeeper.", responsePath);
-      }
-      try {
-        zookeeper.delete(path, -1, true);
-      } catch (KeeperException.NoNodeException ignored) {
-      }
-    } finally {
-      time.stop();
-    }
-  }
-
-  /**
-   * Watcher that blocks until a WatchedEvent occurs for a znode.
-   */
-  static final class LatchWatcher implements Watcher {
-
-    private final Lock lock;
-    private final Condition eventReceived;
-    private WatchedEvent event;
-    private Event.EventType latchEventType;
-    
-    LatchWatcher() {
-      this(null);
-    }
-    
-    LatchWatcher(Event.EventType eventType) {
-      this.lock = new ReentrantLock();
-      this.eventReceived = lock.newCondition();
-      this.latchEventType = eventType;
-    }
-
-
-    @Override
-    public void process(WatchedEvent event) {
-      // session events are not change events, and do not remove the watcher
-      if (Event.EventType.None.equals(event.getType())) {
-        return;
-      }
-      // If latchEventType is not null, only fire if the type matches
-      log.debug("{} fired on path {} state {} latchEventType {}", event.getType(), event.getPath(), event.getState(), latchEventType);
-      if (latchEventType == null || event.getType() == latchEventType) {
-        lock.lock();
-        try {
-          this.event = event;
-          eventReceived.signalAll();
-        } finally {
-          lock.unlock();
-        }
-      }
-    }
-
-    public void await(long timeoutMs) throws InterruptedException {
-      assert timeoutMs > 0;
-      lock.lock();
-      try {
-        if (this.event != null) {
-          return;
-        }
-        eventReceived.await(timeoutMs, TimeUnit.MILLISECONDS);
-      } finally {
-        lock.unlock();
-      }
-    }
-
-    public WatchedEvent getWatchedEvent() {
-      return event;
-    }
-  }
-
-  /**
-   * Inserts data into ZooKeeper, creating the queue directory if needed.
-   *
-   * @return the path of the created node
-   */
-  private String createData(String path, byte[] data, CreateMode mode)
-      throws KeeperException, InterruptedException {
-    for (;;) {
-      try {
-        return zookeeper.create(path, data, mode, true);
-      } catch (KeeperException.NoNodeException e) {
-        try {
-          zookeeper.create(dir, new byte[0], CreateMode.PERSISTENT, true);
-        } catch (KeeperException.NodeExistsException ne) {
-          // someone created it
-        }
-      }
-    }
-  }
-  
-  /**
-   * Offer the data and wait for the response
-   * 
-   */
-  public QueueEvent offer(byte[] data, long timeout) throws KeeperException,
-      InterruptedException {
-    Timer.Context time = stats.time(dir + "_offer");
-    try {
-      // Create and watch the response node before creating the request node;
-      // otherwise we may miss the response.
-      String watchID = createResponseNode();
-
-      LatchWatcher watcher = new LatchWatcher();
-      Stat stat = zookeeper.exists(watchID, watcher, true);
-
-      // create the request node
-      createRequestNode(data, watchID);
-
-      if (stat != null) {
-        watcher.await(timeout);
-      }
-      byte[] bytes = zookeeper.getData(watchID, null, null, true);
-      // create the event before deleting the node, otherwise we can get the deleted
-      // event from the watcher.
-      QueueEvent event =  new QueueEvent(watchID, bytes, watcher.getWatchedEvent());
-      zookeeper.delete(watchID, -1, true);
-      return event;
-    } finally {
-      time.stop();
-    }
-  }
-
-  void createRequestNode(byte[] data, String watchID) throws KeeperException, InterruptedException {
-    createData(dir + "/" + PREFIX + watchID.substring(watchID.lastIndexOf("-") + 1),
-        data, CreateMode.PERSISTENT);
-  }
-
-  String createResponseNode() throws KeeperException, InterruptedException {
-    return createData(
-            dir + "/" + RESPONSE_PREFIX,
-            null, CreateMode.EPHEMERAL_SEQUENTIAL);
-  }
-
-
-  public List<QueueEvent> peekTopN(int n, Predicate<String> excludeSet, long waitMillis)
-      throws KeeperException, InterruptedException {
-    ArrayList<QueueEvent> topN = new ArrayList<>();
-
-    log.debug("Peeking for top {} elements. ExcludeSet: {}", n, excludeSet);
-    Timer.Context time;
-    if (waitMillis == Long.MAX_VALUE) time = stats.time(dir + "_peekTopN_wait_forever");
-    else time = stats.time(dir + "_peekTopN_wait" + waitMillis);
-
-    try {
-      for (Pair<String, byte[]> element : peekElements(n, waitMillis, child -> !excludeSet.test(dir + "/" + child))) {
-        topN.add(new QueueEvent(dir + "/" + element.first(),
-            element.second(), null));
-      }
-      printQueueEventsListElementIds(topN);
-      return topN;
-    } finally {
-      time.stop();
-    }
-  }
-
-  private static void printQueueEventsListElementIds(ArrayList<QueueEvent> topN) {
-    if (log.isDebugEnabled() && !topN.isEmpty()) {
-      StringBuilder sb = new StringBuilder("[");
-      for (QueueEvent queueEvent : topN) {
-        sb.append(queueEvent.getId()).append(", ");
-      }
-      sb.setLength(sb.length() - 2); // drop the trailing ", "
-      sb.append("]");
-      log.debug("Returning topN elements: {}", sb.toString());
-    }
-  }
-
-
-  /**
-   *
-   * Gets last element of the Queue without removing it.
-   */
-  public String getTailId() throws KeeperException, InterruptedException {
-    // TODO: could we use getChildren here?  Unsure what freshness guarantee the caller needs.
-    TreeSet<String> orderedChildren = fetchZkChildren(null);
-
-    for (String headNode : orderedChildren.descendingSet()) {
-      if (headNode != null) {
-        try {
-          QueueEvent queueEvent = new QueueEvent(dir + "/" + headNode,
-              zookeeper.getData(dir + "/" + headNode, null, null, true), null);
-          return queueEvent.getId();
-        } catch (KeeperException.NoNodeException e) {
-          // Another client removed the node first, try the next one
-        }
-      }
-    }
-    return null;
-  }
-  
-  public static class QueueEvent {
-    @Override
-    public int hashCode() {
-      final int prime = 31;
-      int result = 1;
-      result = prime * result + ((id == null) ? 0 : id.hashCode());
-      return result;
-    }
-    
-    @Override
-    public boolean equals(Object obj) {
-      if (this == obj) return true;
-      if (obj == null) return false;
-      if (getClass() != obj.getClass()) return false;
-      QueueEvent other = (QueueEvent) obj;
-      if (id == null) {
-        if (other.id != null) return false;
-      } else if (!id.equals(other.id)) return false;
-      return true;
-    }
-    
-    private WatchedEvent event = null;
-    private String id;
-    private byte[] bytes;
-    
-    QueueEvent(String id, byte[] bytes, WatchedEvent event) {
-      this.id = id;
-      this.bytes = bytes;
-      this.event = event;
-    }
-    
-    public void setId(String id) {
-      this.id = id;
-    }
-    
-    public String getId() {
-      return id;
-    }
-    
-    public void setBytes(byte[] bytes) {
-      this.bytes = bytes;
-    }
-    
-    public byte[] getBytes() {
-      return bytes;
-    }
-    
-    public WatchedEvent getWatchedEvent() {
-      return event;
-    }
-    
-  }
-}

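The offer() method deleted above implements a small request/response protocol over
ZooKeeper: the response node (RESPONSE_PREFIX "qnr-") is created and watched before
the request node, so a fast response cannot be missed. A hedged usage sketch follows;
the queue path and the 120-second timeout are illustrative assumptions.

    import org.apache.solr.common.cloud.SolrZkClient;
    import org.apache.solr.common.cloud.ZkNodeProps;
    import org.apache.solr.common.util.Utils;

    // Submit a task to an overseer work queue and block until the overseer
    // writes a response into the watched qnr- node, or the timeout elapses.
    static byte[] submitAndWait(SolrZkClient zk, ZkNodeProps task) throws Exception {
      OverseerTaskQueue queue = new OverseerTaskQueue(zk, "/overseer/collection-queue-work");
      OverseerTaskQueue.QueueEvent event = queue.offer(Utils.toJSON(task), 120000L);
      return event.getBytes(); // response bytes written by the overseer in remove()
    }
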
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/RecoveringCoreTermWatcher.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/RecoveringCoreTermWatcher.java b/solr/core/src/java/org/apache/solr/cloud/RecoveringCoreTermWatcher.java
deleted file mode 100644
index 007d221..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/RecoveringCoreTermWatcher.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.CoreDescriptor;
-import org.apache.solr.core.SolrCore;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Start recovery of a core if its term is less than leader's term
- */
-public class RecoveringCoreTermWatcher implements ZkShardTerms.CoreTermWatcher {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final CoreDescriptor coreDescriptor;
-  private final CoreContainer coreContainer;
-  // used to prevent redoing recovery when only the terms of other replicas change;
-  // the idea is that for a given term value of this replica we only run recovery once
-  private final AtomicLong lastTermDoRecovery;
-
-  RecoveringCoreTermWatcher(CoreDescriptor coreDescriptor, CoreContainer coreContainer) {
-    this.coreDescriptor = coreDescriptor;
-    this.coreContainer = coreContainer;
-    this.lastTermDoRecovery = new AtomicLong(-1);
-  }
-
-  @Override
-  public boolean onTermChanged(ZkShardTerms.Terms terms) {
-    if (coreContainer.isShutDown()) return false;
-
-    try (SolrCore solrCore = coreContainer.getCore(coreDescriptor.getName())) {
-      if (solrCore == null || solrCore.isClosed()) {
-        return false;
-      }
-
-      if (solrCore.getCoreDescriptor() == null || solrCore.getCoreDescriptor().getCloudDescriptor() == null) return true;
-      String coreNodeName = solrCore.getCoreDescriptor().getCloudDescriptor().getCoreNodeName();
-      if (terms.haveHighestTermValue(coreNodeName)) return true;
-      if (lastTermDoRecovery.get() < terms.getTerm(coreNodeName)) {
-        log.info("Start recovery on {} because core's term is less than leader's term", coreNodeName);
-        lastTermDoRecovery.set(terms.getTerm(coreNodeName));
-        solrCore.getUpdateHandler().getSolrCoreState().doRecovery(solrCore.getCoreContainer(), solrCore.getCoreDescriptor());
-      }
-    } catch (Exception e) {
-      log.info("Failed to watch term of core {}", coreDescriptor.getName(), e);
-      return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    RecoveringCoreTermWatcher that = (RecoveringCoreTermWatcher) o;
-
-    return coreDescriptor.getName().equals(that.coreDescriptor.getName());
-  }
-
-  @Override
-  public int hashCode() {
-    return coreDescriptor.getName().hashCode();
-  }
-}

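In the watcher deleted above, the boolean returned by onTermChanged doubles as a
registration flag: RecoveringCoreTermWatcher returns false once its core is gone or
the container is shutting down, which reads as a request to be deregistered from
ZkShardTerms. A minimal sketch of that contract; the logging-only body and the
assumed deregistration semantics are for illustration.

    // Stay registered while interested in term changes; returning false asks
    // to be dropped from the watcher list (assumed semantics, mirroring
    // RecoveringCoreTermWatcher above).
    ZkShardTerms.CoreTermWatcher watcher = new ZkShardTerms.CoreTermWatcher() {
      @Override
      public boolean onTermChanged(ZkShardTerms.Terms terms) {
        System.out.println("shard terms changed: " + terms);
        return true; // keep receiving updates
      }
    };
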
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java b/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
deleted file mode 100644
index 94e126e..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
+++ /dev/null
@@ -1,873 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.http.client.methods.HttpUriRequest;
-import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.store.Directory;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient.HttpUriRequestResponse;
-import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
-import org.apache.solr.client.solrj.request.CoreAdminRequest.WaitForState;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.client.solrj.response.SolrPingResponse;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.cloud.ZooKeeperException;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.UpdateParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.CoreDescriptor;
-import org.apache.solr.core.DirectoryFactory.DirContext;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.handler.ReplicationHandler;
-import org.apache.solr.logging.MDCLoggingContext;
-import org.apache.solr.request.LocalSolrQueryRequest;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.update.CdcrUpdateLog;
-import org.apache.solr.update.CommitUpdateCommand;
-import org.apache.solr.update.PeerSyncWithLeader;
-import org.apache.solr.update.UpdateLog;
-import org.apache.solr.update.UpdateLog.RecoveryInfo;
-import org.apache.solr.update.processor.DistributedUpdateProcessor;
-import org.apache.solr.util.RefCounted;
-import org.apache.solr.util.SolrPluginUtils;
-import org.apache.solr.util.plugin.NamedListInitializedPlugin;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class may change in the future; customisations are not supported
- * between versions in terms of API or backwards-compatibility behaviour.
- * @lucene.experimental
- */
-public class RecoveryStrategy implements Runnable, Closeable {
-
-  public static class Builder implements NamedListInitializedPlugin {
-    private NamedList args;
-    @Override
-    public void init(NamedList args) {
-      this.args = args;
-    }
-    // this should only be used from SolrCoreState
-    public RecoveryStrategy create(CoreContainer cc, CoreDescriptor cd,
-        RecoveryStrategy.RecoveryListener recoveryListener) {
-      final RecoveryStrategy recoveryStrategy = newRecoveryStrategy(cc, cd, recoveryListener);
-      SolrPluginUtils.invokeSetters(recoveryStrategy, args);
-      return recoveryStrategy;
-    }
-    protected RecoveryStrategy newRecoveryStrategy(CoreContainer cc, CoreDescriptor cd,
-        RecoveryStrategy.RecoveryListener recoveryListener) {
-      return new RecoveryStrategy(cc, cd, recoveryListener);
-    }
-  }
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private int waitForUpdatesWithStaleStatePauseMilliSeconds = Integer.getInteger("solr.cloud.wait-for-updates-with-stale-state-pause", 2500);
-  private int maxRetries = 500;
-  private int startingRecoveryDelayMilliSeconds = 5000;
-
-  public interface RecoveryListener {
-    void recovered();
-    void failed();
-  }
-  
-  private volatile boolean close = false;
-
-  private RecoveryListener recoveryListener;
-  private ZkController zkController;
-  private String baseUrl;
-  private String coreZkNodeName;
-  private ZkStateReader zkStateReader;
-  private volatile String coreName;
-  private int retries;
-  private boolean recoveringAfterStartup;
-  private CoreContainer cc;
-  private volatile HttpUriRequest prevSendPreRecoveryHttpUriRequest;
-  private final Replica.Type replicaType;
-
-  protected RecoveryStrategy(CoreContainer cc, CoreDescriptor cd, RecoveryListener recoveryListener) {
-    this.cc = cc;
-    this.coreName = cd.getName();
-    this.recoveryListener = recoveryListener;
-    zkController = cc.getZkController();
-    zkStateReader = zkController.getZkStateReader();
-    baseUrl = zkController.getBaseUrl();
-    coreZkNodeName = cd.getCloudDescriptor().getCoreNodeName();
-    replicaType = cd.getCloudDescriptor().getReplicaType();
-  }
-
-  final public int getWaitForUpdatesWithStaleStatePauseMilliSeconds() {
-    return waitForUpdatesWithStaleStatePauseMilliSeconds;
-  }
-
-  final public void setWaitForUpdatesWithStaleStatePauseMilliSeconds(int waitForUpdatesWithStaleStatePauseMilliSeconds) {
-    this.waitForUpdatesWithStaleStatePauseMilliSeconds = waitForUpdatesWithStaleStatePauseMilliSeconds;
-  }
-
-  final public int getMaxRetries() {
-    return maxRetries;
-  }
-
-  final public void setMaxRetries(int maxRetries) {
-    this.maxRetries = maxRetries;
-  }
-
-  final public int getStartingRecoveryDelayMilliSeconds() {
-    return startingRecoveryDelayMilliSeconds;
-  }
-
-  final public void setStartingRecoveryDelayMilliSeconds(int startingRecoveryDelayMilliSeconds) {
-    this.startingRecoveryDelayMilliSeconds = startingRecoveryDelayMilliSeconds;
-  }
-
-  final public boolean getRecoveringAfterStartup() {
-    return recoveringAfterStartup;
-  }
-
-  final public void setRecoveringAfterStartup(boolean recoveringAfterStartup) {
-    this.recoveringAfterStartup = recoveringAfterStartup;
-  }
-
-  // make sure any threads stop retrying
-  @Override
-  final public void close() {
-    close = true;
-    if (prevSendPreRecoveryHttpUriRequest != null) {
-      prevSendPreRecoveryHttpUriRequest.abort();
-    }
-    log.warn("Stopping recovery for core=[{}] coreNodeName=[{}]", coreName, coreZkNodeName);
-  }
-
-  final private void recoveryFailed(final SolrCore core,
-      final ZkController zkController, final String baseUrl,
-      final String shardZkNodeName, final CoreDescriptor cd) throws Exception {
-    SolrException.log(log, "Recovery failed - I give up.");
-    try {
-      zkController.publish(cd, Replica.State.RECOVERY_FAILED);
-    } finally {
-      close();
-      recoveryListener.failed();
-    }
-  }
-  
-  /**
-   * This method may change in the future; customisations are not supported
-   * between versions in terms of API or backwards-compatibility behaviour.
-   * @lucene.experimental
-   */
-  protected String getReplicateLeaderUrl(ZkNodeProps leaderprops) {
-    return new ZkCoreNodeProps(leaderprops).getCoreUrl();
-  }
-
-  final private void replicate(String nodeName, SolrCore core, ZkNodeProps leaderprops)
-      throws SolrServerException, IOException {
-
-    final String leaderUrl = getReplicateLeaderUrl(leaderprops);
-    
-    log.info("Attempting to replicate from [{}].", leaderUrl);
-    
-    // send commit
-    commitOnLeader(leaderUrl);
-    
-    // use rep handler directly, so we can do this sync rather than async
-    SolrRequestHandler handler = core.getRequestHandler(ReplicationHandler.PATH);
-    ReplicationHandler replicationHandler = (ReplicationHandler) handler;
-    
-    if (replicationHandler == null) {
-      throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
-          "Skipping recovery, no " + ReplicationHandler.PATH + " handler found");
-    }
-    
-    ModifiableSolrParams solrParams = new ModifiableSolrParams();
-    solrParams.set(ReplicationHandler.MASTER_URL, leaderUrl);
-    solrParams.set(ReplicationHandler.SKIP_COMMIT_ON_MASTER_VERSION_ZERO, replicaType == Replica.Type.TLOG);
-    // always download the tlogs from the leader when running with cdcr enabled. We need to have all the tlogs
-    // to ensure leader failover doesn't cause missing docs on the target
-    if (core.getUpdateHandler().getUpdateLog() != null && core.getUpdateHandler().getUpdateLog() instanceof CdcrUpdateLog) {
-      solrParams.set(ReplicationHandler.TLOG_FILES, true);
-    }
-    
-    if (isClosed()) return; // we check closed on return
-    boolean success = replicationHandler.doFetch(solrParams, false).getSuccessful();
-    
-    if (!success) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Replication for recovery failed.");
-    }
-    
-    // solrcloud_debug
-    if (log.isDebugEnabled()) {
-      try {
-        RefCounted<SolrIndexSearcher> searchHolder = core
-            .getNewestSearcher(false);
-        SolrIndexSearcher searcher = searchHolder.get();
-        Directory dir = core.getDirectoryFactory().get(core.getIndexDir(), DirContext.META_DATA, null);
-        try {
-          log.debug(core.getCoreContainer()
-              .getZkController().getNodeName()
-              + " replicated "
-              + searcher.count(new MatchAllDocsQuery())
-              + " from "
-              + leaderUrl
-              + " gen:"
-              + (core.getDeletionPolicy().getLatestCommit() != null ? core.getDeletionPolicy().getLatestCommit().getGeneration() : "null")
-              + " data:" + core.getDataDir()
-              + " index:" + core.getIndexDir()
-              + " newIndex:" + core.getNewIndexDir()
-              + " files:" + Arrays.asList(dir.listAll()));
-        } finally {
-          core.getDirectoryFactory().release(dir);
-          searchHolder.decref();
-        }
-      } catch (Exception e) {
-        log.debug("Error in solrcloud_debug block", e);
-      }
-    }
-
-  }
-
-  final private void commitOnLeader(String leaderUrl) throws SolrServerException,
-      IOException {
-    try (HttpSolrClient client = new HttpSolrClient.Builder(leaderUrl)
-        .withConnectionTimeout(30000)
-        .build()) {
-      UpdateRequest ureq = new UpdateRequest();
-      ureq.setParams(new ModifiableSolrParams());
-      ureq.getParams().set(DistributedUpdateProcessor.COMMIT_END_POINT, true);
-//      ureq.getParams().set(UpdateParams.OPEN_SEARCHER, onlyLeaderIndexes);// Why do we need to open searcher if "onlyLeaderIndexes"?
-      ureq.getParams().set(UpdateParams.OPEN_SEARCHER, false);
-      ureq.setAction(AbstractUpdateRequest.ACTION.COMMIT, false, true).process(
-          client);
-    }
-  }
-
-  @Override
-  final public void run() {
-
-    // set request info for logging
-    try (SolrCore core = cc.getCore(coreName)) {
-
-      if (core == null) {
-        SolrException.log(log, "SolrCore not found - cannot recover:" + coreName);
-        return;
-      }
-      MDCLoggingContext.setCore(core);
-
-      log.info("Starting recovery process. recoveringAfterStartup={}", recoveringAfterStartup);
-
-      try {
-        doRecovery(core);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        SolrException.log(log, "", e);
-        throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
-      } catch (Exception e) {
-        log.error("", e);
-        throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
-      }
-    } finally {
-      MDCLoggingContext.clear();
-    }
-  }
-  
-  final public void doRecovery(SolrCore core) throws Exception {
-    if (core.getCoreDescriptor().getCloudDescriptor().requiresTransactionLog()) {
-      doSyncOrReplicateRecovery(core);
-    } else {
-      doReplicateOnlyRecovery(core);
-    }
-  }
-
-  final private void doReplicateOnlyRecovery(SolrCore core) throws InterruptedException {
-    boolean successfulRecovery = false;
-
-//  if (core.getUpdateHandler().getUpdateLog() != null) {
-//    SolrException.log(log, "'replicate-only' recovery strategy should only be used if no update logs are present, but this core has one: "
-//        + core.getUpdateHandler().getUpdateLog());
-//    return;
-//  }
-    while (!successfulRecovery && !Thread.currentThread().isInterrupted() && !isClosed()) { // don't use interruption or it will close channels though
-      try {
-        CloudDescriptor cloudDesc = core.getCoreDescriptor().getCloudDescriptor();
-        ZkNodeProps leaderprops = zkStateReader.getLeaderRetry(
-            cloudDesc.getCollectionName(), cloudDesc.getShardId());
-        final String leaderBaseUrl = leaderprops.getStr(ZkStateReader.BASE_URL_PROP);
-        final String leaderCoreName = leaderprops.getStr(ZkStateReader.CORE_NAME_PROP);
-
-        String leaderUrl = ZkCoreNodeProps.getCoreUrl(leaderBaseUrl, leaderCoreName);
-
-        String ourUrl = ZkCoreNodeProps.getCoreUrl(baseUrl, coreName);
-
-        boolean isLeader = leaderUrl.equals(ourUrl); //TODO: We can probably delete most of this code if we say this strategy can only be used for pull replicas
-        if (isLeader && !cloudDesc.isLeader()) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "Cloud state still says we are leader.");
-        }
-        if (cloudDesc.isLeader()) {
-          assert cloudDesc.getReplicaType() != Replica.Type.PULL;
-          // we are now the leader - no one else must have been suitable
-          log.warn("We have not yet recovered - but we are now the leader!");
-          log.info("Finished recovery process.");
-          zkController.publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
-          return;
-        }
-
-
-        log.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(), leaderUrl,
-            ourUrl);
-        zkController.publish(core.getCoreDescriptor(), Replica.State.RECOVERING);
-
-        if (isClosed()) {
-          log.info("Recovery for core {} has been closed", core.getName());
-          break;
-        }
-        log.info("Starting Replication Recovery.");
-
-        try {
-          log.info("Stopping background replicate from leader process");
-          zkController.stopReplicationFromLeader(coreName);
-          replicate(zkController.getNodeName(), core, leaderprops);
-
-          if (isClosed()) {
-            log.info("Recovery for core {} has been closed", core.getName());
-            break;
-          }
-
-          log.info("Replication Recovery was successful.");
-          successfulRecovery = true;
-        } catch (Exception e) {
-          SolrException.log(log, "Error while trying to recover", e);
-        }
-
-      } catch (Exception e) {
-        SolrException.log(log, "Error while trying to recover. core=" + coreName, e);
-      } finally {
-        if (successfulRecovery) {
-          log.info("Restarting background replicate from leader process");
-          zkController.startReplicationFromLeader(coreName, false);
-          log.info("Registering as Active after recovery.");
-          try {
-            zkController.publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
-          } catch (Exception e) {
-            log.error("Could not publish as ACTIVE after successful recovery", e);
-            successfulRecovery = false;
-          }
-
-          if (successfulRecovery) {
-            close = true;
-            recoveryListener.recovered();
-          }
-        }
-      }
-
-      if (!successfulRecovery) {
-        // let's pause for a moment before trying again...
-        // TODO: we don't want to retry for some problems?
-        // Or do a back-off retry...
-        try {
-
-          if (isClosed()) {
-            log.info("Recovery for core {} has been closed", core.getName());
-            break;
-          }
-
-          log.error("Recovery failed - trying again... (" + retries + ")");
-
-          retries++;
-          if (retries >= maxRetries) {
-            SolrException.log(log, "Recovery failed - max retries exceeded (" + retries + ").");
-            try {
-              recoveryFailed(core, zkController, baseUrl, coreZkNodeName, core.getCoreDescriptor());
-            } catch (Exception e) {
-              SolrException.log(log, "Could not publish that recovery failed", e);
-            }
-            break;
-          }
-        } catch (Exception e) {
-          SolrException.log(log, "An error has occurred during recovery", e);
-        }
-
-        try {
-          // Wait an exponential interval between retries, start at 5 seconds and work up to a minute.
-          // If we're at attempt >= 4, there's no point computing pow(2, retries) because the result
-          // will always be the minimum of the two (12). Since we sleep at 5 seconds sub-intervals in
-          // order to check if we were closed, 12 is chosen as the maximum loopCount (5s * 12 = 1m).
-          int loopCount = retries < 4 ? (int) Math.min(Math.pow(2, retries), 12) : 12;
-          log.info("Wait [{}] seconds before trying to recover again (attempt={})",
-              TimeUnit.MILLISECONDS.toSeconds(loopCount * startingRecoveryDelayMilliSeconds), retries);
-          for (int i = 0; i < loopCount; i++) {
-            if (isClosed()) {
-              log.info("Recovery for core {} has been closed", core.getName());
-              break; // check if someone closed us
-            }
-            Thread.sleep(startingRecoveryDelayMilliSeconds);
-          }
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-          log.warn("Recovery was interrupted.", e);
-          close = true;
-        }
-      }
-
-    }
-    // We skip core.seedVersionBuckets(); we don't have a transaction log.
-    log.info("Finished recovery process, successful=[{}]", Boolean.toString(successfulRecovery));
-  }
-
-  // TODO: perhaps make this grab a new core each time through the loop to handle core reloads?
-  public final void doSyncOrReplicateRecovery(SolrCore core) throws Exception {
-    boolean successfulRecovery = false;
-
-    UpdateLog ulog;
-    ulog = core.getUpdateHandler().getUpdateLog();
-    if (ulog == null) {
-      SolrException.log(log, "No UpdateLog found - cannot recover.");
-      recoveryFailed(core, zkController, baseUrl, coreZkNodeName,
-          core.getCoreDescriptor());
-      return;
-    }
-
-    // we temporarily ignore peersync for tlog replicas
-    boolean firstTime = replicaType != Replica.Type.TLOG;
-
-    List<Long> recentVersions;
-    try (UpdateLog.RecentUpdates recentUpdates = ulog.getRecentUpdates()) {
-      recentVersions = recentUpdates.getVersions(ulog.getNumRecordsToKeep());
-    } catch (Exception e) {
-      SolrException.log(log, "Corrupt tlog - ignoring.", e);
-      recentVersions = new ArrayList<>(0);
-    }
-
-    List<Long> startingVersions = ulog.getStartingVersions();
-
-    if (startingVersions != null && recoveringAfterStartup) {
-      try {
-        int oldIdx = 0; // index of the start of the old list in the current list
-        long firstStartingVersion = startingVersions.size() > 0 ? startingVersions.get(0) : 0;
-        
-        for (; oldIdx < recentVersions.size(); oldIdx++) {
-          if (recentVersions.get(oldIdx) == firstStartingVersion) break;
-        }
-        
-        if (oldIdx > 0) {
-          log.info("Found new versions added after startup: num=[{}]", oldIdx);
-          log.info("currentVersions size={} range=[{} to {}]", recentVersions.size(), recentVersions.get(0), recentVersions.get(recentVersions.size()-1));
-        }
-
-        if (startingVersions.isEmpty()) {
-          log.info("startupVersions is empty");
-        } else {
-          log.info("startupVersions size={} range=[{} to {}]", startingVersions.size(), startingVersions.get(0), startingVersions.get(startingVersions.size()-1));
-        }
-      } catch (Exception e) {
-        SolrException.log(log, "Error getting recent versions.", e);
-        recentVersions = new ArrayList<>(0);
-      }
-    }
-
-    if (recoveringAfterStartup) {
-      // if we're recovering after startup (i.e. we have been down), then we need to know what the last versions were
-      // when we went down.  We may have received updates since then.
-      recentVersions = startingVersions;
-      try {
-        if (ulog.existOldBufferLog()) {
-          // this means we were previously doing a full index replication
-          // that probably didn't complete and buffering updates in the
-          // meantime.
-          log.info("Looks like a previous replication recovery did not complete - skipping peer sync.");
-          firstTime = false; // skip peersync
-        }
-      } catch (Exception e) {
-        SolrException.log(log, "Error trying to get ulog starting operation.", e);
-        firstTime = false; // skip peersync
-      }
-    }
-
-    if (replicaType == Replica.Type.TLOG) {
-      zkController.stopReplicationFromLeader(coreName);
-    }
-
-    final String ourUrl = ZkCoreNodeProps.getCoreUrl(baseUrl, coreName);
-    Future<RecoveryInfo> replayFuture = null;
-    while (!successfulRecovery && !Thread.currentThread().isInterrupted() && !isClosed()) { // don't use interruption or it will close channels though
-      try {
-        CloudDescriptor cloudDesc = core.getCoreDescriptor().getCloudDescriptor();
-        final Replica leader = pingLeader(ourUrl, core.getCoreDescriptor(), true);
-        if (isClosed()) {
-          log.info("RecoveryStrategy has been closed");
-          break;
-        }
-
-        boolean isLeader = leader.getCoreUrl().equals(ourUrl);
-        if (isLeader && !cloudDesc.isLeader()) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "Cloud state still says we are leader.");
-        }
-        if (cloudDesc.isLeader()) {
-          // we are now the leader - no one else must have been suitable
-          log.warn("We have not yet recovered - but we are now the leader!");
-          log.info("Finished recovery process.");
-          zkController.publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
-          return;
-        }
-
-        log.info("Begin buffering updates. core=[{}]", coreName);
-        // recalling buffer updates will drop the old buffer tlog
-        ulog.bufferUpdates();
-
-        log.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(), leader.getCoreUrl(),
-            ourUrl);
-        zkController.publish(core.getCoreDescriptor(), Replica.State.RECOVERING);
-        
-        
-        final Slice slice = zkStateReader.getClusterState().getCollection(cloudDesc.getCollectionName())
-            .getSlice(cloudDesc.getShardId());
-            
-        final HttpUriRequest prevRequest = prevSendPreRecoveryHttpUriRequest;
-        if (prevRequest != null) {
-          prevRequest.abort();
-        }
-        
-        if (isClosed()) {
-          log.info("RecoveryStrategy has been closed");
-          break;
-        }
-
-        sendPrepRecoveryCmd(leader.getBaseUrl(), leader.getCoreName(), slice);
-        
-        if (isClosed()) {
-          log.info("RecoveryStrategy has been closed");
-          break;
-        }
-        
-        // we wait a bit so that any updates on the leader
-        // that started before they saw recovering state 
-        // are sure to have finished (see SOLR-7141 for
-        // discussion around current value)
-        //TODO since SOLR-11216, we probably won't need this
-        try {
-          Thread.sleep(waitForUpdatesWithStaleStatePauseMilliSeconds);
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-        }
-
-        // first thing we just try to sync
-        if (firstTime) {
-          firstTime = false; // only try sync the first time through the loop
-          log.info("Attempting to PeerSync from [{}] - recoveringAfterStartup=[{}]", leader.getCoreUrl(), recoveringAfterStartup);
-          // System.out.println("Attempting to PeerSync from " + leaderUrl
-          // + " i am:" + zkController.getNodeName());
-          PeerSyncWithLeader peerSyncWithLeader = new PeerSyncWithLeader(core,
-              leader.getCoreUrl(), ulog.getNumRecordsToKeep());
-          boolean syncSuccess = peerSyncWithLeader.sync(recentVersions).isSuccess();
-          if (syncSuccess) {
-            SolrQueryRequest req = new LocalSolrQueryRequest(core,
-                new ModifiableSolrParams());
-            // force open a new searcher
-            core.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
-            req.close();
-            log.info("PeerSync stage of recovery was successful.");
-
-            // solrcloud_debug
-            cloudDebugLog(core, "synced");
-            
-            log.info("Replaying updates buffered during PeerSync.");
-            replay(core);
-
-            // sync success
-            successfulRecovery = true;
-            return;
-          }
-
-          log.info("PeerSync Recovery was not successful - trying replication.");
-        }
-
-        if (isClosed()) {
-          log.info("RecoveryStrategy has been closed");
-          break;
-        }
-        
-        log.info("Starting Replication Recovery.");
-
-        try {
-
-          replicate(zkController.getNodeName(), core, leader);
-
-          if (isClosed()) {
-            log.info("RecoveryStrategy has been closed");
-            break;
-          }
-
-          replayFuture = replay(core);
-
-          if (isClosed()) {
-            log.info("RecoveryStrategy has been closed");
-            break;
-          }
-
-          log.info("Replication Recovery was successful.");
-          successfulRecovery = true;
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-          log.warn("Recovery was interrupted", e);
-          close = true;
-        } catch (Exception e) {
-          SolrException.log(log, "Error while trying to recover", e);
-        }
-
-      } catch (Exception e) {
-        SolrException.log(log, "Error while trying to recover. core=" + coreName, e);
-      } finally {
-        if (successfulRecovery) {
-          log.info("Registering as Active after recovery.");
-          try {
-            if (replicaType == Replica.Type.TLOG) {
-              zkController.startReplicationFromLeader(coreName, true);
-            }
-            zkController.publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
-          } catch (Exception e) {
-            log.error("Could not publish as ACTIVE after successful recovery", e);
-            successfulRecovery = false;
-          }
-          
-          if (successfulRecovery) {
-            close = true;
-            recoveryListener.recovered();
-          }
-        }
-      }
-
-      if (!successfulRecovery) {
-        // let's pause for a moment before trying again...
-        // TODO: we don't want to retry for some problems?
-        // Or do a back-off retry...
-        try {
-
-          if (isClosed()) {
-            log.info("RecoveryStrategy has been closed");
-            break;
-          }
-          
-          log.error("Recovery failed - trying again... (" + retries + ")");
-          
-          retries++;
-          if (retries >= maxRetries) {
-            SolrException.log(log, "Recovery failed - max retries exceeded (" + retries + ").");
-            try {
-              recoveryFailed(core, zkController, baseUrl, coreZkNodeName, core.getCoreDescriptor());
-            } catch (Exception e) {
-              SolrException.log(log, "Could not publish that recovery failed", e);
-            }
-            break;
-          }
-        } catch (Exception e) {
-          SolrException.log(log, "An error has occurred during recovery", e);
-        }
-
-        try {
-          // Wait an exponential interval between retries, start at 5 seconds and work up to a minute.
-          // If we're at attempt >= 4, there's no point computing pow(2, retries) because the result 
-          // will always be the minimum of the two (12). Since we sleep at 5 seconds sub-intervals in
-          // order to check if we were closed, 12 is chosen as the maximum loopCount (5s * 12 = 1m).
-          int loopCount = retries < 4 ? (int) Math.min(Math.pow(2, retries), 12) : 12;
-          log.info("Wait [{}] seconds before trying to recover again (attempt={})",
-              TimeUnit.MILLISECONDS.toSeconds(loopCount * startingRecoveryDelayMilliSeconds), retries);
-          for (int i = 0; i < loopCount; i++) {
-            if (isClosed()) {
-              log.info("RecoveryStrategy has been closed");
-              break; // check if someone closed us
-            }
-            Thread.sleep(startingRecoveryDelayMilliSeconds);
-          }
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-          log.warn("Recovery was interrupted.", e);
-          close = true;
-        }
-      }
-
-    }
-
-    // if replay was skipped (possibly due to pulling a full index from the leader),
-    // then we still need to update version bucket seeds after recovery
-    if (successfulRecovery && replayFuture == null) {
-      log.info("Updating version bucket highest from index after successful recovery.");
-      core.seedVersionBuckets();
-    }
-
-    log.info("Finished recovery process, successful=[{}]", Boolean.toString(successfulRecovery));
-  }
-
-  private final Replica pingLeader(String ourUrl, CoreDescriptor coreDesc, boolean mayPutReplicaAsDown) throws Exception {
-    int numTried = 0;
-    while (true) {
-      CloudDescriptor cloudDesc = coreDesc.getCloudDescriptor();
-      DocCollection docCollection = zkStateReader.getClusterState().getCollection(cloudDesc.getCollectionName());
-      if (!isClosed() && mayPutReplicaAsDown && numTried == 1 &&
-          docCollection.getReplica(coreDesc.getCloudDescriptor().getCoreNodeName()).getState() == Replica.State.ACTIVE) {
-        // this operation may take a long time; by putting the replica into the DOWN state, clients won't query it
-        zkController.publish(coreDesc, Replica.State.DOWN);
-      }
-      numTried++;
-      Replica leaderReplica = null;
-
-      if (isClosed()) {
-        return leaderReplica;
-      }
-
-      try {
-        leaderReplica = zkStateReader.getLeaderRetry(
-            cloudDesc.getCollectionName(), cloudDesc.getShardId());
-      } catch (SolrException e) {
-        Thread.sleep(500);
-        continue;
-      }
-
-      if (leaderReplica.getCoreUrl().equals(ourUrl)) {
-        return leaderReplica;
-      }
-
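-      // Short 1-second connect/socket timeouts below: we only need to know whether the leader is reachable right now.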
-      try (HttpSolrClient httpSolrClient = new HttpSolrClient.Builder(leaderReplica.getCoreUrl())
-          .withSocketTimeout(1000)
-          .withConnectionTimeout(1000)
-          .build()) {
-        SolrPingResponse resp = httpSolrClient.ping();
-        return leaderReplica;
-      } catch (IOException e) {
-        log.info("Failed to connect leader {} on recovery, try again", leaderReplica.getBaseUrl());
-        Thread.sleep(500);
-      } catch (Exception e) {
-        if (e.getCause() instanceof IOException) {
-          log.info("Failed to connect leader {} on recovery, try again", leaderReplica.getBaseUrl());
-          Thread.sleep(500);
-        } else {
-          return leaderReplica;
-        }
-      }
-    }
-  }
-
-  public static Runnable testing_beforeReplayBufferingUpdates;
-
-  final private Future<RecoveryInfo> replay(SolrCore core)
-      throws InterruptedException, ExecutionException {
-    if (testing_beforeReplayBufferingUpdates != null) {
-      testing_beforeReplayBufferingUpdates.run();
-    }
-    if (replicaType == Replica.Type.TLOG) {
-      // roll over all updates during buffering to new tlog, make RTG available
-      SolrQueryRequest req = new LocalSolrQueryRequest(core,
-          new ModifiableSolrParams());
-      core.getUpdateHandler().getUpdateLog().copyOverBufferingUpdates(new CommitUpdateCommand(req, false));
-      req.close();
-      return null;
-    }
-    Future<RecoveryInfo> future = core.getUpdateHandler().getUpdateLog().applyBufferedUpdates();
-    if (future == null) {
-      // no replay needed
-      log.info("No replay needed.");
-    } else {
-      log.info("Replaying buffered documents.");
-      // wait for replay
-      RecoveryInfo report = future.get();
-      if (report.failed) {
-        SolrException.log(log, "Replay failed");
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Replay failed");
-      }
-    }
-
-    // the index may be ahead of the tlog's caches after recovery; calling this purges the tlog's caches
-    core.getUpdateHandler().getUpdateLog().openRealtimeSearcher();
-    
-    // solrcloud_debug
-    cloudDebugLog(core, "replayed");
-    
-    return future;
-  }
-  
-  final private void cloudDebugLog(SolrCore core, String op) {
-    if (!log.isDebugEnabled()) {
-      return;
-    }
-    try {
-      RefCounted<SolrIndexSearcher> searchHolder = core.getNewestSearcher(false);
-      SolrIndexSearcher searcher = searchHolder.get();
-      try {
-        final int totalHits = searcher.count(new MatchAllDocsQuery());
-        final String nodeName = core.getCoreContainer().getZkController().getNodeName();
-        log.debug("[{}] {} [{} total hits]", nodeName, op, totalHits);
-      } finally {
-        searchHolder.decref();
-      }
-    } catch (Exception e) {
-      log.debug("Error in solrcloud_debug block", e);
-    }
-  }
-
-  final public boolean isClosed() {
-    return close;
-  }
-  
-  final private void sendPrepRecoveryCmd(String leaderBaseUrl, String leaderCoreName, Slice slice)
-      throws SolrServerException, IOException, InterruptedException, ExecutionException {
-
-    WaitForState prepCmd = new WaitForState();
-    prepCmd.setCoreName(leaderCoreName);
-    prepCmd.setNodeName(zkController.getNodeName());
-    prepCmd.setCoreNodeName(coreZkNodeName);
-    prepCmd.setState(Replica.State.RECOVERING);
-    prepCmd.setCheckLive(true);
-    prepCmd.setOnlyIfLeader(true);
-    final Slice.State state = slice.getState();
-    if (state != Slice.State.CONSTRUCTION && state != Slice.State.RECOVERY && state != Slice.State.RECOVERY_FAILED) {
-      prepCmd.setOnlyIfLeaderActive(true);
-    }
-
-    int conflictWaitMs = zkController.getLeaderConflictResolveWait();
-    // timeout after 5 seconds more than the max timeout (conflictWait + 3 seconds) on the server side
-    int readTimeout = conflictWaitMs + 8000;
-    try (HttpSolrClient client = new HttpSolrClient.Builder(leaderBaseUrl).build()) {
-      client.setConnectionTimeout(10000);
-      client.setSoTimeout(readTimeout);
-      HttpUriRequestResponse mrr = client.httpUriRequest(prepCmd);
-      prevSendPreRecoveryHttpUriRequest = mrr.httpUriRequest;
-
-      log.info("Sending prep recovery command to [{}]; [{}]", leaderBaseUrl, prepCmd.toString());
-
-      mrr.future.get();
-    }
-  }
-}
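
A minimal standalone sketch of the capped exponential backoff in the retry loop above. The class name and the volatile `closed` flag are illustrative stand-ins for RecoveryStrategy and its isClosed(); the 5-second sub-interval is taken from the comment in the loop itself:

    public class RecoveryBackoffSketch {
      private volatile boolean closed = false;

      // Wait min(2^retries, 12) five-second sub-intervals, waking early when closed.
      void waitBeforeRetry(int retries) throws InterruptedException {
        double loopCount = retries < 4 ? Math.min(Math.pow(2, retries), 12) : 12;
        for (int i = 0; i < loopCount; i++) {
          if (closed) return; // mirrors the isClosed() check in the real loop
          Thread.sleep(5000); // one 5-second sub-interval
        }
      }
    }

With retries 0 through 3 the waits are 5s, 10s, 20s, and 40s; from retries >= 4 the cap of 12 sub-intervals gives the one-minute maximum.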

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/ReplicateFromLeader.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ReplicateFromLeader.java b/solr/core/src/java/org/apache/solr/cloud/ReplicateFromLeader.java
deleted file mode 100644
index 5fb0946..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/ReplicateFromLeader.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-
-import org.apache.lucene.index.IndexCommit;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.SolrConfig;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.handler.IndexFetcher;
-import org.apache.solr.handler.ReplicationHandler;
-import org.apache.solr.request.LocalSolrQueryRequest;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.update.CommitUpdateCommand;
-import org.apache.solr.update.SolrIndexWriter;
-import org.apache.solr.update.UpdateLog;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ReplicateFromLeader {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private CoreContainer cc;
-  private String coreName;
-
-  private ReplicationHandler replicationProcess;
-  private long lastVersion = 0;
-
-  public ReplicateFromLeader(CoreContainer cc, String coreName) {
-    this.cc = cc;
-    this.coreName = coreName;
-  }
-
-  /**
-   * Start a replication handler thread that will periodically pull indices from the shard leader
-   * @param switchTransactionLog if true, ReplicationHandler will rotate the transaction log once
-   * the replication is done
-   */
-  public void startReplication(boolean switchTransactionLog) throws InterruptedException {
-    try (SolrCore core = cc.getCore(coreName)) {
-      if (core == null) {
-        if (cc.isShutDown()) {
-          return;
-        } else {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "SolrCore not found: " + coreName + " in " + cc.getLoadedCoreNames());
-        }
-      }
-      SolrConfig.UpdateHandlerInfo uinfo = core.getSolrConfig().getUpdateHandlerInfo();
-      String pollIntervalStr = "00:00:03";
-      if (uinfo.autoCommmitMaxTime != -1) {
-        pollIntervalStr = toPollIntervalStr(uinfo.autoCommmitMaxTime/2);
-      } else if (uinfo.autoSoftCommmitMaxTime != -1) {
-        pollIntervalStr = toPollIntervalStr(uinfo.autoSoftCommmitMaxTime/2);
-      }
-      log.info("Will start replication from leader with poll interval: {}", pollIntervalStr );
-
-      NamedList<Object> slaveConfig = new NamedList<>();
-      slaveConfig.add("fetchFromLeader", Boolean.TRUE);
-      slaveConfig.add(ReplicationHandler.SKIP_COMMIT_ON_MASTER_VERSION_ZERO, switchTransactionLog);
-      slaveConfig.add("pollInterval", pollIntervalStr);
-      NamedList<Object> replicationConfig = new NamedList<>();
-      replicationConfig.add("slave", slaveConfig);
-
-      String lastCommitVersion = getCommitVersion(core);
-      if (lastCommitVersion != null) {
-        lastVersion = Long.parseLong(lastCommitVersion);
-      }
-
-      replicationProcess = new ReplicationHandler();
-      if (switchTransactionLog) {
-        replicationProcess.setPollListener((solrCore, fetchResult) -> {
-          if (fetchResult == IndexFetcher.IndexFetchResult.INDEX_FETCH_SUCCESS) {
-            String commitVersion = getCommitVersion(core);
-            if (commitVersion == null) return;
-            if (Long.parseLong(commitVersion) == lastVersion) return;
-            UpdateLog updateLog = solrCore.getUpdateHandler().getUpdateLog();
-            SolrQueryRequest req = new LocalSolrQueryRequest(core,
-                new ModifiableSolrParams());
-            CommitUpdateCommand cuc = new CommitUpdateCommand(req, false);
-            cuc.setVersion(Long.parseLong(commitVersion));
-            updateLog.commitAndSwitchToNewTlog(cuc);
-            lastVersion = Long.parseLong(commitVersion);
-          }
-        });
-      }
-      replicationProcess.init(replicationConfig);
-      replicationProcess.inform(core);
-    }
-  }
-
-  public static String getCommitVersion(SolrCore solrCore) {
-    IndexCommit commit = solrCore.getDeletionPolicy().getLatestCommit();
-    try {
-      String commitVersion = commit.getUserData().get(SolrIndexWriter.COMMIT_COMMAND_VERSION);
-      if (commitVersion == null) return null;
-      else return commitVersion;
-    } catch (Exception e) {
-      log.warn("Cannot get commit command version from index commit point ",e);
-      return null;
-    }
-  }
-
-  private static String toPollIntervalStr(int ms) {
-    int sec = ms/1000;
-    int hour = sec / 3600;
-    sec = sec % 3600;
-    int min = sec / 60;
-    sec = sec % 60;
-    return hour + ":" + min + ":" + sec;
-  }
-
-  public void stopReplication() {
-    if (replicationProcess != null) {
-      replicationProcess.close();
-    }
-  }
-}
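
As a worked example of the poll-interval derivation above: TLOG/PULL replication polls at half the (soft) autocommit max time, rendered h:m:s by toPollIntervalStr. A sketch with the conversion copied verbatim from the class (note it does not zero-pad, so computed values look like "0:0:30" rather than the "00:00:03" default literal):

    static String toPollIntervalStr(int ms) {
      int sec = ms / 1000;
      int hour = sec / 3600;
      sec = sec % 3600;
      int min = sec / 60;
      sec = sec % 60;
      return hour + ":" + min + ":" + sec;
    }

    // e.g. an autoCommit maxTime of 60000 ms polls every 30 seconds:
    // toPollIntervalStr(60000 / 2) -> "0:0:30"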

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/SizeLimitedDistributedMap.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/SizeLimitedDistributedMap.java b/solr/core/src/java/org/apache/solr/cloud/SizeLimitedDistributedMap.java
deleted file mode 100644
index 0cb6cbe..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/SizeLimitedDistributedMap.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.util.List;
-import org.apache.lucene.util.PriorityQueue;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.data.Stat;
-
-/**
- * A size limited distributed map maintained in zk.
- * Oldest znodes (as per modification time) are evicted as newer ones come in.
- *
- * When the map hits the specified maximum size, the oldest <code>maxSize / 10</code> items
- * are evicted on the next {@link #put(String, byte[])} invocation.
- */
-public class SizeLimitedDistributedMap extends DistributedMap {
-
-  private final int maxSize;
-
-  /**
-   * This observer is notified of each element deleted when this map overflows
-   */
-  private final OnOverflowObserver onOverflowObserver;
-
-  public SizeLimitedDistributedMap(SolrZkClient zookeeper, String dir, int maxSize) {
-    this(zookeeper, dir, maxSize, null);
-  }
-  
-  public SizeLimitedDistributedMap(SolrZkClient zookeeper, String dir, int maxSize, OnOverflowObserver onOverflowObserver) {
-    super(zookeeper, dir);
-    this.maxSize = maxSize;
-    this.onOverflowObserver = onOverflowObserver;
-  }
-
-  @Override
-  public void put(String trackingId, byte[] data) throws KeeperException, InterruptedException {
-    if (this.size() >= maxSize) {
-      // Bring down the size
-      List<String> children = zookeeper.getChildren(dir, null, true);
-
-      int cleanupSize = maxSize / 10;
-
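-      // The comparator is inverted, so insertWithOverflow() retains the cleanupSize
-      // smallest (oldest) mzxids and top() becomes the eviction cutoff.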
-      final PriorityQueue<Long> priorityQueue = new PriorityQueue<Long>(cleanupSize) {
-        @Override
-        protected boolean lessThan(Long a, Long b) {
-          return (a > b);
-        }
-      };
-
-      for (String child : children) {
-        Stat stat = zookeeper.exists(dir + "/" + child, null, true);
-        priorityQueue.insertWithOverflow(stat.getMzxid());
-      }
-
-      long topElementMzxId = priorityQueue.top();
-
-      for (String child : children) {
-        Stat stat = zookeeper.exists(dir + "/" + child, null, true);
-        if (stat.getMzxid() <= topElementMzxId) {
-          zookeeper.delete(dir + "/" + child, -1, true);
-          if (onOverflowObserver != null) onOverflowObserver.onChildDelete(child.substring(PREFIX.length()));
-        }
-      }
-    }
-
-    super.put(trackingId, data);
-  }
-  
-  interface OnOverflowObserver {
-    void onChildDelete(String child) throws KeeperException, InterruptedException;
-  }
-}
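
The eviction above relies on an inverted-comparator bounded priority queue to find the cutoff mzxid. A self-contained sketch of the same idea using java.util.PriorityQueue; the helper name is hypothetical, and it assumes cleanupSize >= 1 and at least cleanupSize entries:

    import java.util.Collections;
    import java.util.List;
    import java.util.PriorityQueue;

    // Returns the cutoff: every znode with mzxid <= cutoff is among the
    // cleanupSize oldest and would be deleted, as in put() above.
    static long evictionCutoff(List<Long> mzxids, int cleanupSize) {
      // Bounded max-heap: after the loop it holds the cleanupSize smallest ids,
      // with the largest of those (the cutoff) on top.
      PriorityQueue<Long> oldest = new PriorityQueue<>(cleanupSize, Collections.reverseOrder());
      for (long id : mzxids) {
        oldest.offer(id);
        if (oldest.size() > cleanupSize) {
          oldest.poll(); // drop the newest of the retained set
        }
      }
      return oldest.peek();
    }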


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
deleted file mode 100644
index 276ab94..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
+++ /dev/null
@@ -1,1383 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.solr.api.Api;
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient.Builder;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.CoreAdminRequest.RequestSyncShard;
-import org.apache.solr.client.solrj.response.RequestStatusState;
-import org.apache.solr.client.solrj.util.SolrIdentifierValidator;
-import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.OverseerSolrResponse;
-import org.apache.solr.cloud.OverseerTaskQueue;
-import org.apache.solr.cloud.OverseerTaskQueue.QueueEvent;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.cloud.ZkShardTerms;
-import org.apache.solr.cloud.overseer.SliceMutator;
-import org.apache.solr.cloud.rule.ReplicaAssigner;
-import org.apache.solr.cloud.rule.Rule;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.Aliases;
-import org.apache.solr.common.cloud.ClusterProperties;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.CollectionProperties;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.ImplicitDocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Replica.State;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkCmdExecutor;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.AutoScalingParams;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.CollectionParams.CollectionAction;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.CloudConfig;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.backup.repository.BackupRepository;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;
-import org.apache.solr.core.snapshots.SolrSnapshotManager;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.logging.MDCLoggingContext;
-import org.apache.solr.request.LocalSolrQueryRequest;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.security.AuthorizationContext;
-import org.apache.solr.security.PermissionNameProvider;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.POLICY;
-import static org.apache.solr.client.solrj.response.RequestStatusState.COMPLETED;
-import static org.apache.solr.client.solrj.response.RequestStatusState.FAILED;
-import static org.apache.solr.client.solrj.response.RequestStatusState.NOT_FOUND;
-import static org.apache.solr.client.solrj.response.RequestStatusState.RUNNING;
-import static org.apache.solr.client.solrj.response.RequestStatusState.SUBMITTED;
-import static org.apache.solr.cloud.Overseer.QUEUE_OPERATION;
-import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.COLL_PROP_PREFIX;
-import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY;
-import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE;
-import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ONLY_ACTIVE_NODES;
-import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ONLY_IF_DOWN;
-import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.REQUESTID;
-import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SHARDS_PROP;
-import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SHARD_UNIQUE;
-import static org.apache.solr.cloud.api.collections.TimeRoutedAlias.CREATE_COLLECTION_PREFIX;
-import static org.apache.solr.cloud.api.collections.TimeRoutedAlias.OPTIONAL_ROUTER_PARAMS;
-import static org.apache.solr.cloud.api.collections.TimeRoutedAlias.REQUIRED_ROUTER_PARAMS;
-import static org.apache.solr.common.SolrException.ErrorCode.BAD_REQUEST;
-import static org.apache.solr.common.cloud.DocCollection.DOC_ROUTER;
-import static org.apache.solr.common.cloud.DocCollection.RULE;
-import static org.apache.solr.common.cloud.DocCollection.SNITCH;
-import static org.apache.solr.common.cloud.DocCollection.STATE_FORMAT;
-import static org.apache.solr.common.cloud.ZkStateReader.AUTO_ADD_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
-import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_VALUE_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_TYPE;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
-import static org.apache.solr.common.params.CollectionAdminParams.COLLECTION;
-import static org.apache.solr.common.params.CollectionAdminParams.COLL_CONF;
-import static org.apache.solr.common.params.CollectionAdminParams.COUNT_PROP;
-import static org.apache.solr.common.params.CollectionAdminParams.PROPERTY_NAME;
-import static org.apache.solr.common.params.CollectionAdminParams.PROPERTY_VALUE;
-import static org.apache.solr.common.params.CollectionAdminParams.WITH_COLLECTION;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.*;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonAdminParams.IN_PLACE_MOVE;
-import static org.apache.solr.common.params.CommonAdminParams.SPLIT_METHOD;
-import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.common.params.CommonParams.TIMING;
-import static org.apache.solr.common.params.CommonParams.VALUE_LONG;
-import static org.apache.solr.common.params.CoreAdminParams.DATA_DIR;
-import static org.apache.solr.common.params.CoreAdminParams.DELETE_DATA_DIR;
-import static org.apache.solr.common.params.CoreAdminParams.DELETE_INDEX;
-import static org.apache.solr.common.params.CoreAdminParams.DELETE_INSTANCE_DIR;
-import static org.apache.solr.common.params.CoreAdminParams.DELETE_METRICS_HISTORY;
-import static org.apache.solr.common.params.CoreAdminParams.INSTANCE_DIR;
-import static org.apache.solr.common.params.CoreAdminParams.ULOG_DIR;
-import static org.apache.solr.common.params.ShardParams._ROUTE_;
-import static org.apache.solr.common.util.StrUtils.formatString;
-
-public class CollectionsHandler extends RequestHandlerBase implements PermissionNameProvider {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  protected final CoreContainer coreContainer;
-  private final CollectionHandlerApi v2Handler;
-
-  public CollectionsHandler() {
-    super();
-    // Unlike most request handlers, CoreContainer initialization
-    // should happen in the constructor...
-    this.coreContainer = null;
-    v2Handler = new CollectionHandlerApi(this);
-  }
-
-
-  /**
-   * Overloaded ctor to inject CoreContainer into the handler.
-   *
-   * @param coreContainer Core Container of the solr webapp installed.
-   */
-  public CollectionsHandler(final CoreContainer coreContainer) {
-    this.coreContainer = coreContainer;
-    v2Handler = new CollectionHandlerApi(this);
-  }
-
-  @Override
-  public PermissionNameProvider.Name getPermissionName(AuthorizationContext ctx) {
-    String action = ctx.getParams().get("action");
-    if (action == null) return PermissionNameProvider.Name.COLL_READ_PERM;
-    CollectionParams.CollectionAction collectionAction = CollectionParams.CollectionAction.get(action);
-    if (collectionAction == null) return null;
-    return collectionAction.isWrite ?
-        PermissionNameProvider.Name.COLL_EDIT_PERM :
-        PermissionNameProvider.Name.COLL_READ_PERM;
-  }
-
-  @Override
-  final public void init(NamedList args) {
-
-  }
-
-  /**
-   * The instance of CoreContainer this handler handles. This should be the CoreContainer instance that created this
-   * handler.
-   *
-   * @return a CoreContainer instance
-   */
-  public CoreContainer getCoreContainer() {
-    return this.coreContainer;
-  }
-
-  protected void copyFromClusterProp(Map<String, Object> props, String prop) throws IOException {
-    if (props.get(prop) != null) return; // if it's already specified, return
-    Object defVal = new ClusterProperties(coreContainer.getZkController().getZkStateReader().getZkClient())
-        .getClusterProperty(ImmutableList.of(CollectionAdminParams.DEFAULTS, CollectionAdminParams.COLLECTION, prop), null);
-    if (defVal != null) props.put(prop, String.valueOf(defVal));
-  }
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    // Make sure the core container is enabled
-    CoreContainer cores = getCoreContainer();
-    if (cores == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "Core container instance missing");
-    }
-
-    // Make sure that the core container is ZooKeeper-aware
-    if (!cores.isZooKeeperAware()) {
-      throw new SolrException(ErrorCode.BAD_REQUEST,
-          "Solr instance is not running in SolrCloud mode.");
-    }
-
-    // Pick the action
-    SolrParams params = req.getParams();
-    String a = params.get(CoreAdminParams.ACTION);
-    if (a != null) {
-      CollectionAction action = CollectionAction.get(a);
-      if (action == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown action: " + a);
-      }
-      CollectionOperation operation = CollectionOperation.get(action);
-      log.info("Invoked Collection Action :{} with params {} and sendToOCPQueue={}", action.toLower(), req.getParamString(), operation.sendToOCPQueue);
-      MDCLoggingContext.setCollection(req.getParams().get(COLLECTION));
-      invokeAction(req, rsp, cores, action, operation);
-    } else {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "action is a required param");
-    }
-    rsp.setHttpCaching(false);
-  }
-
-  void invokeAction(SolrQueryRequest req, SolrQueryResponse rsp, CoreContainer cores, CollectionAction action, CollectionOperation operation) throws Exception {
-    if (!coreContainer.isZooKeeperAware()) {
-      throw new SolrException(BAD_REQUEST,
-          "Invalid request. collections can be accessed only in SolrCloud mode");
-    }
-    Map<String, Object> props = operation.execute(req, rsp, this);
-    if (props == null) {
-      return;
-    }
-
-    String asyncId = req.getParams().get(ASYNC);
-    if (asyncId != null) {
-      props.put(ASYNC, asyncId);
-    }
-
-    props.put(QUEUE_OPERATION, operation.action.toLower());
-
-    if (operation.sendToOCPQueue) {
-      ZkNodeProps zkProps = new ZkNodeProps(props);
-      SolrResponse overseerResponse = sendToOCPQueue(zkProps, operation.timeOut);
-      rsp.getValues().addAll(overseerResponse.getResponse());
-      Exception exp = overseerResponse.getException();
-      if (exp != null) {
-        rsp.setException(exp);
-      }
-
-      //TODO yuck; shouldn't create-collection at the overseer do this?  (conditionally perhaps)
-      if (action.equals(CollectionAction.CREATE) && asyncId == null) {
-        if (rsp.getException() == null) {
-          waitForActiveCollection(zkProps.getStr(NAME), cores, overseerResponse);
-        }
-      }
-
-    } else {
-      // submits and doesn't wait for anything (no response)
-      Overseer.getStateUpdateQueue(coreContainer.getZkController().getZkClient()).offer(Utils.toJSON(props));
-    }
-
-  }
-
-
-  static final Set<String> KNOWN_ROLES = ImmutableSet.of("overseer");
-
-  /*
-   * In SOLR-11739 we changed the way async IDs are checked to decide whether one has
-   * already been used. For backward compatibility, we continue to check the old way
-   * (meaning, in all the queues) for now. This extra check should be removed
-   * in Solr 9.
-   */
-  private static final boolean CHECK_ASYNC_ID_BACK_COMPAT_LOCATIONS = true;
-
-  public static long DEFAULT_COLLECTION_OP_TIMEOUT = 180 * 1000;
-
-  public SolrResponse sendToOCPQueue(ZkNodeProps m) throws KeeperException, InterruptedException {
-    return sendToOCPQueue(m, DEFAULT_COLLECTION_OP_TIMEOUT);
-  }
-
-  public SolrResponse sendToOCPQueue(ZkNodeProps m, long timeout) throws KeeperException, InterruptedException {
-    String operation = m.getStr(QUEUE_OPERATION);
-    if (operation == null) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "missing key " + QUEUE_OPERATION);
-    }
-    if (m.get(ASYNC) != null) {
-
-       String asyncId = m.getStr(ASYNC);
-
-       if (asyncId.equals("-1")) {
-         throw new SolrException(ErrorCode.BAD_REQUEST, "requestid can not be -1. It is reserved for cleanup purposes.");
-       }
-
-       NamedList<String> r = new NamedList<>();
-
-       if (CHECK_ASYNC_ID_BACK_COMPAT_LOCATIONS && (
-           coreContainer.getZkController().getOverseerCompletedMap().contains(asyncId) ||
-           coreContainer.getZkController().getOverseerFailureMap().contains(asyncId) ||
-           coreContainer.getZkController().getOverseerRunningMap().contains(asyncId) ||
-           overseerCollectionQueueContains(asyncId))) {
-         // for back compatibility, check in the old places. This can be removed in Solr 9
-         r.add("error", "Task with the same requestid already exists.");
-       } else {
-         if (coreContainer.getZkController().claimAsyncId(asyncId)) {
-           boolean success = false;
-           try {
-             coreContainer.getZkController().getOverseerCollectionQueue()
-             .offer(Utils.toJSON(m));
-             success = true;
-           } finally {
-             if (!success) {
-               try {
-                 coreContainer.getZkController().clearAsyncId(asyncId);
-               } catch (Exception e) {
-                 // let the original exception bubble up
-                 log.error("Unable to release async ID={}", asyncId, e);
-                 SolrZkClient.checkInterrupted(e);
-               }
-             }
-           }
-         } else {
-           r.add("error", "Task with the same requestid already exists.");
-         }
-       }
-       r.add(CoreAdminParams.REQUESTID, (String) m.get(ASYNC));
-
-      return new OverseerSolrResponse(r);
-    }
-
-    long time = System.nanoTime();
-    QueueEvent event = coreContainer.getZkController()
-        .getOverseerCollectionQueue()
-        .offer(Utils.toJSON(m), timeout);
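-    // A non-null payload below is the overseer's serialized response; otherwise the
-    // branches classify the failure as a timeout, a watcher event, or an unknown case.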
-    if (event.getBytes() != null) {
-      return SolrResponse.deserialize(event.getBytes());
-    } else {
-      if (System.nanoTime() - time >= TimeUnit.NANOSECONDS.convert(timeout, TimeUnit.MILLISECONDS)) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, operation
-            + " the collection time out: " + timeout / 1000 + "s");
-      } else if (event.getWatchedEvent() != null) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, operation
-            + " the collection error [Watcher fired on path: "
-            + event.getWatchedEvent().getPath() + " state: "
-            + event.getWatchedEvent().getState() + " type "
-            + event.getWatchedEvent().getType() + "]");
-      } else {
-        throw new SolrException(ErrorCode.SERVER_ERROR, operation
-            + " the collection unknown case");
-      }
-    }
-  }
-
-  private boolean overseerCollectionQueueContains(String asyncId) throws KeeperException, InterruptedException {
-    OverseerTaskQueue collectionQueue = coreContainer.getZkController().getOverseerCollectionQueue();
-    return collectionQueue.containsTaskWithRequestId(ASYNC, asyncId);
-  }
-
-  /**
-   * Copy prefixed params into a map.  There must only be one value for these parameters.
-   *
-   * @param params The source of params from which copies should be made
-   * @param props The map into which param names and values should be copied as keys and values respectively
-   * @param prefix The prefix to select.
-   * @return the map supplied in the props parameter, modified to contain the prefixed params.
-   */
-  private static Map<String, Object> copyPropertiesWithPrefix(SolrParams params, Map<String, Object> props, String prefix) {
-    Iterator<String> iter =  params.getParameterNamesIterator();
-    while (iter.hasNext()) {
-      String param = iter.next();
-      if (param.startsWith(prefix)) {
-        final String[] values = params.getParams(param);
-        if (values.length != 1) {
-          throw new SolrException(BAD_REQUEST, "Only one value can be present for parameter " + param);
-        }
-        props.put(param, values[0]);
-      }
-    }
-    return props;
-  }
-
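-  // Builds params from alternating key/value pairs, e.g. params("action", "CREATE", "name", "coll1").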
-  public static ModifiableSolrParams params(String... params) {
-    ModifiableSolrParams msp = new ModifiableSolrParams();
-    for (int i = 0; i < params.length; i += 2) {
-      msp.add(params[i], params[i + 1]);
-    }
-    return msp;
-  }
-
-  //////////////////////// SolrInfoMBeans methods //////////////////////
-
-  @Override
-  public String getDescription() {
-    return "Manage SolrCloud Collections";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-
-  private static void createSysConfigSet(CoreContainer coreContainer) throws KeeperException, InterruptedException {
-    SolrZkClient zk = coreContainer.getZkController().getZkStateReader().getZkClient();
-    ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zk.getZkClientTimeout());
-    cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE, zk);
-    cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL, zk);
-
-    try {
-      String path = ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/schema.xml";
-      byte[] data = IOUtils.toByteArray(CollectionsHandler.class.getResourceAsStream("/SystemCollectionSchema.xml"));
-      assert data != null && data.length > 0;
-      cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
-      path = ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/solrconfig.xml";
-      data = IOUtils.toByteArray(CollectionsHandler.class.getResourceAsStream("/SystemCollectionSolrConfig.xml"));
-      assert data != null && data.length > 0;
-      cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
-    } catch (IOException e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, e);
-    }
-
-
-  }
-
-  private static void addStatusToResponse(NamedList<Object> results, RequestStatusState state, String msg) {
-    SimpleOrderedMap<String> status = new SimpleOrderedMap<>();
-    status.add("state", state.getKey());
-    status.add("msg", msg);
-    results.add("status", status);
-  }
-
-  public enum CollectionOperation implements CollectionOp {
-    CREATE_OP(CREATE, (req, rsp, h) -> {
-      Map<String, Object> props = copy(req.getParams().required(), null, NAME);
-      props.put("fromApi", "true");
-      copy(req.getParams(), props,
-          REPLICATION_FACTOR,
-          COLL_CONF,
-          NUM_SLICES,
-          MAX_SHARDS_PER_NODE,
-          CREATE_NODE_SET,
-          CREATE_NODE_SET_SHUFFLE,
-          SHARDS_PROP,
-          STATE_FORMAT,
-          AUTO_ADD_REPLICAS,
-          RULE,
-          SNITCH,
-          PULL_REPLICAS,
-          TLOG_REPLICAS,
-          NRT_REPLICAS,
-          POLICY,
-          WAIT_FOR_FINAL_STATE,
-          WITH_COLLECTION);
-
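-      // default to stateFormat 2 (per-collection state.json) unless the caller overrode it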
-      props.putIfAbsent(STATE_FORMAT, "2");
-
-      if (props.get(REPLICATION_FACTOR) != null && props.get(NRT_REPLICAS) != null) {
-        //TODO: Remove this in 8.0 . Keep this for SolrJ client back-compat. See SOLR-11676 for more details
-        int replicationFactor = Integer.parseInt((String) props.get(REPLICATION_FACTOR));
-        int nrtReplicas = Integer.parseInt((String) props.get(NRT_REPLICAS));
-        if (replicationFactor != nrtReplicas) {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-              "Cannot specify both replicationFactor and nrtReplicas as they mean the same thing");
-        }
-      }
-      if (props.get(REPLICATION_FACTOR) != null) {
-        props.put(NRT_REPLICAS, props.get(REPLICATION_FACTOR));
-      } else if (props.get(NRT_REPLICAS) != null) {
-        props.put(REPLICATION_FACTOR, props.get(NRT_REPLICAS));
-      }
-
-      addMapObject(props, RULE);
-      addMapObject(props, SNITCH);
-      verifyRuleParams(h.coreContainer, props);
-      final String collectionName = SolrIdentifierValidator.validateCollectionName((String) props.get(NAME));
-      final String shardsParam = (String) props.get(SHARDS_PROP);
-      if (StringUtils.isNotEmpty(shardsParam)) {
-        verifyShardsParam(shardsParam);
-      }
-      if (CollectionAdminParams.SYSTEM_COLL.equals(collectionName)) {
-        //We must always create a .system collection with only a single shard
-        props.put(NUM_SLICES, 1);
-        props.remove(SHARDS_PROP);
-        createSysConfigSet(h.coreContainer);
-
-      }
-      if (shardsParam == null) h.copyFromClusterProp(props, NUM_SLICES);
-      for (String prop : ImmutableSet.of(NRT_REPLICAS, PULL_REPLICAS, TLOG_REPLICAS))
-        h.copyFromClusterProp(props, prop);
-      copyPropertiesWithPrefix(req.getParams(), props, COLL_PROP_PREFIX);
-      return copyPropertiesWithPrefix(req.getParams(), props, "router.");
-
-    }),
-    DELETE_OP(DELETE, (req, rsp, h) -> copy(req.getParams().required(), null, NAME)),
-
-    RELOAD_OP(RELOAD, (req, rsp, h) -> copy(req.getParams().required(), null, NAME)),
-
-    SYNCSHARD_OP(SYNCSHARD, (req, rsp, h) -> {
-      String collection = req.getParams().required().get("collection");
-      String shard = req.getParams().required().get("shard");
-
-      ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-
-      DocCollection docCollection = clusterState.getCollection(collection);
-      ZkNodeProps leaderProps = docCollection.getLeader(shard);
-      ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
-
-      try (HttpSolrClient client = new Builder(nodeProps.getBaseUrl())
-          .withConnectionTimeout(15000)
-          .withSocketTimeout(60000)
-          .build()) {
-        RequestSyncShard reqSyncShard = new RequestSyncShard();
-        reqSyncShard.setCollection(collection);
-        reqSyncShard.setShard(shard);
-        reqSyncShard.setCoreName(nodeProps.getCoreName());
-        client.request(reqSyncShard);
-      }
-      return null;
-    }),
-
-    CREATEALIAS_OP(CREATEALIAS, (req, rsp, h) -> {
-      String alias = req.getParams().get(NAME);
-      SolrIdentifierValidator.validateAliasName(alias);
-      String collections = req.getParams().get("collections");
-      Map<String, Object> result = copy(req.getParams(), null, REQUIRED_ROUTER_PARAMS);
-      copy(req.getParams(), result, OPTIONAL_ROUTER_PARAMS);
-      if (collections != null) {
-        if (result.size() > 1) { // (NAME should be there, and if it's not we will fail below)
-          throw new SolrException(BAD_REQUEST, "Collections cannot be specified when creating a time routed alias.");
-        }
-        // regular alias creation...
-        return copy(req.getParams().required(), null, NAME, "collections");
-      }
-
-      // Ok so we are creating a time routed alias from here
-
-      // for validation....
-      copy(req.getParams().required(), null, REQUIRED_ROUTER_PARAMS);
-      ModifiableSolrParams createCollParams = new ModifiableSolrParams(); // without prefix
-
-      // add to result params that start with "create-collection.".
-      //   Additionally, save these without the prefix to createCollParams
-      for (Map.Entry<String, String[]> entry : req.getParams()) {
-        final String p = entry.getKey();
-        if (p.startsWith(CREATE_COLLECTION_PREFIX)) {
-          // This is what SolrParams#getAll(Map, Collection)} does
-          final String[] v = entry.getValue();
-          if (v.length == 1) {
-            result.put(p, v[0]);
-          } else {
-            result.put(p, v);
-          }
-          createCollParams.set(p.substring(CREATE_COLLECTION_PREFIX.length()), v);
-        }
-      }
-
-      // Verify that the create-collection prefix'ed params appear to be valid.
-      if (createCollParams.get(NAME) != null) {
-        throw new SolrException(BAD_REQUEST, "routed aliases calculate names for their " +
-            "dependent collections, you cannot specify the name.");
-      }
-      if (createCollParams.get(COLL_CONF) == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "We require an explicit " + COLL_CONF );
-      }
-      // note: could insist on a config name here as well.... or wait to throw at overseer
-      createCollParams.add(NAME, "TMP_name_TMP_name_TMP"); // just to pass validation
-      CREATE_OP.execute(new LocalSolrQueryRequest(null, createCollParams), rsp, h); // ignore results
-
-      return result;
-    }),
-
-    DELETEALIAS_OP(DELETEALIAS, (req, rsp, h) -> copy(req.getParams().required(), null, NAME)),
-
-    /**
-     * Change properties for an alias (use CREATEALIAS_OP to change the actual value of the alias)
-     */
-    ALIASPROP_OP(ALIASPROP, (req, rsp, h) -> {
-      Map<String, Object> params = copy(req.getParams().required(), null, NAME);
-
-      // Note: success/no-op in the event of no properties supplied is intentional. It keeps the code simple and
-      // gives api-callers one less case to check for.
-      return convertPrefixToMap(req.getParams(), params, "property");
-    }),
-
-    /**
-     * List the aliases and associated properties.
-     */
-    LISTALIASES_OP(LISTALIASES, (req, rsp, h) -> {
-      ZkStateReader zkStateReader = h.coreContainer.getZkController().getZkStateReader();
-      // if someone calls listAliases, let's ensure we return an up-to-date response
-      zkStateReader.aliasesManager.update();
-      Aliases aliases = zkStateReader.getAliases();
-      if (aliases != null) {
-        // the aliases themselves...
-        rsp.getValues().add("aliases", aliases.getCollectionAliasMap());
-        // Any properties for the above aliases.
-        Map<String,Map<String,String>> meta = new LinkedHashMap<>();
-        for (String alias : aliases.getCollectionAliasListMap().keySet()) {
-          Map<String, String> collectionAliasProperties = aliases.getCollectionAliasProperties(alias);
-          if (collectionAliasProperties != null) {
-            meta.put(alias, collectionAliasProperties);
-          }
-        }
-        rsp.getValues().add("properties", meta);
-      }
-      return null;
-    }),
-    SPLITSHARD_OP(SPLITSHARD, DEFAULT_COLLECTION_OP_TIMEOUT * 5, true, (req, rsp, h) -> {
-      String name = req.getParams().required().get(COLLECTION_PROP);
-      // TODO : add support for multiple shards
-      String shard = req.getParams().get(SHARD_ID_PROP);
-      String rangesStr = req.getParams().get(CoreAdminParams.RANGES);
-      String splitKey = req.getParams().get("split.key");
-
-      if (splitKey == null && shard == null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "At least one of shard, or split.key should be specified.");
-      }
-      if (splitKey != null && shard != null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Only one of 'shard' or 'split.key' should be specified");
-      }
-      if (splitKey != null && rangesStr != null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Only one of 'ranges' or 'split.key' should be specified");
-      }
-
-      Map<String, Object> map = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP,
-          "split.key",
-          CoreAdminParams.RANGES,
-          WAIT_FOR_FINAL_STATE,
-          TIMING,
-          SPLIT_METHOD);
-      return copyPropertiesWithPrefix(req.getParams(), map, COLL_PROP_PREFIX);
-    }),
-    DELETESHARD_OP(DELETESHARD, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP);
-      copy(req.getParams(), map,
-          DELETE_INDEX,
-          DELETE_DATA_DIR,
-          DELETE_INSTANCE_DIR,
-          DELETE_METRICS_HISTORY);
-      return map;
-    }),
-    FORCELEADER_OP(FORCELEADER, (req, rsp, h) -> {
-      forceLeaderElection(req, h);
-      return null;
-    }),
-    CREATESHARD_OP(CREATESHARD, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP);
-      ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-      final String newShardName = SolrIdentifierValidator.validateShardName(req.getParams().get(SHARD_ID_PROP));
-      if (!ImplicitDocRouter.NAME.equals(((Map) clusterState.getCollection(req.getParams().get(COLLECTION_PROP)).get(DOC_ROUTER)).get(NAME)))
-        throw new SolrException(ErrorCode.BAD_REQUEST, "shards can be added only to 'implicit' collections");
-      copy(req.getParams(), map,
-          REPLICATION_FACTOR,
-          NRT_REPLICAS,
-          TLOG_REPLICAS,
-          PULL_REPLICAS,
-          CREATE_NODE_SET,
-          WAIT_FOR_FINAL_STATE);
-      return copyPropertiesWithPrefix(req.getParams(), map, COLL_PROP_PREFIX);
-    }),
-    DELETEREPLICA_OP(DELETEREPLICA, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP);
-
-      return copy(req.getParams(), map,
-          DELETE_INDEX,
-          DELETE_DATA_DIR,
-          DELETE_INSTANCE_DIR,
-          DELETE_METRICS_HISTORY,
-              COUNT_PROP, REPLICA_PROP,
-              SHARD_ID_PROP,
-          ONLY_IF_DOWN);
-    }),
-    MIGRATE_OP(MIGRATE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, COLLECTION_PROP, "split.key", "target.collection");
-      return copy(req.getParams(), map, "forward.timeout");
-    }),
-    ADDROLE_OP(ADDROLE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
-      if (!KNOWN_ROLES.contains(map.get("role")))
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
-      return map;
-    }),
-    REMOVEROLE_OP(REMOVEROLE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
-      if (!KNOWN_ROLES.contains(map.get("role")))
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
-      return map;
-    }),
-    CLUSTERPROP_OP(CLUSTERPROP, (req, rsp, h) -> {
-      String name = req.getParams().required().get(NAME);
-      String val = req.getParams().get(VALUE_LONG);
-      ClusterProperties cp = new ClusterProperties(h.coreContainer.getZkController().getZkClient());
-      cp.setClusterProperty(name, val);
-      return null;
-    }),
-    COLLECTIONPROP_OP(COLLECTIONPROP, (req, rsp, h) -> {
-      String collection = req.getParams().required().get(NAME);
-      String name = req.getParams().required().get(PROPERTY_NAME);
-      String val = req.getParams().get(PROPERTY_VALUE);
-      CollectionProperties cp = new CollectionProperties(h.coreContainer.getZkController().getZkClient());
-      cp.setCollectionProperty(collection, name, val);
-      return null;
-    }),
-    REQUESTSTATUS_OP(REQUESTSTATUS, (req, rsp, h) -> {
-      req.getParams().required().check(REQUESTID);
-
-      final CoreContainer coreContainer1 = h.coreContainer;
-      final String requestId = req.getParams().get(REQUESTID);
-      final ZkController zkController = coreContainer1.getZkController();
-
-      final NamedList<Object> results = new NamedList<>();
-      if (zkController.getOverseerCompletedMap().contains(requestId)) {
-        final byte[] mapEntry = zkController.getOverseerCompletedMap().get(requestId);
-        rsp.getValues().addAll(SolrResponse.deserialize(mapEntry).getResponse());
-        addStatusToResponse(results, COMPLETED, "found [" + requestId + "] in completed tasks");
-      } else if (zkController.getOverseerFailureMap().contains(requestId)) {
-        final byte[] mapEntry = zkController.getOverseerFailureMap().get(requestId);
-        rsp.getValues().addAll(SolrResponse.deserialize(mapEntry).getResponse());
-        addStatusToResponse(results, FAILED, "found [" + requestId + "] in failed tasks");
-      } else if (zkController.getOverseerRunningMap().contains(requestId)) {
-        addStatusToResponse(results, RUNNING, "found [" + requestId + "] in running tasks");
-      } else if (h.overseerCollectionQueueContains(requestId)) {
-        addStatusToResponse(results, SUBMITTED, "found [" + requestId + "] in submitted tasks");
-      } else {
-        addStatusToResponse(results, NOT_FOUND, "Did not find [" + requestId + "] in any tasks queue");
-      }
-
-      final SolrResponse response = new OverseerSolrResponse(results);
-      rsp.getValues().addAll(response.getResponse());
-      return null;
-    }),
-    DELETESTATUS_OP(DELETESTATUS, new CollectionOp() {
-      @SuppressWarnings("unchecked")
-      @Override
-      public Map<String, Object> execute(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
-        final CoreContainer coreContainer = h.coreContainer;
-        final String requestId = req.getParams().get(REQUESTID);
-        final ZkController zkController = coreContainer.getZkController();
-        Boolean flush = req.getParams().getBool(CollectionAdminParams.FLUSH, false);
-
-        if (requestId == null && !flush) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "Either requestid or flush parameter must be specified.");
-        }
-
-        if (requestId != null && flush) {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-              "Both requestid and flush parameters can not be specified together.");
-        }
-
-        if (flush) {
-          Collection<String> completed = zkController.getOverseerCompletedMap().keys();
-          Collection<String> failed = zkController.getOverseerFailureMap().keys();
-          for (String asyncId:completed) {
-            zkController.getOverseerCompletedMap().remove(asyncId);
-            zkController.clearAsyncId(asyncId);
-          }
-          for (String asyncId:failed) {
-            zkController.getOverseerFailureMap().remove(asyncId);
-            zkController.clearAsyncId(asyncId);
-          }
-          rsp.getValues().add("status", "successfully cleared stored collection api responses");
-          return null;
-        } else {
-          // Request to cleanup
-          if (zkController.getOverseerCompletedMap().remove(requestId)) {
-            zkController.clearAsyncId(requestId);
-            rsp.getValues().add("status", "successfully removed stored response for [" + requestId + "]");
-          } else if (zkController.getOverseerFailureMap().remove(requestId)) {
-            zkController.clearAsyncId(requestId);
-            rsp.getValues().add("status", "successfully removed stored response for [" + requestId + "]");
-          } else {
-            rsp.getValues().add("status", "[" + requestId + "] not found in stored responses");
-            // Don't call zkController.clearAsyncId for this, since it could be a running/pending task
-          }
-        }
-        return null;
-      }
-    }),
-    ADDREPLICA_OP(ADDREPLICA, (req, rsp, h) -> {
-      Map<String, Object> props = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          "node",
-          SHARD_ID_PROP,
-          _ROUTE_,
-          CoreAdminParams.NAME,
-          INSTANCE_DIR,
-          DATA_DIR,
-          ULOG_DIR,
-          REPLICA_TYPE,
-          WAIT_FOR_FINAL_STATE,
-          NRT_REPLICAS,
-          TLOG_REPLICAS,
-          PULL_REPLICAS,
-          CREATE_NODE_SET);
-      return copyPropertiesWithPrefix(req.getParams(), props, COLL_PROP_PREFIX);
-    }),
-    OVERSEERSTATUS_OP(OVERSEERSTATUS, (req, rsp, h) -> (Map) new LinkedHashMap<>()),
-
-    /**
-     * Handle list collection request.
-     * Do list collection request to zk host
-     */
-    LIST_OP(LIST, (req, rsp, h) -> {
-      NamedList<Object> results = new NamedList<>();
-      Map<String, DocCollection> collections = h.coreContainer.getZkController().getZkStateReader().getClusterState().getCollectionsMap();
-      List<String> collectionList = new ArrayList<>(collections.keySet());
-      results.add("collections", collectionList);
-      SolrResponse response = new OverseerSolrResponse(results);
-      rsp.getValues().addAll(response.getResponse());
-      return null;
-    }),
-    /**
-     * Handle cluster status request.
-     * Can return status per specific collection/shard or per all collections.
-     */
-    CLUSTERSTATUS_OP(CLUSTERSTATUS, (req, rsp, h) -> {
-      Map<String, Object> all = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP,
-          _ROUTE_);
-      new ClusterStatus(h.coreContainer.getZkController().getZkStateReader(),
-          new ZkNodeProps(all)).getClusterStatus(rsp.getValues());
-      return null;
-    }),
-    UTILIZENODE_OP(UTILIZENODE, (req, rsp, h) -> {
-      return copy(req.getParams().required(), null, AutoScalingParams.NODE);
-    }),
-    ADDREPLICAPROP_OP(ADDREPLICAPROP, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          PROPERTY_PROP,
-          SHARD_ID_PROP,
-          REPLICA_PROP,
-          PROPERTY_VALUE_PROP);
-      copy(req.getParams(), map, SHARD_UNIQUE);
-      String property = (String) map.get(PROPERTY_PROP);
-      if (!property.startsWith(COLL_PROP_PREFIX)) {
-        property = COLL_PROP_PREFIX + property;
-      }
-
-      boolean uniquePerSlice = Boolean.parseBoolean((String) map.get(SHARD_UNIQUE));
-
-      // Check if we're trying to set a property with parameters that allow us to set the property on multiple replicas
-      // in a slice on properties that are known to only be one-per-slice and error out if so.
-      if (StringUtils.isNotBlank((String) map.get(SHARD_UNIQUE)) &&
-          SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(property.toLowerCase(Locale.ROOT)) &&
-          uniquePerSlice == false) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Overseer replica property command received for property " + property +
-                " with the " + SHARD_UNIQUE +
-                " parameter set to something other than 'true'. No action taken.");
-      }
-      return map;
-    }),
-    DELETEREPLICAPROP_OP(DELETEREPLICAPROP, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          PROPERTY_PROP,
-          SHARD_ID_PROP,
-          REPLICA_PROP);
-      return copy(req.getParams(), map, PROPERTY_PROP);
-    }),
-    BALANCESHARDUNIQUE_OP(BALANCESHARDUNIQUE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          PROPERTY_PROP);
-      Boolean shardUnique = Boolean.parseBoolean(req.getParams().get(SHARD_UNIQUE));
-      String prop = req.getParams().get(PROPERTY_PROP).toLowerCase(Locale.ROOT);
-      if (!StringUtils.startsWith(prop, COLL_PROP_PREFIX)) {
-        prop = COLL_PROP_PREFIX + prop;
-      }
-
-      if (!shardUnique && !SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(prop)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Balancing properties amongst replicas in a slice requires that"
-            + " the property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'. " +
-            " Property: " + prop + " shardUnique: " + Boolean.toString(shardUnique));
-      }
-
-      return copy(req.getParams(), map, ONLY_ACTIVE_NODES, SHARD_UNIQUE);
-    }),
-    REBALANCELEADERS_OP(REBALANCELEADERS, (req, rsp, h) -> {
-      new RebalanceLeaders(req, rsp, h).execute();
-      return null;
-    }),
-    MODIFYCOLLECTION_OP(MODIFYCOLLECTION, (req, rsp, h) -> {
-      Map<String, Object> m = copy(req.getParams(), null, CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES);
-      if (m.isEmpty())  {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            formatString("no supported values provided {0}", CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES.toString()));
-      }
-      copy(req.getParams().required(), m, COLLECTION_PROP);
-      addMapObject(m, RULE);
-      addMapObject(m, SNITCH);
-      for (String prop : CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES) {
-        if ("".equals(m.get(prop)))  {
-          // setting a property to an empty string is equivalent to removing it, see SOLR-12507
-          m.put(prop, null);
-        }
-        DocCollection.verifyProp(m, prop);
-      }
-      verifyRuleParams(h.coreContainer, m);
-      if (m.get(REPLICATION_FACTOR) != null) {
-        m.put(NRT_REPLICAS, m.get(REPLICATION_FACTOR));
-      }
-      return m;
-    }),
-    MIGRATESTATEFORMAT_OP(MIGRATESTATEFORMAT, (req, rsp, h) -> copy(req.getParams().required(), null, COLLECTION_PROP)),
-
-    BACKUP_OP(BACKUP, (req, rsp, h) -> {
-      req.getParams().required().check(NAME, COLLECTION_PROP);
-
-      String collectionName = req.getParams().get(COLLECTION_PROP);
-      ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-      if (!clusterState.hasCollection(collectionName)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' does not exist, no action taken.");
-      }
-
-      CoreContainer cc = h.coreContainer;
-      String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
-      BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
-
-      String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
-      if (location == null) {
-        //Refresh the cluster property file to make sure the value set for location is the latest
-        // Check if the location is specified in the cluster property.
-        location = new ClusterProperties(h.coreContainer.getZkController().getZkClient()).getClusterProperty(CoreAdminParams.BACKUP_LOCATION, null);
-        if (location == null) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
-              + " parameter or as a default repository property or as a cluster property.");
-        }
-      }
-
-      // Check if the specified location is valid for this repository.
-      URI uri = repository.createURI(location);
-      try {
-        if (!repository.exists(uri)) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
-        }
-      } catch (IOException ex) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existance of " + uri + ". Is it valid?", ex);
-      }
-
-      String strategy = req.getParams().get(CollectionAdminParams.INDEX_BACKUP_STRATEGY, CollectionAdminParams.COPY_FILES_STRATEGY);
-      if (!CollectionAdminParams.INDEX_BACKUP_STRATEGIES.contains(strategy)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown index backup strategy " + strategy);
-      }
-
-      Map<String, Object> params = copy(req.getParams(), null, NAME, COLLECTION_PROP, CoreAdminParams.COMMIT_NAME);
-      params.put(CoreAdminParams.BACKUP_LOCATION, location);
-      params.put(CollectionAdminParams.INDEX_BACKUP_STRATEGY, strategy);
-      return params;
-    }),
-    RESTORE_OP(RESTORE, (req, rsp, h) -> {
-      req.getParams().required().check(NAME, COLLECTION_PROP);
-
-      String collectionName = SolrIdentifierValidator.validateCollectionName(req.getParams().get(COLLECTION_PROP));
-      ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-      // We always want to restore into a collection name which doesn't exist yet.
-      if (clusterState.hasCollection(collectionName)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' exists, no action taken.");
-      }
-
-      CoreContainer cc = h.coreContainer;
-      String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
-      BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
-
-      String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
-      if (location == null) {
-        //Refresh the cluster property file to make sure the value set for location is the latest
-        // Check if the location is specified in the cluster property.
-        location = new ClusterProperties(h.coreContainer.getZkController().getZkClient()).getClusterProperty("location", null);
-        if (location == null) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
-              + " parameter or as a default repository property or as a cluster property.");
-        }
-      }
-
-      // Check if the specified location is valid for this repository.
-      URI uri = repository.createURI(location);
-      try {
-        if (!repository.exists(uri)) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
-        }
-      } catch (IOException ex) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existance of " + uri + ". Is it valid?", ex);
-      }
-
-      String createNodeArg = req.getParams().get(CREATE_NODE_SET);
-      if (CREATE_NODE_SET_EMPTY.equals(createNodeArg)) {
-        throw new SolrException(
-            SolrException.ErrorCode.BAD_REQUEST,
-            "Cannot restore with a CREATE_NODE_SET of CREATE_NODE_SET_EMPTY."
-        );
-      }
-      if (req.getParams().get(NRT_REPLICAS) != null && req.getParams().get(REPLICATION_FACTOR) != null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "Cannot set both replicationFactor and nrtReplicas as they mean the same thing");
-      }
-
-      Map<String, Object> params = copy(req.getParams(), null, NAME, COLLECTION_PROP);
-      params.put(CoreAdminParams.BACKUP_LOCATION, location);
-      // from CREATE_OP:
-      copy(req.getParams(), params, COLL_CONF, REPLICATION_FACTOR, NRT_REPLICAS, TLOG_REPLICAS,
-          PULL_REPLICAS, MAX_SHARDS_PER_NODE, STATE_FORMAT, AUTO_ADD_REPLICAS, CREATE_NODE_SET, CREATE_NODE_SET_SHUFFLE);
-      copyPropertiesWithPrefix(req.getParams(), params, COLL_PROP_PREFIX);
-      return params;
-    }),
-    CREATESNAPSHOT_OP(CREATESNAPSHOT, (req, rsp, h) -> {
-      req.getParams().required().check(COLLECTION_PROP, CoreAdminParams.COMMIT_NAME);
-
-      String collectionName = req.getParams().get(COLLECTION_PROP);
-      String commitName = req.getParams().get(CoreAdminParams.COMMIT_NAME);
-      ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-      if (!clusterState.hasCollection(collectionName)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' does not exist, no action taken.");
-      }
-
-      SolrZkClient client = h.coreContainer.getZkController().getZkClient();
-      if (SolrSnapshotManager.snapshotExists(client, collectionName, commitName)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Snapshot with name '" + commitName + "' already exists for collection '"
-                + collectionName + "', no action taken.");
-      }
-
-      Map<String, Object> params = copy(req.getParams(), null, COLLECTION_PROP, CoreAdminParams.COMMIT_NAME);
-      return params;
-    }),
-    DELETESNAPSHOT_OP(DELETESNAPSHOT, (req, rsp, h) -> {
-      req.getParams().required().check(COLLECTION_PROP, CoreAdminParams.COMMIT_NAME);
-
-      String collectionName = req.getParams().get(COLLECTION_PROP);
-      ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-      if (!clusterState.hasCollection(collectionName)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' does not exist, no action taken.");
-      }
-
-      Map<String, Object> params = copy(req.getParams(), null, COLLECTION_PROP, CoreAdminParams.COMMIT_NAME);
-      return params;
-    }),
-    LISTSNAPSHOTS_OP(LISTSNAPSHOTS, (req, rsp, h) -> {
-      req.getParams().required().check(COLLECTION_PROP);
-
-      String collectionName = req.getParams().get(COLLECTION_PROP);
-      ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-      if (!clusterState.hasCollection(collectionName)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' does not exist, no action taken.");
-      }
-
-      NamedList<Object> snapshots = new NamedList<Object>();
-      SolrZkClient client = h.coreContainer.getZkController().getZkClient();
-      Collection<CollectionSnapshotMetaData> m = SolrSnapshotManager.listSnapshots(client, collectionName);
-      for (CollectionSnapshotMetaData meta : m) {
-        snapshots.add(meta.getName(), meta.toNamedList());
-      }
-
-      rsp.add(SolrSnapshotManager.SNAPSHOTS_INFO, snapshots);
-      return null;
-    }),
-    REPLACENODE_OP(REPLACENODE, (req, rsp, h) -> {
-      return copy(req.getParams(), null,
-          "source", //legacy
-          "target",//legacy
-          WAIT_FOR_FINAL_STATE,
-          CollectionParams.SOURCE_NODE,
-          CollectionParams.TARGET_NODE);
-    }),
-    MOVEREPLICA_OP(MOVEREPLICA, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP);
-
-      return copy(req.getParams(), map,
-          CollectionParams.FROM_NODE,
-          CollectionParams.SOURCE_NODE,
-          CollectionParams.TARGET_NODE,
-          WAIT_FOR_FINAL_STATE,
-          IN_PLACE_MOVE,
-          "replica",
-          "shard");
-    }),
-    DELETENODE_OP(DELETENODE, (req, rsp, h) -> copy(req.getParams().required(), null, "node"));
-
-    /**
-     * Places all prefixed properties in the sink map (or a new map) using the prefix as the key and a map of
-     * all prefixed properties as the value. The sub-map keys have the prefix and its trailing separator character removed.
-     *
-     * @param params The solr params from which to extract prefixed properties.
-     * @param sink The map to add the properties to.
-     * @param prefix The prefix to identify properties to be extracted
-     * @return The sink map, or a new map if the sink map was null
-     */
-    private static Map<String, Object> convertPrefixToMap(SolrParams params, Map<String, Object> sink, String prefix) {
-      Map<String,Object> result = new LinkedHashMap<>();
-      Iterator<String> iter =  params.getParameterNamesIterator();
-      while (iter.hasNext()) {
-        String param = iter.next();
-        if (param.startsWith(prefix)) {
-          result.put(param.substring(prefix.length()+1), params.get(param));
-        }
-      }
-      if (sink == null) {
-        sink = new LinkedHashMap<>();
-      }
-      sink.put(prefix, result);
-      return sink;
-    }
-
-    public final CollectionOp fun;
-    CollectionAction action;
-    long timeOut;
-    boolean sendToOCPQueue;
-
-    CollectionOperation(CollectionAction action, CollectionOp fun) {
-      this(action, DEFAULT_COLLECTION_OP_TIMEOUT, true, fun);
-    }
-
-    CollectionOperation(CollectionAction action, long timeOut, boolean sendToOCPQueue, CollectionOp fun) {
-      this.action = action;
-      this.timeOut = timeOut;
-      this.sendToOCPQueue = sendToOCPQueue;
-      this.fun = fun;
-
-    }
-
-    public static CollectionOperation get(CollectionAction action) {
-      for (CollectionOperation op : values()) {
-        if (op.action == action) return op;
-      }
-      throw new SolrException(ErrorCode.SERVER_ERROR, "No such action " + action);
-    }
-
-    @Override
-    public Map<String, Object> execute(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h)
-        throws Exception {
-      return fun.execute(req, rsp, h);
-    }
-  }
-
-  private static void forceLeaderElection(SolrQueryRequest req, CollectionsHandler handler) {
-    ZkController zkController = handler.coreContainer.getZkController();
-    ClusterState clusterState = zkController.getClusterState();
-    String collectionName = req.getParams().required().get(COLLECTION_PROP);
-    String sliceId = req.getParams().required().get(SHARD_ID_PROP);
-
-    log.info("Force leader invoked, state: {}", clusterState);
-    DocCollection collection = clusterState.getCollection(collectionName);
-    Slice slice = collection.getSlice(sliceId);
-    if (slice == null) {
-      throw new SolrException(ErrorCode.BAD_REQUEST,
-          "No shard with name " + sliceId + " exists for collection " + collectionName);
-    }
-
-    try (ZkShardTerms zkShardTerms = new ZkShardTerms(collectionName, slice.getName(), zkController.getZkClient())) {
-      // if an active replica is the leader, then all is fine already
-      Replica leader = slice.getLeader();
-      if (leader != null && leader.getState() == State.ACTIVE) {
-        throw new SolrException(ErrorCode.SERVER_ERROR,
-            "The shard already has an active leader. Force leader is not applicable. State: " + slice);
-      }
-
-      final Set<String> liveNodes = clusterState.getLiveNodes();
-      List<Replica> liveReplicas = slice.getReplicas().stream()
-          .filter(rep -> liveNodes.contains(rep.getNodeName())).collect(Collectors.toList());
-      boolean shouldIncreaseReplicaTerms = liveReplicas.stream()
-          .noneMatch(rep -> zkShardTerms.registered(rep.getName()) && zkShardTerms.canBecomeLeader(rep.getName()));
-      // we won't increase replica terms if there is a live replica whose term equals the leader's
-      if (shouldIncreaseReplicaTerms) {
-        //TODO only increase terms of replicas that are less out-of-sync
-        liveReplicas.stream()
-            .filter(rep -> zkShardTerms.registered(rep.getName()))
-            .forEach(rep -> zkShardTerms.setTermEqualsToLeader(rep.getName()));
-      }
-
-      // Wait till we have an active leader
-      boolean success = false;
-      for (int i = 0; i < 9; i++) {
-        Thread.sleep(5000);
-        clusterState = handler.coreContainer.getZkController().getClusterState();
-        collection = clusterState.getCollection(collectionName);
-        slice = collection.getSlice(sliceId);
-        if (slice.getLeader() != null && slice.getLeader().getState() == State.ACTIVE) {
-          success = true;
-          break;
-        }
-        log.warn("Force leader attempt {}. Waiting 5 secs for an active leader. State of the slice: {}", (i + 1), slice);
-      }
-
-      if (success) {
-        log.info("Successfully issued FORCELEADER command for collection: {}, shard: {}", collectionName, sliceId);
-      } else {
-        log.info("Couldn't successfully force leader, collection: {}, shard: {}. Cluster state: {}", collectionName, sliceId, clusterState);
-      }
-    } catch (SolrException e) {
-      throw e;
-    } catch (Exception e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR,
-          "Error executing FORCELEADER operation for collection: " + collectionName + " shard: " + sliceId, e);
-    }
-  }
-
-  public static void waitForActiveCollection(String collectionName, CoreContainer cc, SolrResponse createCollResponse)
-      throws KeeperException, InterruptedException {
-
-    if (createCollResponse.getResponse().get("exception") != null) {
-      // the main call failed, don't wait
-      log.info("Not waiting for active collection due to exception: " + createCollResponse.getResponse().get("exception"));
-      return;
-    }
-    
-    if (createCollResponse.getResponse().get("failure") != null) {
-      // TODO: we should not wait for Replicas we know failed
-    }
-    
-    String replicaNotAlive = null;
-    String replicaState = null;
-    String nodeNotLive = null;
-
-    CloudConfig ccfg = cc.getConfig().getCloudConfig();
-    Integer numRetries = ccfg.getCreateCollectionWaitTimeTillActive(); // this config is actually # seconds, not # tries
-    Boolean checkLeaderOnly = ccfg.isCreateCollectionCheckLeaderActive();
-    log.info("Wait for new collection to be active for at most " + numRetries + " seconds. Check all shard "
-        + (checkLeaderOnly ? "leaders" : "replicas"));
-    ZkStateReader zkStateReader = cc.getZkController().getZkStateReader();
-    for (int i = 0; i < numRetries; i++) {
-      ClusterState clusterState = zkStateReader.getClusterState();
-
-      final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
-      
-      if (docCollection != null && docCollection.getSlices() != null) {
-        Collection<Slice> shards = docCollection.getSlices();
-        replicaNotAlive = null;
-        for (Slice shard : shards) {
-          Collection<Replica> replicas;
-          if (!checkLeaderOnly) replicas = shard.getReplicas();
-          else {
-            replicas = new ArrayList<Replica>();
-            replicas.add(shard.getLeader());
-          }
-          for (Replica replica : replicas) {
-            String state = replica.getStr(ZkStateReader.STATE_PROP);
-            log.debug("Checking replica status, collection={} replica={} state={}", collectionName,
-                replica.getCoreUrl(), state);
-            if (!clusterState.liveNodesContain(replica.getNodeName())
-                || !state.equals(Replica.State.ACTIVE.toString())) {
-              replicaNotAlive = replica.getCoreUrl();
-              nodeNotLive = replica.getNodeName();
-              replicaState = state;
-              break;
-            }
-          }
-          if (replicaNotAlive != null) break;
-        }
-
-        if (replicaNotAlive == null) return;
-      }
-      Thread.sleep(1000); // thus numRetries is roughly number of seconds
-    }
-    if (nodeNotLive != null && replicaState != null) {
-      log.error("Timed out waiting for new collection's replicas to become ACTIVE "
-              + (replicaState.equals(Replica.State.ACTIVE.toString()) ? "node " + nodeNotLive + " is not live"
-                  : "replica " + replicaNotAlive + " is in state of " + replicaState.toString()) + " with timeout=" + numRetries);
-    } else {
-      log.error("Timed out waiting for new collection's replicas to become ACTIVE with timeout=" + numRetries);
-    }
-  }
-  
-  public static void verifyRuleParams(CoreContainer cc, Map<String, Object> m) {
-    List l = (List) m.get(RULE);
-    if (l != null) {
-      for (Object o : l) {
-        Map map = (Map) o;
-        try {
-          new Rule(map);
-        } catch (Exception e) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Error in rule " + m, e);
-        }
-      }
-    }
-    if (cc != null && cc.isZooKeeperAware())
-      ReplicaAssigner.verifySnitchConf(cc.getZkController().getSolrCloudManager(), (List) m.get(SNITCH));
-  }
-
-  /**
-   * Converts a String of the form a:b,c:d to a Map
-   */
-  private static Map<String, Object> addMapObject(Map<String, Object> props, String key) {
-    Object v = props.get(key);
-    if (v == null) return props;
-    List<String> val = new ArrayList<>();
-    if (v instanceof String[]) {
-      val.addAll(Arrays.asList((String[]) v));
-    } else {
-      val.add(v.toString());
-    }
-    if (val.size() > 0) {
-      ArrayList<Map> l = new ArrayList<>();
-      for (String rule : val) l.add(Rule.parseRule(rule));
-      props.put(key, l);
-    }
-    return props;
-  }
-  
-  private static void verifyShardsParam(String shardsParam) {
-    for (String shard : shardsParam.split(",")) {
-      SolrIdentifierValidator.validateShardName(shard);
-    }
-  }
-
-  interface CollectionOp {
-    Map<String, Object> execute(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception;
-
-  }
-
-  @Override
-  public Collection<Api> getApis() {
-    return v2Handler.getApis();
-  }
-
-  @Override
-  public Boolean registerV2() {
-    return Boolean.TRUE;
-  }
-
-  // These "copy" methods were once SolrParams.getAll but were moved here as there is no universal way that
-  //  a SolrParams can be represented in a Map; there are various choices.
-
-  /** Copy all params to the given map, or create a new one if the given map is null. */
-  static Map<String, Object> copy(SolrParams source, Map<String, Object> sink, Collection<String> paramNames) {
-    if (sink == null) sink = new LinkedHashMap<>();
-    for (String param : paramNames) {
-      String[] v = source.getParams(param);
-      if (v != null && v.length > 0) {
-        if (v.length == 1) {
-          sink.put(param, v[0]);
-        } else {
-          sink.put(param, v);
-        }
-      }
-    }
-    return sink;
-  }
-
-  /** Copy all params to the given map, or create a new one if the given map is null. */
-  static Map<String, Object> copy(SolrParams source, Map<String, Object> sink, String... paramNames){
-    return copy(source, sink, paramNames == null ? Collections.emptyList() : Arrays.asList(paramNames));
-  }
-
-}

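For illustration, a minimal sketch (not part of the diff) of the collapsing rule
implemented by the copy() helpers above, assuming only solr-solrj on the
classpath; the class name CopyParamsSketch and the parameter values are
hypothetical:

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.solr.common.params.ModifiableSolrParams;

public class CopyParamsSketch {
  public static void main(String[] args) {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.add("collection", "gettingstarted");
    params.add("shard", "shard1", "shard2"); // a multi-valued parameter

    // Same rule as copy() above: absent parameters are skipped, a single
    // value is stored as a String, multiple values stay a String[].
    Map<String, Object> sink = new LinkedHashMap<>();
    for (String name : Arrays.asList("collection", "shard", "missing")) {
      String[] v = params.getParams(name);
      if (v == null || v.length == 0) continue;
      sink.put(name, v.length == 1 ? v[0] : v);
    }

    System.out.println(sink.get("collection"));                        // gettingstarted
    System.out.println(Arrays.toString((String[]) sink.get("shard"))); // [shard1, shard2]
  }
}
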
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java
deleted file mode 100644
index fbd0fb6..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java
+++ /dev/null
@@ -1,333 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import java.io.InputStream;
-import java.lang.invoke.MethodHandles;
-import java.nio.charset.StandardCharsets;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.zip.ZipEntry;
-import java.util.zip.ZipInputStream;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.fs.Path;
-import org.apache.solr.api.Api;
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.cloud.OverseerSolrResponse;
-import org.apache.solr.cloud.OverseerTaskQueue.QueueEvent;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkConfigManager;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.params.ConfigSetParams;
-import org.apache.solr.common.params.ConfigSetParams.ConfigSetAction;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.ContentStream;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.security.AuthenticationPlugin;
-import org.apache.solr.security.AuthorizationContext;
-import org.apache.solr.security.PermissionNameProvider;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.Overseer.QUEUE_OPERATION;
-import static org.apache.solr.cloud.OverseerConfigSetMessageHandler.BASE_CONFIGSET;
-import static org.apache.solr.cloud.OverseerConfigSetMessageHandler.CONFIGSETS_ACTION_PREFIX;
-import static org.apache.solr.cloud.OverseerConfigSetMessageHandler.PROPERTY_PREFIX;
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.common.params.ConfigSetParams.ConfigSetAction.CREATE;
-import static org.apache.solr.common.params.ConfigSetParams.ConfigSetAction.DELETE;
-import static org.apache.solr.common.params.ConfigSetParams.ConfigSetAction.LIST;
-import static org.apache.solr.handler.admin.ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME;
-
-/**
- * A {@link org.apache.solr.request.SolrRequestHandler} for ConfigSets API requests.
- */
-public class ConfigSetsHandler extends RequestHandlerBase implements PermissionNameProvider {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  protected final CoreContainer coreContainer;
-  public static long DEFAULT_ZK_TIMEOUT = 300 * 1000;
-  private final ConfigSetsHandlerApi configSetsHandlerApi = new ConfigSetsHandlerApi(this);
-
-  /**
-   * Overloaded ctor to inject CoreContainer into the handler.
-   *
-   * @param coreContainer Core Container of the solr webapp installed.
-   */
-  public ConfigSetsHandler(final CoreContainer coreContainer) {
-    this.coreContainer = coreContainer;
-  }
-
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    if (coreContainer == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "Core container instance missing");
-    }
-
-    // Make sure that the core is ZKAware
-    if (!coreContainer.isZooKeeperAware()) {
-      throw new SolrException(ErrorCode.BAD_REQUEST,
-          "Solr instance is not running in SolrCloud mode.");
-    }
-
-    // Pick the action
-    SolrParams params = req.getParams();
-    String a = params.get(ConfigSetParams.ACTION);
-    if (a != null) {
-      ConfigSetAction action = ConfigSetAction.get(a);
-      if (action == null)
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown action: " + a);
-      if (action == ConfigSetAction.UPLOAD) {
-        handleConfigUploadRequest(req, rsp);
-        return;
-      }
-      invokeAction(req, rsp, action);
-    } else {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "action is a required param");
-    }
-
-    rsp.setHttpCaching(false);
-  }
-
-  void invokeAction(SolrQueryRequest req, SolrQueryResponse rsp, ConfigSetAction action) throws Exception {
-    ConfigSetOperation operation = ConfigSetOperation.get(action);
-    log.info("Invoked ConfigSet Action :{} with params {} ", action.toLower(), req.getParamString());
-    Map<String, Object> result = operation.call(req, rsp, this);
-    sendToZk(rsp, operation, result);
-  }
-
-  protected void sendToZk(SolrQueryResponse rsp, ConfigSetOperation operation, Map<String, Object> result)
-      throws KeeperException, InterruptedException {
-    if (result != null) {
-      // We need to differentiate between collection and configsets actions since they currently
-      // use the same underlying queue.
-      result.put(QUEUE_OPERATION, CONFIGSETS_ACTION_PREFIX + operation.action.toLower());
-      ZkNodeProps props = new ZkNodeProps(result);
-      handleResponse(operation.action.toLower(), props, rsp, DEFAULT_ZK_TIMEOUT);
-    }
-  }
-
-  private void handleConfigUploadRequest(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    if (!"true".equals(System.getProperty("configset.upload.enabled", "true"))) {
-      throw new SolrException(ErrorCode.BAD_REQUEST,
-          "Configset upload feature is disabled. To enable this, start Solr with '-Dconfigset.upload.enabled=true'.");
-    }
-
-    String configSetName = req.getParams().get(NAME);
-    if (StringUtils.isBlank(configSetName)) {
-      throw new SolrException(ErrorCode.BAD_REQUEST,
-          "The configuration name should be provided in the \"name\" parameter");
-    }
-
-    SolrZkClient zkClient = coreContainer.getZkController().getZkClient();
-    String configPathInZk = ZkConfigManager.CONFIGS_ZKNODE + Path.SEPARATOR + configSetName;
-
-    if (zkClient.exists(configPathInZk, true)) {
-      throw new SolrException(ErrorCode.BAD_REQUEST,
-          "The configuration " + configSetName + " already exists in zookeeper");
-    }
-
-    Iterator<ContentStream> contentStreamsIterator = req.getContentStreams().iterator();
-
-    if (!contentStreamsIterator.hasNext()) {
-      throw new SolrException(ErrorCode.BAD_REQUEST,
-          "No stream found for the config data to be uploaded");
-    }
-
-    InputStream inputStream = contentStreamsIterator.next().getStream();
-
-    // Create a node for the configuration in zookeeper
-    boolean trusted = getTrusted(req);
-    zkClient.makePath(configPathInZk, ("{\"trusted\": " + Boolean.toString(trusted) + "}").
-        getBytes(StandardCharsets.UTF_8), true);
-
-    ZipInputStream zis = new ZipInputStream(inputStream, StandardCharsets.UTF_8);
-    ZipEntry zipEntry = null;
-    while ((zipEntry = zis.getNextEntry()) != null) {
-      String filePathInZk = configPathInZk + "/" + zipEntry.getName();
-      if (zipEntry.isDirectory()) {
-        zkClient.makePath(filePathInZk, true);
-      } else {
-        createZkNodeIfNotExistsAndSetData(zkClient, filePathInZk,
-            IOUtils.toByteArray(zis));
-      }
-    }
-    zis.close();
-  }
-
-  boolean getTrusted(SolrQueryRequest req) {
-    AuthenticationPlugin authcPlugin = coreContainer.getAuthenticationPlugin();
-    log.info("Trying to upload a configset. authcPlugin: {}, user principal: {}",
-        authcPlugin, req.getUserPrincipal());
-    if (authcPlugin != null && req.getUserPrincipal() != null) {
-      return true;
-    }
-    return false;
-  }
-
-  private void createZkNodeIfNotExistsAndSetData(SolrZkClient zkClient,
-                                                 String filePathInZk, byte[] data) throws Exception {
-    if (!zkClient.exists(filePathInZk, true)) {
-      zkClient.create(filePathInZk, data, CreateMode.PERSISTENT, true);
-    } else {
-      zkClient.setData(filePathInZk, data, true);
-    }
-  }
-
-  private void handleResponse(String operation, ZkNodeProps m,
-                              SolrQueryResponse rsp, long timeout) throws KeeperException, InterruptedException {
-    long time = System.nanoTime();
-
-    QueueEvent event = coreContainer.getZkController()
-        .getOverseerConfigSetQueue()
-        .offer(Utils.toJSON(m), timeout);
-    if (event.getBytes() != null) {
-      SolrResponse response = SolrResponse.deserialize(event.getBytes());
-      rsp.getValues().addAll(response.getResponse());
-      SimpleOrderedMap exp = (SimpleOrderedMap) response.getResponse().get("exception");
-      if (exp != null) {
-        Integer code = (Integer) exp.get("rspCode");
-        rsp.setException(new SolrException(code != null && code != -1 ? ErrorCode.getErrorCode(code) : ErrorCode.SERVER_ERROR, (String) exp.get("msg")));
-      }
-    } else {
-      if (System.nanoTime() - time >= TimeUnit.NANOSECONDS.convert(timeout, TimeUnit.MILLISECONDS)) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, operation
-            + " the configset time out:" + timeout / 1000 + "s");
-      } else if (event.getWatchedEvent() != null) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, operation
-            + " the configset error [Watcher fired on path: "
-            + event.getWatchedEvent().getPath() + " state: "
-            + event.getWatchedEvent().getState() + " type "
-            + event.getWatchedEvent().getType() + "]");
-      } else {
-        throw new SolrException(ErrorCode.SERVER_ERROR, operation
-            + " the configset unknown case");
-      }
-    }
-  }
-
-  private static Map<String, Object> copyPropertiesWithPrefix(SolrParams params, Map<String, Object> props, String prefix) {
-    Iterator<String> iter = params.getParameterNamesIterator();
-    while (iter.hasNext()) {
-      String param = iter.next();
-      if (param.startsWith(prefix)) {
-        props.put(param, params.get(param));
-      }
-    }
-
-    // The configset created via an API should be mutable.
-    props.put("immutable", "false");
-
-    return props;
-  }
-
-  @Override
-  public String getDescription() {
-    return "Manage SolrCloud ConfigSets";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-
-  enum ConfigSetOperation {
-    CREATE_OP(CREATE) {
-      @Override
-      Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, ConfigSetsHandler h) throws Exception {
-        String baseConfigSetName = req.getParams().get(BASE_CONFIGSET, DEFAULT_CONFIGSET_NAME);
-        Map<String, Object> props = CollectionsHandler.copy(req.getParams().required(), null, NAME);
-        props.put(BASE_CONFIGSET, baseConfigSetName);
-        return copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX + ".");
-      }
-    },
-    DELETE_OP(DELETE) {
-      @Override
-      Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, ConfigSetsHandler h) throws Exception {
-        return CollectionsHandler.copy(req.getParams().required(), null, NAME);
-      }
-    },
-    LIST_OP(LIST) {
-      @Override
-      Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, ConfigSetsHandler h) throws Exception {
-        NamedList<Object> results = new NamedList<>();
-        SolrZkClient zk = h.coreContainer.getZkController().getZkStateReader().getZkClient();
-        ZkConfigManager zkConfigManager = new ZkConfigManager(zk);
-        List<String> configSetsList = zkConfigManager.listConfigs();
-        results.add("configSets", configSetsList);
-        SolrResponse response = new OverseerSolrResponse(results);
-        rsp.getValues().addAll(response.getResponse());
-        return null;
-      }
-    };
-
-    ConfigSetAction action;
-
-    ConfigSetOperation(ConfigSetAction action) {
-      this.action = action;
-    }
-
-    abstract Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, ConfigSetsHandler h) throws Exception;
-
-    public static ConfigSetOperation get(ConfigSetAction action) {
-      for (ConfigSetOperation op : values()) {
-        if (op.action == action) return op;
-      }
-      throw new SolrException(ErrorCode.SERVER_ERROR, "No such action" + action);
-    }
-  }
-
-  @Override
-  public Collection<Api> getApis() {
-    return configSetsHandlerApi.getApis();
-  }
-
-  @Override
-  public Boolean registerV2() {
-    return Boolean.TRUE;
-  }
-
-  @Override
-  public Name getPermissionName(AuthorizationContext ctx) {
-    String a = ctx.getParams().get(ConfigSetParams.ACTION);
-    if (a != null) {
-      ConfigSetAction action = ConfigSetAction.get(a);
-      if (action == ConfigSetAction.CREATE || action == ConfigSetAction.DELETE || action == ConfigSetAction.UPLOAD) {
-        return Name.CONFIG_EDIT_PERM;
-      } else if (action == ConfigSetAction.LIST) {
-        return Name.CONFIG_READ_PERM;
-      }
-    }
-    return null;
-  }
-}

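As a usage note, the handler above backs the ConfigSets API that SolrJ exposes
through ConfigSetAdminRequest. A minimal sketch, assuming a SolrCloud node at
localhost:8983 and a configset name "myconf" (both hypothetical):

import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.ConfigSetAdminRequest;

public class ConfigSetsSketch {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      // CREATE is routed through CREATE_OP above; it copies the base
      // configset ("_default" unless another base is given).
      ConfigSetAdminRequest.Create create = new ConfigSetAdminRequest.Create();
      create.setConfigSetName("myconf");
      create.setBaseConfigSetName("_default");
      create.process(client);

      // LIST is routed through LIST_OP above and reads the names from ZooKeeper.
      System.out.println(new ConfigSetAdminRequest.List().process(client).getResponse());
    }
  }
}
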
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandlerApi.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandlerApi.java b/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandlerApi.java
deleted file mode 100644
index 1a5f6f3..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandlerApi.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.admin;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.solr.client.solrj.request.CollectionApiMapping;
-import org.apache.solr.client.solrj.request.CollectionApiMapping.ConfigSetMeta;
-import org.apache.solr.handler.admin.ConfigSetsHandler.ConfigSetOperation;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-
-public class ConfigSetsHandlerApi extends BaseHandlerApiSupport {
-
-  final public static String DEFAULT_CONFIGSET_NAME = "_default";
-  final public static String AUTOCREATED_CONFIGSET_SUFFIX = ".AUTOCREATED";
-
-
-  final ConfigSetsHandler configSetHandler;
-  static Collection<ApiCommand> apiCommands = createMapping();
-
-  public static String getSuffixedNameForAutoGeneratedConfigSet(String configName) {
-    return configName + AUTOCREATED_CONFIGSET_SUFFIX;
-  }
-
-  private static Collection<ApiCommand> createMapping() {
-    Map<ConfigSetMeta, ApiCommand> result = new EnumMap<>(ConfigSetMeta.class);
-
-    for (ConfigSetMeta meta : ConfigSetMeta.values())
-      for (ConfigSetOperation op : ConfigSetOperation.values()) {
-        if (op.action == meta.action) {
-          result.put(meta, new ApiCommand() {
-            @Override
-            public CollectionApiMapping.CommandMeta meta() {
-              return meta;
-            }
-
-            @Override
-            public void invoke(SolrQueryRequest req, SolrQueryResponse rsp, BaseHandlerApiSupport apiHandler) throws Exception {
-              ((ConfigSetsHandlerApi) apiHandler).configSetHandler.invokeAction(req, rsp, op.action);
-            }
-          });
-        }
-      }
-
-    for (ConfigSetMeta meta : ConfigSetMeta.values()) {
-      if(result.get(meta) == null){
-        throw new RuntimeException("No implementation for "+ meta.name());
-      }
-    }
-
-    return result.values();
-  }
-
-  public ConfigSetsHandlerApi(ConfigSetsHandler configSetHandler) {
-    this.configSetHandler = configSetHandler;
-  }
-
-
-  @Override
-  protected Collection<ApiCommand> getCommands() {
-    return apiCommands;
-  }
-
-  @Override
-  protected List<CollectionApiMapping.V2EndPoint> getEndPoints() {
-    return Arrays.asList(CollectionApiMapping.ConfigSetEndPoint.values());
-  }
-
-}


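The createMapping() method above joins two enums on a shared action key and
fails fast if any meta value is left without an implementation. A
self-contained sketch of that pattern, using hypothetical enums:

import java.util.EnumMap;
import java.util.Map;

public class EnumJoinSketch {
  enum Action { CREATE, DELETE, LIST }

  enum Meta {
    CREATE_META(Action.CREATE), DELETE_META(Action.DELETE), LIST_META(Action.LIST);
    final Action action;
    Meta(Action action) { this.action = action; }
  }

  enum Op {
    CREATE_OP(Action.CREATE), DELETE_OP(Action.DELETE), LIST_OP(Action.LIST);
    final Action action;
    Op(Action action) { this.action = action; }
  }

  public static void main(String[] args) {
    // Cross-product join: pair each Meta with the Op sharing its action.
    Map<Meta, Op> mapping = new EnumMap<>(Meta.class);
    for (Meta meta : Meta.values())
      for (Op op : Op.values())
        if (op.action == meta.action) mapping.put(meta, op);

    // Fail fast if the join left any Meta without an implementation.
    for (Meta meta : Meta.values())
      if (mapping.get(meta) == null)
        throw new RuntimeException("No implementation for " + meta.name());

    System.out.println(mapping); // {CREATE_META=CREATE_OP, DELETE_META=DELETE_OP, LIST_META=LIST_OP}
  }
}
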
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/SolrZkServer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/SolrZkServer.java b/solr/core/src/java/org/apache/solr/cloud/SolrZkServer.java
deleted file mode 100644
index 664b541..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/SolrZkServer.java
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import org.apache.solr.common.SolrException;
-import org.apache.zookeeper.server.ServerConfig;
-import org.apache.zookeeper.server.ZooKeeperServerMain;
-import org.apache.zookeeper.server.quorum.QuorumPeer;
-import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
-import org.apache.zookeeper.server.quorum.QuorumPeerMain;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.lang.invoke.MethodHandles;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
-import java.nio.charset.StandardCharsets;
-import java.util.Map;
-import java.util.Properties;
-import java.util.regex.Pattern;
-
-
-public class SolrZkServer {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  String zkRun;
-  String zkHost;
-
-  int solrPort;
-  Properties props;
-  SolrZkServerProps zkProps;
-
-  private Thread zkThread;  // the thread running a zookeeper server, only if zkRun is set
-
-  private String dataHome;
-  private String confHome;
-
-  public SolrZkServer(String zkRun, String zkHost, String dataHome, String confHome, int solrPort) {
-    this.zkRun = zkRun;
-    this.zkHost = zkHost;
-    this.dataHome = dataHome;
-    this.confHome = confHome;
-    this.solrPort = solrPort;
-  }
-
-  public String getClientString() {
-    if (zkHost != null) return zkHost;
-    
-    if (zkProps == null) return null;
-
-    // if the string wasn't passed as zkHost, then use the standalone server we started
-    if (zkRun == null) return null;
-    return "localhost:" + zkProps.getClientPortAddress().getPort();
-  }
-
-  public void parseConfig() {
-    if (zkProps == null) {
-      zkProps = new SolrZkServerProps();
-      // set default data dir
-      // TODO: use something based on IP+port???  support ensemble all from same solr home?
-      zkProps.setDataDir(dataHome);
-      zkProps.zkRun = zkRun;
-      zkProps.solrPort = Integer.toString(solrPort);
-    }
-    
-    try {
-      props = SolrZkServerProps.getProperties(confHome + '/' + "zoo.cfg");
-      SolrZkServerProps.injectServers(props, zkRun, zkHost);
-      if (props.getProperty("clientPort") == null) {
-        props.setProperty("clientPort", Integer.toString(solrPort + 1000));
-      }
-      zkProps.parseProperties(props);
-    } catch (QuorumPeerConfig.ConfigException | IOException e) {
-      if (zkRun != null)
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
-    }
-  }
-
-  public Map<Long, QuorumPeer.QuorumServer> getServers() {
-    return zkProps.getServers();
-  }
-
-  public void start() {
-    if (zkRun == null) return;
-
-    zkThread = new Thread() {
-      @Override
-      public void run() {
-        try {
-          if (zkProps.getServers().size() > 1) {
-            QuorumPeerMain zkServer = new QuorumPeerMain();
-            zkServer.runFromConfig(zkProps);
-          } else {
-            ServerConfig sc = new ServerConfig();
-            sc.readFrom(zkProps);
-            ZooKeeperServerMain zkServer = new ZooKeeperServerMain();
-            zkServer.runFromConfig(sc);
-          }
-          log.info("ZooKeeper Server exited.");
-        } catch (Exception e) {
-          log.error("ZooKeeper Server ERROR", e);
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
-        }
-      }
-    };
-
-    if (zkProps.getServers().size() > 1) {
-      log.info("STARTING EMBEDDED ENSEMBLE ZOOKEEPER SERVER at port " + zkProps.getClientPortAddress().getPort());
-    } else {
-      log.info("STARTING EMBEDDED STANDALONE ZOOKEEPER SERVER at port " + zkProps.getClientPortAddress().getPort());
-    }
-
-    zkThread.setDaemon(true);
-    zkThread.start();
-    try {
-      Thread.sleep(500); // pause for ZooKeeper to start
-    } catch (Exception e) {
-      log.error("STARTING ZOOKEEPER", e);
-    }
-  }
-
-  public void stop() {
-    if (zkRun == null) return;
-    zkThread.interrupt();
-  }
-}
-
-
-
-
-// Allows us to set a default for the data dir before parsing
-// zoo.cfg (which validates that there is a dataDir)
-class SolrZkServerProps extends QuorumPeerConfig {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private static final Pattern MISSING_MYID_FILE_PATTERN = Pattern.compile(".*myid file is missing$");
-
-  String solrPort; // port that Solr is listening on
-  String zkRun;
-
-  /**
-   * Parse a ZooKeeper configuration file
-   * @param path the path of the configuration file
-   * @throws ConfigException error processing configuration
-   */
-  public static Properties getProperties(String path) throws ConfigException {
-    File configFile = new File(path);
-
-    log.info("Reading configuration from: " + configFile);
-
-    try {
-      if (!configFile.exists()) {
-        throw new IllegalArgumentException(configFile.toString()
-            + " file is missing");
-      }
-
-      Properties cfg = new Properties();
-      FileInputStream in = new FileInputStream(configFile);
-      try {
-        cfg.load(new InputStreamReader(in, StandardCharsets.UTF_8));
-      } finally {
-        in.close();
-      }
-
-      return cfg;
-
-    } catch (IOException | IllegalArgumentException e) {
-      throw new ConfigException("Error processing " + path, e);
-    }
-  }
-
-
-  // Adds server.x if they don't exist, based on zkHost if it does exist.
-  // Given zkHost=localhost:1111,localhost:2222 this will inject
-  // server.0=localhost:1112:1113
-  // server.1=localhost:2223:2224
-  public static void injectServers(Properties props, String zkRun, String zkHost) {
-
-    // if clientPort not already set, use zkRun
-    if (zkRun != null && props.getProperty("clientPort")==null) {
-      int portIdx = zkRun.lastIndexOf(':');
-      if (portIdx > 0) {
-        String portStr = zkRun.substring(portIdx+1);
-        props.setProperty("clientPort", portStr);
-      }
-    }
-
-    boolean hasServers = hasServers(props);
-
-    if (!hasServers && zkHost != null) {
-      int alg = Integer.parseInt(props.getProperty("electionAlg","3").trim());
-      String[] hosts = zkHost.split(",");
-      int serverNum = 0;
-      for (String hostAndPort : hosts) {
-        hostAndPort = hostAndPort.trim();
-        int portIdx = hostAndPort.lastIndexOf(':');
-        String clientPortStr = hostAndPort.substring(portIdx+1);
-        int clientPort = Integer.parseInt(clientPortStr);
-        String host = hostAndPort.substring(0,portIdx);
-
-        String serverStr = host + ':' + (clientPort+1);
-        // zk leader election algorithms other than 0 need an extra port for leader election.
-        if (alg != 0) {
-          serverStr = serverStr + ':' + (clientPort+2);
-        }
-
-        props.setProperty("server."+serverNum, serverStr);
-        serverNum++;
-      }
-    }
-  }
-
-  public static boolean hasServers(Properties props) {
-    for (Object key : props.keySet())
-      if (((String)key).startsWith("server."))
-        return true;
-    return false;
-  }
-
-  // called by the modified version of parseProperties
-  // when the myid file is missing.
-  public Long getMyServerId() {
-    if (zkRun == null && solrPort == null) return null;
-
-    Map<Long, QuorumPeer.QuorumServer> slist = getServers();
-
-    String myHost = "localhost";
-    InetSocketAddress thisAddr = null;
-
-    if (zkRun != null && zkRun.length()>0) {
-      String[] parts = zkRun.split(":");
-      myHost = parts[0];
-      thisAddr = new InetSocketAddress(myHost, Integer.parseInt(parts[1]) + 1);
-    } else {
-      // default to localhost:<solrPort+1001>
-      thisAddr = new InetSocketAddress(myHost, Integer.parseInt(solrPort)+1001);
-    }
-
-
-    // first try a straight match by host
-    Long me = null;
-    boolean multiple = false;
-    int port = 0;
-    for (QuorumPeer.QuorumServer server : slist.values()) {
-      if (server.addr.getHostName().equals(myHost)) {
-        multiple = me!=null;
-        me = server.id;
-        port = server.addr.getPort();
-      }
-    }
-
-    if (!multiple) {
-      // only one host matched... assume it's me.
-      setClientPort(port - 1);
-      return me;
-    }
-
-    if (me == null) {
-      // no hosts matched.
-      return null;
-    }
-
-
-    // multiple matches... try to figure out by port.
-    for (QuorumPeer.QuorumServer server : slist.values()) {
-      if (server.addr.equals(thisAddr)) {
-        if (clientPortAddress == null || clientPortAddress.getPort() <= 0)
-          setClientPort(server.addr.getPort() - 1);
-        return server.id;
-      }
-    }
-
-    return null;
-  }
-
-
-
-  public void setDataDir(String dataDir) {
-    this.dataDir = dataDir;
-  }
-
-  public void setClientPort(int clientPort) {
-    if (clientPortAddress != null) {
-      try {
-        this.clientPortAddress = new InetSocketAddress(
-                InetAddress.getByName(clientPortAddress.getHostName()), clientPort);
-      } catch (UnknownHostException e) {
-        throw new RuntimeException(e);
-      }
-    } else {
-      this.clientPortAddress = new InetSocketAddress(clientPort);
-    }
-  }
-
-  /**
-   * Parse config from a Properties.
-   * @param zkProp Properties to parse from.
-   */
-  @Override
-  public void parseProperties(Properties zkProp)
-      throws IOException, ConfigException {
-    try {
-      super.parseProperties(zkProp);
-    } catch (IllegalArgumentException e) {
-      if (MISSING_MYID_FILE_PATTERN.matcher(e.getMessage()).matches()) {
-        Long myid = getMyServerId();
-        if (myid != null) {
-          serverId = myid;
-          return;
-        }
-        if (zkRun == null) return;
-      }
-      throw e;
-    }
-  }
-}

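The port arithmetic in SolrZkServerProps.injectServers(...) above can be seen
in isolation with a self-contained sketch (the zkHost value is the one from the
comment above; nothing here touches ZooKeeper):

import java.util.Properties;

public class InjectServersSketch {
  public static void main(String[] args) {
    // For each host:clientPort in zkHost, the peer port is clientPort+1 and,
    // for election algorithms other than 0, the election port is clientPort+2.
    String zkHost = "localhost:1111,localhost:2222";
    Properties props = new Properties();
    int serverNum = 0;
    for (String hostAndPort : zkHost.split(",")) {
      hostAndPort = hostAndPort.trim();
      int portIdx = hostAndPort.lastIndexOf(':');
      String host = hostAndPort.substring(0, portIdx);
      int clientPort = Integer.parseInt(hostAndPort.substring(portIdx + 1));
      props.setProperty("server." + serverNum++,
          host + ':' + (clientPort + 1) + ':' + (clientPort + 2));
    }
    props.list(System.out); // server.0=localhost:1112:1113, server.1=localhost:2223:2224
  }
}
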
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/Stats.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/Stats.java b/solr/core/src/java/org/apache/solr/cloud/Stats.java
deleted file mode 100644
index 36593f6..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/Stats.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.util.ArrayList;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import com.codahale.metrics.Timer;
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.common.cloud.ZkNodeProps;
-
-/**
- * Used to hold statistics about some SolrCloud operations.
- *
- * This is an experimental API and subject to change.
- */
-public class Stats {
-  static final int MAX_STORED_FAILURES = 10;
-
-  final Map<String, Stat> stats = new ConcurrentHashMap<>();
-  private volatile int queueLength;
-
-  public Map<String, Stat> getStats() {
-    return stats;
-  }
-
-  public int getSuccessCount(String operation) {
-    Stat stat = stats.get(operation.toLowerCase(Locale.ROOT));
-    return stat == null ? 0 : stat.success.get();
-  }
-
-  public int getErrorCount(String operation)  {
-    Stat stat = stats.get(operation.toLowerCase(Locale.ROOT));
-    return stat == null ? 0 : stat.errors.get();
-  }
-
-  public void success(String operation) {
-    String op = operation.toLowerCase(Locale.ROOT);
-    Stat stat = stats.get(op);
-    if (stat == null) {
-      stat = new Stat();
-      stats.put(op, stat);
-    }
-    stat.success.incrementAndGet();
-  }
-
-  public void error(String operation) {
-    String op = operation.toLowerCase(Locale.ROOT);
-    Stat stat = stats.get(op);
-    if (stat == null) {
-      stat = new Stat();
-      stats.put(op, stat);
-    }
-    stat.errors.incrementAndGet();
-  }
-
-  public Timer.Context time(String operation) {
-    String op = operation.toLowerCase(Locale.ROOT);
-    Stat stat = stats.get(op);
-    if (stat == null) {
-      stat = new Stat();
-      stats.put(op, stat);
-    }
-    return stat.requestTime.time();
-  }
-
-  public void storeFailureDetails(String operation, ZkNodeProps request, SolrResponse resp) {
-    String op = operation.toLowerCase(Locale.ROOT);
-    Stat stat = stats.get(op);
-    if (stat == null) {
-      stat = new Stat();
-      stats.put(op, stat);
-    }
-    LinkedList<FailedOp> failedOps = stat.failureDetails;
-    synchronized (failedOps)  {
-      if (failedOps.size() >= MAX_STORED_FAILURES)  {
-        failedOps.removeFirst();
-      }
-      failedOps.addLast(new FailedOp(request, resp));
-    }
-  }
-
-  public List<FailedOp> getFailureDetails(String operation) {
-    Stat stat = stats.get(operation.toLowerCase(Locale.ROOT));
-    if (stat == null || stat.failureDetails.isEmpty()) return null;
-    LinkedList<FailedOp> failedOps = stat.failureDetails;
-    synchronized (failedOps)  {
-      ArrayList<FailedOp> ret = new ArrayList<>(failedOps);
-      return ret;
-    }
-  }
-
-  public int getQueueLength() {
-    return queueLength;
-  }
-
-  public void setQueueLength(int queueLength) {
-    this.queueLength = queueLength;
-  }
-
-  public void clear() {
-    stats.clear();
-  }
-
-  public static class Stat  {
-    public final AtomicInteger success;
-    public final AtomicInteger errors;
-    public final Timer requestTime;
-    public final LinkedList<FailedOp> failureDetails;
-
-    public Stat() {
-      this.success = new AtomicInteger();
-      this.errors = new AtomicInteger();
-      this.requestTime = new Timer();
-      this.failureDetails = new LinkedList<>();
-    }
-  }
-
-  public static class FailedOp  {
-    public final ZkNodeProps req;
-    public final SolrResponse resp;
-
-    public FailedOp(ZkNodeProps req, SolrResponse resp) {
-      this.req = req;
-      this.resp = resp;
-    }
-  }
-}
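
The lookup-then-put sequences in success(), error(), time() and storeFailureDetails() are
not atomic on the ConcurrentHashMap, so two threads can briefly create separate Stat
objects for the same operation; Map.computeIfAbsent is the race-free equivalent. A minimal
usage sketch, assuming the Stats class above is on the classpath; the operation name
"create" is illustrative:

import com.codahale.metrics.Timer;
import org.apache.solr.cloud.Stats;

public class StatsSketch {
  public static void main(String[] args) {
    Stats stats = new Stats();
    Timer.Context ctx = stats.time("create");   // lazily creates the Stat, starts its timer
    try {
      // ... perform the hypothetical "create" operation ...
      stats.success("create");
    } catch (Exception e) {
      stats.error("create");
    } finally {
      ctx.stop();                               // records the elapsed time in the Timer
    }
    System.out.println("create successes: " + stats.getSuccessCount("create"));
  }
}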

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java b/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java
deleted file mode 100644
index 3d9a964..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java
+++ /dev/null
@@ -1,320 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.ExecutorService;
-
-import org.apache.http.client.HttpClient;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CoreAdminRequest.RequestRecovery;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.CoreDescriptor;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.handler.component.ShardRequest;
-import org.apache.solr.handler.component.ShardResponse;
-import org.apache.solr.logging.MDCLoggingContext;
-import org.apache.solr.update.PeerSync;
-import org.apache.solr.update.UpdateShardHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.DISTRIB;
-
-public class SyncStrategy {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final boolean SKIP_AUTO_RECOVERY = Boolean.getBoolean("solrcloud.skip.autorecovery");
-  
-  private final ShardHandler shardHandler;
-
-  private volatile boolean isClosed;
-  
-  private final HttpClient client;
-
-  private final ExecutorService updateExecutor;
-  
-  private final List<RecoveryRequest> recoveryRequests = new ArrayList<>();
-  
-  private static class RecoveryRequest {
-    ZkNodeProps leaderProps;
-    String baseUrl;
-    String coreName;
-  }
-  
-  public SyncStrategy(CoreContainer cc) {
-    UpdateShardHandler updateShardHandler = cc.getUpdateShardHandler();
-    client = updateShardHandler.getDefaultHttpClient();
-    shardHandler = cc.getShardHandlerFactory().getShardHandler();
-    updateExecutor = updateShardHandler.getUpdateExecutor();
-  }
-  
-  private static class ShardCoreRequest extends ShardRequest {
-    String coreName;
-    public String baseUrl;
-  }
-  
-  public PeerSync.PeerSyncResult sync(ZkController zkController, SolrCore core, ZkNodeProps leaderProps) {
-    return sync(zkController, core, leaderProps, false);
-  }
-  
-  public PeerSync.PeerSyncResult sync(ZkController zkController, SolrCore core, ZkNodeProps leaderProps,
-      boolean peerSyncOnlyWithActive) {
-    if (SKIP_AUTO_RECOVERY) {
-      return PeerSync.PeerSyncResult.success();
-    }
-    
-    MDCLoggingContext.setCore(core);
-    try {
-      if (isClosed) {
-        log.warn("Closed, skipping sync up.");
-        return PeerSync.PeerSyncResult.failure();
-      }
-      
-      recoveryRequests.clear();
-      
-      log.info("Sync replicas to " + ZkCoreNodeProps.getCoreUrl(leaderProps));
-      
-      if (core.getUpdateHandler().getUpdateLog() == null) {
-        log.error("No UpdateLog found - cannot sync");
-        return PeerSync.PeerSyncResult.failure();
-      }
-
-      return syncReplicas(zkController, core, leaderProps, peerSyncOnlyWithActive);
-    } finally {
-      MDCLoggingContext.clear();
-    }
-  }
-  
-  private PeerSync.PeerSyncResult syncReplicas(ZkController zkController, SolrCore core,
-      ZkNodeProps leaderProps, boolean peerSyncOnlyWithActive) {
-    boolean success = false;
-    PeerSync.PeerSyncResult result = null;
-    CloudDescriptor cloudDesc = core.getCoreDescriptor().getCloudDescriptor();
-    String collection = cloudDesc.getCollectionName();
-    String shardId = cloudDesc.getShardId();
-
-    if (isClosed) {
-      log.info("We have been closed, won't sync with replicas");
-      return PeerSync.PeerSyncResult.failure();
-    }
-    
-    // first sync ourselves - we are the potential leader after all
-    try {
-      result = syncWithReplicas(zkController, core, leaderProps, collection,
-          shardId, peerSyncOnlyWithActive);
-      success = result.isSuccess();
-    } catch (Exception e) {
-      SolrException.log(log, "Sync Failed", e);
-    }
-    try {
-      if (isClosed) {
-        log.info("We have been closed, won't attempt to sync replicas back to leader");
-        return PeerSync.PeerSyncResult.failure();
-      }
-      
-      if (success) {
-        log.info("Sync Success - now sync replicas to me");
-        
-        syncToMe(zkController, collection, shardId, leaderProps, core.getCoreDescriptor(), core.getUpdateHandler().getUpdateLog().getNumRecordsToKeep());
-        
-      } else {
-        log.info("Leader's attempt to sync with shard failed, moving to the next candidate");
-        // lets see who seems ahead...
-      }
-      
-    } catch (Exception e) {
-      SolrException.log(log, "Sync Failed", e);
-    }
-    
-    return result == null ? PeerSync.PeerSyncResult.failure() : result;
-  }
-  
-  private PeerSync.PeerSyncResult syncWithReplicas(ZkController zkController, SolrCore core,
-      ZkNodeProps props, String collection, String shardId, boolean peerSyncOnlyWithActive) {
-    List<ZkCoreNodeProps> nodes = zkController.getZkStateReader()
-        .getReplicaProps(collection, shardId,core.getCoreDescriptor().getCloudDescriptor().getCoreNodeName());
-    
-    if (nodes == null) {
-      // I have no replicas
-      return PeerSync.PeerSyncResult.success();
-    }
-    
-    List<String> syncWith = new ArrayList<>(nodes.size());
-    for (ZkCoreNodeProps node : nodes) {
-      syncWith.add(node.getCoreUrl());
-    }
-    
-    // if we can't reach a replica for sync, we still consider the overall sync a success
-    // TODO: as an assurance, we should still try and tell the sync nodes that we couldn't reach
-    // to recover once more?
-    // Fingerprinting here is off because we currently rely on having at least one of the nodes return "true", and if replicas are out-of-sync
-    // we still need to pick one as leader.  A followup sync from the replica to the new leader (with fingerprinting on) should then fail and
-    // initiate recovery-by-replication.
-    PeerSync peerSync = new PeerSync(core, syncWith, core.getUpdateHandler().getUpdateLog().getNumRecordsToKeep(), true, peerSyncOnlyWithActive, false);
-    return peerSync.sync();
-  }
-  
-  private void syncToMe(ZkController zkController, String collection,
-                        String shardId, ZkNodeProps leaderProps, CoreDescriptor cd,
-                        int nUpdates) {
-    
-    // sync everyone else
-    // TODO: we should do this in parallel at least
-    List<ZkCoreNodeProps> nodes = zkController
-        .getZkStateReader()
-        .getReplicaProps(collection, shardId,
-            cd.getCloudDescriptor().getCoreNodeName());
-    if (nodes == null) {
-      log.info(ZkCoreNodeProps.getCoreUrl(leaderProps) + " has no replicas");
-      return;
-    }
-
-    ZkCoreNodeProps zkLeader = new ZkCoreNodeProps(leaderProps);
-    for (ZkCoreNodeProps node : nodes) {
-      try {
-        log.info(ZkCoreNodeProps.getCoreUrl(leaderProps) + ": try and ask " + node.getCoreUrl() + " to sync");
-        
-        requestSync(node.getBaseUrl(), node.getCoreUrl(), zkLeader.getCoreUrl(), node.getCoreName(), nUpdates);
-        
-      } catch (Exception e) {
-        SolrException.log(log, "Error syncing replica to leader", e);
-      }
-    }
-    
-    
-    for(;;) {
-      ShardResponse srsp = shardHandler.takeCompletedOrError();
-      if (srsp == null) break;
-      boolean success = handleResponse(srsp);
-      if (srsp.getException() != null) {
-        SolrException.log(log, "Sync request error: " + srsp.getException());
-      }
-      
-      if (!success) {
-        log.info(ZkCoreNodeProps.getCoreUrl(leaderProps) + ": Sync failed - we will ask replica (" + srsp.getShardAddress()
-            + ") to recover.");
-        if (isClosed) {
-          log.info("We have been closed, don't request that a replica recover");
-        } else {
-          RecoveryRequest rr = new RecoveryRequest();
-          rr.leaderProps = leaderProps;
-          rr.baseUrl = ((ShardCoreRequest) srsp.getShardRequest()).baseUrl;
-          rr.coreName = ((ShardCoreRequest) srsp.getShardRequest()).coreName;
-          recoveryRequests.add(rr);
-        }
-      } else {
-        log.info(ZkCoreNodeProps.getCoreUrl(leaderProps) + ": sync completed with " + srsp.getShardAddress());
-      }
-      
-    }
-
-  }
-  
-  private boolean handleResponse(ShardResponse srsp) {
-    NamedList<Object> response = srsp.getSolrResponse().getResponse();
-    // TODO: why does this return null sometimes?
-    if (response == null) {
-      return false;
-    }
-    Boolean success = (Boolean) response.get("sync");
-    
-    if (success == null) {
-      success = false;
-    }
-    
-    return success;
-  }
-
-  private void requestSync(String baseUrl, String replica, String leaderUrl, String coreName, int nUpdates) {
-    //TODO should we use peerSyncWithLeader instead?
-    ShardCoreRequest sreq = new ShardCoreRequest();
-    sreq.coreName = coreName;
-    sreq.baseUrl = baseUrl;
-    sreq.purpose = 1;
-    sreq.shards = new String[]{replica};
-    sreq.actualShards = sreq.shards;
-    sreq.params = new ModifiableSolrParams();
-    sreq.params.set("qt","/get");
-    sreq.params.set(DISTRIB,false);
-    sreq.params.set("getVersions",Integer.toString(nUpdates));
-    sreq.params.set("sync",leaderUrl);
-    
-    shardHandler.submit(sreq, replica, sreq.params);
-  }
-  
-  public void close() {
-    this.isClosed = true;
-  }
-  
-  public void requestRecoveries() {
-    for (RecoveryRequest rr : recoveryRequests) {
-      try {
-        requestRecovery(rr.leaderProps, rr.baseUrl, rr.coreName);
-      } catch (SolrServerException | IOException e) {
-        log.error("Problem requesting that a replica recover", e);
-      }
-    }
-  }
-  
-  private void requestRecovery(final ZkNodeProps leaderProps, final String baseUrl, final String coreName) throws SolrServerException, IOException {
-    Thread thread = new Thread() {
-      {
-        setDaemon(true);
-      }
-      @Override
-      public void run() {
-        RequestRecovery recoverRequestCmd = new RequestRecovery();
-        recoverRequestCmd.setAction(CoreAdminAction.REQUESTRECOVERY);
-        recoverRequestCmd.setCoreName(coreName);
-        
-        try (HttpSolrClient client = new HttpSolrClient.Builder(baseUrl)
-            .withHttpClient(SyncStrategy.this.client)
-            .withConnectionTimeout(30000)
-            .withSocketTimeout(120000)
-            .build()) {
-          client.request(recoverRequestCmd);
-        } catch (Throwable t) {
-          SolrException.log(log, ZkCoreNodeProps.getCoreUrl(leaderProps) + ": Could not tell a replica to recover", t);
-          if (t instanceof Error) {
-            throw (Error) t;
-          }
-        }
-      }
-    };
-    updateExecutor.execute(thread);
-  }
-  
-  public static ModifiableSolrParams params(String... params) {
-    ModifiableSolrParams msp = new ModifiableSolrParams();
-    for (int i = 0; i < params.length; i += 2) {
-      msp.add(params[i], params[i + 1]);
-    }
-    return msp;
-  }
-}
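
The sync request that requestSync() submits is an ordinary /get request carrying a sync
parameter, and the params(String...) helper at the bottom of the class builds such a
parameter set from varargs. A sketch of the equivalent construction; the leader URL and
version count are illustrative:

import org.apache.solr.cloud.SyncStrategy;
import org.apache.solr.common.params.ModifiableSolrParams;

public class SyncParamsSketch {
  public static void main(String[] args) {
    // Same parameters that requestSync() sets on the ShardCoreRequest.
    ModifiableSolrParams params = SyncStrategy.params(
        "qt", "/get",                  // route to the realtime-get handler
        "distrib", "false",            // keep the request on the targeted replica
        "getVersions", "100",          // number of recent update versions to compare
        "sync", "http://leader:8983/solr/collection1/");  // leader to sync against
    System.out.println(params);
  }
}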

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/ZkCLI.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkCLI.java b/solr/core/src/java/org/apache/solr/cloud/ZkCLI.java
deleted file mode 100644
index 2df87a0..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/ZkCLI.java
+++ /dev/null
@@ -1,373 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import javax.xml.parsers.ParserConfigurationException;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.PrintStream;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Paths;
-import java.util.List;
-import java.util.concurrent.TimeoutException;
-import java.util.regex.Pattern;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.OptionBuilder;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.PosixParser;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.IOUtils;
-import org.apache.solr.common.cloud.ClusterProperties;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkConfigManager;
-import org.apache.solr.core.CoreContainer;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.xml.sax.SAXException;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.common.params.CommonParams.VALUE_LONG;
-
-public class ZkCLI {
-  
-  private static final String MAKEPATH = "makepath";
-  private static final String PUT = "put";
-  private static final String PUT_FILE = "putfile";
-  private static final String GET = "get";
-  private static final String GET_FILE = "getfile";
-  private static final String DOWNCONFIG = "downconfig";
-  private static final String ZK_CLI_NAME = "ZkCLI";
-  private static final String HELP = "help";
-  private static final String LINKCONFIG = "linkconfig";
-  private static final String CONFDIR = "confdir";
-  private static final String CONFNAME = "confname";
-  private static final String ZKHOST = "zkhost";
-  private static final String RUNZK = "runzk";
-  private static final String SOLRHOME = "solrhome";
-  private static final String BOOTSTRAP = "bootstrap";
-  static final String UPCONFIG = "upconfig";
-  static final String EXCLUDE_REGEX_SHORT = "x";
-  static final String EXCLUDE_REGEX = "excluderegex";
-  static final String EXCLUDE_REGEX_DEFAULT = ZkConfigManager.UPLOAD_FILENAME_EXCLUDE_REGEX;
-  private static final String COLLECTION = "collection";
-  private static final String CLEAR = "clear";
-  private static final String LIST = "list";
-  private static final String LS = "ls";
-  private static final String CMD = "cmd";
-  private static final String CLUSTERPROP = "clusterprop";
-  private static final String UPDATEACLS = "updateacls";
-
-  @VisibleForTesting
-  public static void setStdout(PrintStream stdout) {
-    ZkCLI.stdout = stdout;
-  }
-
-  private static PrintStream stdout = System.out;
-  
-  /**
-   * Allows you to perform a variety of ZooKeeper-related tasks, such as:
-   * 
-   * Bootstrap the current configs for all collections in solr.xml.
-   * 
-   * Upload a named config set from a given directory.
-   * 
-   * Link a named config set explicitly to a collection.
-   * 
-   * Clear ZooKeeper info.
-   * 
-   * If you also pass a solrPort, it will be used to start an embedded ZooKeeper, useful
-   * for single-machine, multi-node tests.
-   */
-  public static void main(String[] args) throws InterruptedException,
-      TimeoutException, IOException, ParserConfigurationException,
-      SAXException, KeeperException {
-
-    CommandLineParser parser = new PosixParser();
-    Options options = new Options();
-    
-    options.addOption(OptionBuilder
-        .hasArg(true)
-        .withDescription(
-            "cmd to run: " + BOOTSTRAP + ", " + UPCONFIG + ", " + DOWNCONFIG
-                + ", " + LINKCONFIG + ", " + MAKEPATH + ", " + PUT + ", " + PUT_FILE + ","
-                + GET + "," + GET_FILE + ", " + LIST + ", " + CLEAR
-                + ", " + UPDATEACLS + ", " + LS).create(CMD));
-
-    Option zkHostOption = new Option("z", ZKHOST, true,
-        "ZooKeeper host address");
-    options.addOption(zkHostOption);
-    Option solrHomeOption = new Option("s", SOLRHOME, true,
-        "for " + BOOTSTRAP + ", " + RUNZK + ": solrhome location");
-    options.addOption(solrHomeOption);
-    
-    options.addOption("d", CONFDIR, true,
-        "for " + UPCONFIG + ": a directory of configuration files");
-    options.addOption("n", CONFNAME, true,
-        "for " + UPCONFIG + ", " + LINKCONFIG + ": name of the config set");
-
-    
-    options.addOption("c", COLLECTION, true,
-        "for " + LINKCONFIG + ": name of the collection");
-    
-    options.addOption(EXCLUDE_REGEX_SHORT, EXCLUDE_REGEX, true,
-        "for " + UPCONFIG + ": files matching this regular expression won't be uploaded");
-
-    options
-        .addOption(
-            "r",
-            RUNZK,
-            true,
-            "run zk internally by passing the solr run port - only for clusters on one machine (tests, dev)");
-    
-    options.addOption("h", HELP, false, "bring up this help page");
-    options.addOption(NAME, true, "name of the cluster property to set");
-    options.addOption(VALUE_LONG, true, "value of the cluster property to set");
-
-    try {
-      // parse the command line arguments
-      CommandLine line = parser.parse(options, args);
-      
-      if (line.hasOption(HELP) || !line.hasOption(ZKHOST)
-          || !line.hasOption(CMD)) {
-        // automatically generate the help statement
-        HelpFormatter formatter = new HelpFormatter();
-        formatter.printHelp(ZK_CLI_NAME, options);
-        stdout.println("Examples:");
-        stdout.println("zkcli.sh -zkhost localhost:9983 -cmd " + BOOTSTRAP + " -" + SOLRHOME + " /opt/solr");
-        stdout.println("zkcli.sh -zkhost localhost:9983 -cmd " + UPCONFIG + " -" + CONFDIR + " /opt/solr/collection1/conf" + " -" + CONFNAME + " myconf");
-        stdout.println("zkcli.sh -zkhost localhost:9983 -cmd " + DOWNCONFIG + " -" + CONFDIR + " /opt/solr/collection1/conf" + " -" + CONFNAME + " myconf");
-        stdout.println("zkcli.sh -zkhost localhost:9983 -cmd " + LINKCONFIG + " -" + COLLECTION + " collection1" + " -" + CONFNAME + " myconf");
-        stdout.println("zkcli.sh -zkhost localhost:9983 -cmd " + MAKEPATH + " /apache/solr");
-        stdout.println("zkcli.sh -zkhost localhost:9983 -cmd " + PUT + " /solr.conf 'conf data'");
-        stdout.println("zkcli.sh -zkhost localhost:9983 -cmd " + PUT_FILE + " /solr.xml /User/myuser/solr/solr.xml");
-        stdout.println("zkcli.sh -zkhost localhost:9983 -cmd " + GET + " /solr.xml");
-        stdout.println("zkcli.sh -zkhost localhost:9983 -cmd " + GET_FILE + " /solr.xml solr.xml.file");
-        stdout.println("zkcli.sh -zkhost localhost:9983 -cmd " + CLEAR + " /solr");
-        stdout.println("zkcli.sh -zkhost localhost:9983 -cmd " + LIST);
-        stdout.println("zkcli.sh -zkhost localhost:9983 -cmd " + LS + " /solr/live_nodes");
-        stdout.println("zkcli.sh -zkhost localhost:9983 -cmd " + CLUSTERPROP + " -" + NAME + " urlScheme -" + VALUE_LONG + " https" );
-        stdout.println("zkcli.sh -zkhost localhost:9983 -cmd " + UPDATEACLS + " /solr");
-        return;
-      }
-      
-      // start up a tmp zk server first
-      String zkServerAddress = line.getOptionValue(ZKHOST);
-      String solrHome = line.getOptionValue(SOLRHOME);
-      
-      String solrPort = null;
-      if (line.hasOption(RUNZK)) {
-        if (!line.hasOption(SOLRHOME)) {
-          stdout.println("-" + SOLRHOME + " is required for " + RUNZK);
-          System.exit(1);
-        }
-        solrPort = line.getOptionValue(RUNZK);
-      }
-      
-      SolrZkServer zkServer = null;
-      if (solrPort != null) {
-        zkServer = new SolrZkServer("true", null, solrHome + "/zoo_data",
-            solrHome, Integer.parseInt(solrPort));
-        zkServer.parseConfig();
-        zkServer.start();
-      }
-      SolrZkClient zkClient = null;
-      try {
-        zkClient = new SolrZkClient(zkServerAddress, 30000, 30000,
-            () -> {
-            });
-        
-        if (line.getOptionValue(CMD).equalsIgnoreCase(BOOTSTRAP)) {
-          if (!line.hasOption(SOLRHOME)) {
-            stdout.println("-" + SOLRHOME
-                + " is required for " + BOOTSTRAP);
-            System.exit(1);
-          }
-
-          CoreContainer cc = new CoreContainer(solrHome);
-
-          if(!ZkController.checkChrootPath(zkServerAddress, true)) {
-            stdout.println("A chroot was specified in zkHost but the znode doesn't exist. ");
-            System.exit(1);
-          }
-
-          ZkController.bootstrapConf(zkClient, cc, solrHome);
-
-          // No need to close the CoreContainer, as it wasn't started
-          // up in the first place...
-          
-        } else if (line.getOptionValue(CMD).equalsIgnoreCase(UPCONFIG)) {
-          if (!line.hasOption(CONFDIR) || !line.hasOption(CONFNAME)) {
-            stdout.println("-" + CONFDIR + " and -" + CONFNAME
-                + " are required for " + UPCONFIG);
-            System.exit(1);
-          }
-          String confDir = line.getOptionValue(CONFDIR);
-          String confName = line.getOptionValue(CONFNAME);
-          final String excludeExpr = line.getOptionValue(EXCLUDE_REGEX, EXCLUDE_REGEX_DEFAULT);
-          
-          if(!ZkController.checkChrootPath(zkServerAddress, true)) {
-            stdout.println("A chroot was specified in zkHost but the znode doesn't exist. ");
-            System.exit(1);
-          }
-          ZkConfigManager configManager = new ZkConfigManager(zkClient);
-          final Pattern excludePattern = Pattern.compile(excludeExpr);
-          configManager.uploadConfigDir(Paths.get(confDir), confName, excludePattern);
-        } else if (line.getOptionValue(CMD).equalsIgnoreCase(DOWNCONFIG)) {
-          if (!line.hasOption(CONFDIR) || !line.hasOption(CONFNAME)) {
-            stdout.println("-" + CONFDIR + " and -" + CONFNAME
-                + " are required for " + DOWNCONFIG);
-            System.exit(1);
-          }
-          String confDir = line.getOptionValue(CONFDIR);
-          String confName = line.getOptionValue(CONFNAME);
-          ZkConfigManager configManager = new ZkConfigManager(zkClient);
-          configManager.downloadConfigDir(confName, Paths.get(confDir));
-        } else if (line.getOptionValue(CMD).equalsIgnoreCase(LINKCONFIG)) {
-          if (!line.hasOption(COLLECTION) || !line.hasOption(CONFNAME)) {
-            stdout.println("-" + COLLECTION + " and -" + CONFNAME
-                + " are required for " + LINKCONFIG);
-            System.exit(1);
-          }
-          String collection = line.getOptionValue(COLLECTION);
-          String confName = line.getOptionValue(CONFNAME);
-          
-          ZkController.linkConfSet(zkClient, collection, confName);
-        } else if (line.getOptionValue(CMD).equalsIgnoreCase(LIST)) {
-          zkClient.printLayoutToStream(stdout);
-        } else if (line.getOptionValue(CMD).equals(LS)) {
-
-          List argList = line.getArgList();
-          if (argList.size() != 1) {
-            stdout.println("-" + LS + " requires one arg - the path to list");
-            System.exit(1);
-          }
-
-          StringBuilder sb = new StringBuilder();
-          String path = argList.get(0).toString();
-          zkClient.printLayout(path == null ? "/" : path, 0, sb);
-          stdout.println(sb.toString());
-
-        } else if (line.getOptionValue(CMD).equalsIgnoreCase(CLEAR)) {
-          List arglist = line.getArgList();
-          if (arglist.size() != 1) {
-            stdout.println("-" + CLEAR + " requires one arg - the path to clear");
-            System.exit(1);
-          }
-          zkClient.clean(arglist.get(0).toString());
-        } else if (line.getOptionValue(CMD).equalsIgnoreCase(MAKEPATH)) {
-          List arglist = line.getArgList();
-          if (arglist.size() != 1) {
-            stdout.println("-" + MAKEPATH + " requires one arg - the path to make");
-            System.exit(1);
-          }
-          zkClient.makePath(arglist.get(0).toString(), true);
-        } else if (line.getOptionValue(CMD).equalsIgnoreCase(PUT)) {
-          List arglist = line.getArgList();
-          if (arglist.size() != 2) {
-            stdout.println("-" + PUT + " requires two args - the path to create and the data string");
-            System.exit(1);
-          }
-          String path = arglist.get(0).toString();
-          if (zkClient.exists(path, true)) {
-            zkClient.setData(path, arglist.get(1).toString().getBytes(StandardCharsets.UTF_8), true);
-          } else {
-            zkClient.create(path, arglist.get(1).toString().getBytes(StandardCharsets.UTF_8), CreateMode.PERSISTENT, true);
-          }
-        } else if (line.getOptionValue(CMD).equalsIgnoreCase(PUT_FILE)) {
-          List arglist = line.getArgList();
-          if (arglist.size() != 2) {
-            stdout.println("-" + PUT_FILE + " requires two args - the path to create in ZK and the path to the local file");
-            System.exit(1);
-          }
-
-          String path = arglist.get(0).toString();
-          InputStream is = new FileInputStream(arglist.get(1).toString());
-          try {
-            if (zkClient.exists(path, true)) {
-              zkClient.setData(path, IOUtils.toByteArray(is), true);
-            } else {
-              zkClient.create(path, IOUtils.toByteArray(is), CreateMode.PERSISTENT, true);
-            }
-          } finally {
-            IOUtils.closeQuietly(is);
-          }
-
-        } else if (line.getOptionValue(CMD).equalsIgnoreCase(GET)) {
-          List arglist = line.getArgList();
-          if (arglist.size() != 1) {
-            stdout.println("-" + GET + " requires one arg - the path to get");
-            System.exit(1);
-          }
-          byte [] data = zkClient.getData(arglist.get(0).toString(), null, null, true);
-          stdout.println(new String(data, StandardCharsets.UTF_8));
-        } else if (line.getOptionValue(CMD).equalsIgnoreCase(GET_FILE)) {
-          List arglist = line.getArgList();
-          if (arglist.size() != 2) {
-            stdout.println("-" + GET_FILE + "requires two args - the path to get and the file to save it to");
-            System.exit(1);
-          }
-          byte [] data = zkClient.getData(arglist.get(0).toString(), null, null, true);
-          FileUtils.writeByteArrayToFile(new File(arglist.get(1).toString()), data);
-        } else if (line.getOptionValue(CMD).equals(UPDATEACLS)) {
-          List arglist = line.getArgList();
-          if (arglist.size() != 1) {
-            stdout.println("-" + UPDATEACLS + " requires one arg - the path to update");
-            System.exit(1);
-          }
-          zkClient.updateACLs(arglist.get(0).toString());
-        } else if (line.getOptionValue(CMD).equalsIgnoreCase(CLUSTERPROP)) {
-          if(!line.hasOption(NAME)) {
-            stdout.println("-" + NAME + " is required for " + CLUSTERPROP);
-          }
-          String propertyName = line.getOptionValue(NAME);
-          //If -val option is missing, we will use the null value. This is required to maintain
-          //compatibility with Collections API.
-          String propertyValue = line.getOptionValue(VALUE_LONG);
-          ClusterProperties props = new ClusterProperties(zkClient);
-          try {
-            props.setClusterProperty(propertyName, propertyValue);
-          } catch (IOException ex) {
-            stdout.println("Unable to set the cluster property due to following error : " + ex.getLocalizedMessage());
-            System.exit(1);
-          }
-        } else {
-          // If no cmd matches
-          stdout.println("Unknown command " + line.getOptionValue(CMD) + ". Use -h to get help.");
-          System.exit(1);
-        }
-      } finally {
-        if (solrPort != null) {
-          zkServer.stop();
-        }
-        if (zkClient != null) {
-          zkClient.close();
-        }
-      }
-    } catch (ParseException exp) {
-      stdout.println("Unexpected exception:" + exp.getMessage());
-    }
-    
-  }
-}
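
The put and putfile branches share an upsert pattern against ZooKeeper: setData if the
znode already exists, create otherwise. A condensed sketch of that pattern, assuming a
ZooKeeper at localhost:9983; the path and payload are illustrative:

import java.nio.charset.StandardCharsets;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.zookeeper.CreateMode;

public class ZkUpsertSketch {
  public static void main(String[] args) throws Exception {
    SolrZkClient zkClient = new SolrZkClient("localhost:9983", 30000);
    try {
      String path = "/solr.conf";
      byte[] data = "conf data".getBytes(StandardCharsets.UTF_8);
      if (zkClient.exists(path, true)) {
        zkClient.setData(path, data, true);                        // overwrite existing znode
      } else {
        zkClient.create(path, data, CreateMode.PERSISTENT, true);  // create a persistent znode
      }
    } finally {
      zkClient.close();
    }
  }
}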

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/ZkCollectionTerms.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkCollectionTerms.java b/solr/core/src/java/org/apache/solr/cloud/ZkCollectionTerms.java
deleted file mode 100644
index b232f9b..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/ZkCollectionTerms.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.util.ObjectReleaseTracker;
-import org.apache.solr.core.CoreDescriptor;
-
-/**
- * Used to manage all ZkShardTerms of a collection
- */
-class ZkCollectionTerms implements AutoCloseable {
-  private final String collection;
-  private final Map<String, ZkShardTerms> terms;
-  private final SolrZkClient zkClient;
-
-  ZkCollectionTerms(String collection, SolrZkClient client) {
-    this.collection = collection;
-    this.terms = new HashMap<>();
-    this.zkClient = client;
-    ObjectReleaseTracker.track(this);
-  }
-
-
-  public ZkShardTerms getShard(String shardId) {
-    synchronized (terms) {
-      if (!terms.containsKey(shardId)) terms.put(shardId, new ZkShardTerms(collection, shardId, zkClient));
-      return terms.get(shardId);
-    }
-  }
-
-  public void remove(String shardId, CoreDescriptor coreDescriptor) {
-    synchronized (terms) {
-      if (getShard(shardId).removeTerm(coreDescriptor)) {
-        terms.remove(shardId).close();
-      }
-    }
-  }
-
-  public void close() {
-    synchronized (terms) {
-      terms.values().forEach(ZkShardTerms::close);
-    }
-    ObjectReleaseTracker.release(this);
-  }
-
-}
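
getShard() implements lazy per-shard creation with a synchronized check-then-put; with a
ConcurrentHashMap the creation step alone could be computeIfAbsent, although the class
would still need its lock to coordinate remove() and close(). A generic sketch of the
lazy-creation pattern; the names are illustrative:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

public class LazyPerShardSketch<V> {
  private final Map<String, V> perShard = new ConcurrentHashMap<>();
  private final Function<String, V> factory;

  public LazyPerShardSketch(Function<String, V> factory) {
    this.factory = factory;
  }

  // Atomic lookup-or-create, equivalent to getShard()'s synchronized check-then-put.
  public V get(String shardId) {
    return perShard.computeIfAbsent(shardId, factory);
  }

  public static void main(String[] args) {
    LazyPerShardSketch<String> terms = new LazyPerShardSketch<>(id -> "terms-for-" + id);
    System.out.println(terms.get("shard1"));  // created on first access
    System.out.println(terms.get("shard1"));  // same instance on later calls
  }
}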


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
deleted file mode 100644
index a447093..0000000
--- a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
+++ /dev/null
@@ -1,449 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-
-import org.apache.solr.client.solrj.io.ModelCache;
-import org.apache.solr.client.solrj.io.SolrClientCache;
-import org.apache.solr.client.solrj.io.Tuple;
-import org.apache.solr.client.solrj.io.comp.StreamComparator;
-import org.apache.solr.client.solrj.io.stream.*;
-import org.apache.solr.client.solrj.io.stream.expr.Explanation;
-import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType;
-import org.apache.solr.client.solrj.io.stream.expr.Expressible;
-import org.apache.solr.client.solrj.io.stream.expr.StreamExplanation;
-import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
-import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionNamedParameter;
-import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionParser;
-import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.core.CloseHook;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.PluginInfo;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.security.AuthorizationContext;
-import org.apache.solr.security.PermissionNameProvider;
-import org.apache.solr.util.plugin.SolrCoreAware;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.ID;
-
-/**
- * @since 5.1.0
- */
-public class StreamHandler extends RequestHandlerBase implements SolrCoreAware, PermissionNameProvider {
-
-  static SolrClientCache clientCache = new SolrClientCache();
-  static ModelCache modelCache = null;
-  static ConcurrentMap objectCache = new ConcurrentHashMap();
-  private SolrDefaultStreamFactory streamFactory = new SolrDefaultStreamFactory();
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private String coreName;
-  private Map<String,DaemonStream> daemons = Collections.synchronizedMap(new HashMap<>());
-
-  @Override
-  public PermissionNameProvider.Name getPermissionName(AuthorizationContext request) {
-    return PermissionNameProvider.Name.READ_PERM;
-  }
-
-  public static SolrClientCache getClientCache() {
-    return clientCache;
-  }
-
-  public void inform(SolrCore core) {
-
-    /*
-     * The stream factory will always contain the zkUrl for the given collection. Default streams are added with
-     * their corresponding function names. These defaults can be overridden or added to in the solrConfig in the
-     * stream RequestHandler definition. Example config override:
-     * <lst name="streamFunctions"> 
-     *  <str name="group">org.apache.solr.client.solrj.io.stream.ReducerStream</str> 
-     *  <str name="count">org.apache.solr.client.solrj.io.stream.RecordCountStream</str> 
-     * </lst>
-     */
-
-    String defaultCollection;
-    String defaultZkhost;
-    CoreContainer coreContainer = core.getCoreContainer();
-    this.coreName = core.getName();
-
-    if (coreContainer.isZooKeeperAware()) {
-      defaultCollection = core.getCoreDescriptor().getCollectionName();
-      defaultZkhost = core.getCoreContainer().getZkController().getZkServerAddress();
-      streamFactory.withCollectionZkHost(defaultCollection, defaultZkhost);
-      streamFactory.withDefaultZkHost(defaultZkhost);
-      modelCache = new ModelCache(250,
-          defaultZkhost,
-          clientCache);
-    }
-    streamFactory.withSolrResourceLoader(core.getResourceLoader());
-
-    // This pulls all the overrides and additions from the config
-    List<PluginInfo> pluginInfos = core.getSolrConfig().getPluginInfos(Expressible.class.getName());
-    for (PluginInfo pluginInfo : pluginInfos) {
-      Class<? extends Expressible> clazz = core.getMemClassLoader().findClass(pluginInfo.className, Expressible.class);
-      streamFactory.withFunctionName(pluginInfo.name, clazz);
-    }
-
-    core.addCloseHook(new CloseHook() {
-      @Override
-      public void preClose(SolrCore core) {
-        // No-op: nothing needs to happen before the core closes.
-      }
-
-      @Override
-      public void postClose(SolrCore core) {
-        clientCache.close();
-      }
-    });
-  }
-
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    SolrParams params = req.getParams();
-    params = adjustParams(params);
-    req.setParams(params);
-
-    if (params.get("action") != null) {
-      handleAdmin(req, rsp, params);
-      return;
-    }
-
-    TupleStream tupleStream;
-
-    try {
-      StreamExpression streamExpression = StreamExpressionParser.parse(params.get("expr"));
-      if (this.streamFactory.isEvaluator(streamExpression)) {
-        StreamExpression tupleExpression = new StreamExpression("tuple");
-        tupleExpression.addParameter(new StreamExpressionNamedParameter("return-value", streamExpression));
-        tupleStream = this.streamFactory.constructStream(tupleExpression);
-      } else {
-        tupleStream = this.streamFactory.constructStream(streamExpression);
-      }
-    } catch (Exception e) {
-      // Catch exceptions that occur while the stream is being created. This includes streaming
-      // expression parsing errors.
-      SolrException.log(log, e);
-      rsp.add("result-set", new DummyErrorStream(e));
-
-      return;
-    }
-
-    int worker = params.getInt("workerID", 0);
-    int numWorkers = params.getInt("numWorkers", 1);
-    StreamContext context = new StreamContext();
-    context.put("shards", getCollectionShards(params));
-    context.workerID = worker;
-    context.numWorkers = numWorkers;
-    context.setSolrClientCache(clientCache);
-    context.setModelCache(modelCache);
-    context.setObjectCache(objectCache);
-    context.put("core", this.coreName);
-    context.put("solr-core", req.getCore());
-    tupleStream.setStreamContext(context);
-
-    // if asking for explanation then go get it
-    if (params.getBool("explain", false)) {
-      rsp.add("explanation", tupleStream.toExplanation(this.streamFactory));
-    }
-
-    if (tupleStream instanceof DaemonStream) {
-      DaemonStream daemonStream = (DaemonStream) tupleStream;
-      if (daemons.containsKey(daemonStream.getId())) {
-        daemons.remove(daemonStream.getId()).close();
-      }
-      daemonStream.setDaemons(daemons);
-      daemonStream.open(); // This will start the daemonStream
-      daemons.put(daemonStream.getId(), daemonStream);
-      rsp.add("result-set", new DaemonResponseStream("Deamon:" + daemonStream.getId() + " started on " + coreName));
-    } else {
-      rsp.add("result-set", new TimerStream(new ExceptionStream(tupleStream)));
-    }
-  }
-
-  private void handleAdmin(SolrQueryRequest req, SolrQueryResponse rsp, SolrParams params) {
-    String action = params.get("action");
-    if ("stop".equalsIgnoreCase(action)) {
-      String id = params.get(ID);
-      DaemonStream d = daemons.get(id);
-      if (d != null) {
-        d.close();
-        rsp.add("result-set", new DaemonResponseStream("Deamon:" + id + " stopped on " + coreName));
-      } else {
-        rsp.add("result-set", new DaemonResponseStream("Deamon:" + id + " not found on " + coreName));
-      }
-    } else {
-      if ("start".equalsIgnoreCase(action)) {
-        String id = params.get(ID);
-        DaemonStream d = daemons.get(id);
-        d.open();
-        rsp.add("result-set", new DaemonResponseStream("Deamon:" + id + " started on " + coreName));
-      } else if ("list".equalsIgnoreCase(action)) {
-        Collection<DaemonStream> vals = daemons.values();
-        rsp.add("result-set", new DaemonCollectionStream(vals));
-      } else if ("kill".equalsIgnoreCase(action)) {
-        String id = params.get("id");
-        DaemonStream d = daemons.remove(id);
-        if (d != null) {
-          d.close();
-        }
-        rsp.add("result-set", new DaemonResponseStream("Deamon:" + id + " killed on " + coreName));
-      }
-    }
-  }
-
-  private SolrParams adjustParams(SolrParams params) {
-    ModifiableSolrParams adjustedParams = new ModifiableSolrParams();
-    adjustedParams.add(params);
-    adjustedParams.add(CommonParams.OMIT_HEADER, "true");
-    return adjustedParams;
-  }
-
-  public String getDescription() {
-    return "StreamHandler";
-  }
-
-  public String getSource() {
-    return null;
-  }
-
-  public static class DummyErrorStream extends TupleStream {
-    private Exception e;
-
-    public DummyErrorStream(Exception e) {
-      this.e = e;
-    }
-
-    public StreamComparator getStreamSort() {
-      return null;
-    }
-
-    public void close() {}
-
-    public void open() {}
-
-    public void setStreamContext(StreamContext context) {}
-
-    public List<TupleStream> children() {
-      return null;
-    }
-
-    @Override
-    public Explanation toExplanation(StreamFactory factory) throws IOException {
-
-      return new StreamExplanation(getStreamNodeId().toString())
-          .withFunctionName("error")
-          .withImplementingClass(this.getClass().getName())
-          .withExpressionType(ExpressionType.STREAM_DECORATOR)
-          .withExpression("--non-expressible--");
-    }
-
-    public Tuple read() {
-      String msg = e.getMessage();
-
-      Throwable t = e.getCause();
-      while (t != null) {
-        msg = t.getMessage();
-        t = t.getCause();
-      }
-
-      Map m = new HashMap();
-      m.put("EOF", true);
-      m.put("EXCEPTION", msg);
-      return new Tuple(m);
-    }
-  }
-
-  public static class DaemonCollectionStream extends TupleStream {
-    private Iterator<DaemonStream> it;
-
-    public DaemonCollectionStream(Collection<DaemonStream> col) {
-      this.it = col.iterator();
-    }
-
-    public StreamComparator getStreamSort() {
-      return null;
-    }
-
-    public void close() {}
-
-    public void open() {}
-
-    public void setStreamContext(StreamContext context) {}
-
-    public List<TupleStream> children() {
-      return null;
-    }
-
-    @Override
-    public Explanation toExplanation(StreamFactory factory) throws IOException {
-
-      return new StreamExplanation(getStreamNodeId().toString())
-          .withFunctionName("daemon-collection")
-          .withImplementingClass(this.getClass().getName())
-          .withExpressionType(ExpressionType.STREAM_DECORATOR)
-          .withExpression("--non-expressible--");
-    }
-
-    public Tuple read() {
-      if (it.hasNext()) {
-        return it.next().getInfo();
-      } else {
-        Map m = new HashMap();
-        m.put("EOF", true);
-        return new Tuple(m);
-      }
-    }
-  }
-
-  public static class DaemonResponseStream extends TupleStream {
-    private String message;
-    private boolean sendEOF = false;
-
-    public DaemonResponseStream(String message) {
-      this.message = message;
-    }
-
-    public StreamComparator getStreamSort() {
-      return null;
-    }
-
-    public void close() {}
-
-    public void open() {}
-
-    public void setStreamContext(StreamContext context) {}
-
-    public List<TupleStream> children() {
-      return null;
-    }
-
-    @Override
-    public Explanation toExplanation(StreamFactory factory) throws IOException {
-
-      return new StreamExplanation(getStreamNodeId().toString())
-          .withFunctionName("daemon-response")
-          .withImplementingClass(this.getClass().getName())
-          .withExpressionType(ExpressionType.STREAM_DECORATOR)
-          .withExpression("--non-expressible--");
-    }
-
-    public Tuple read() {
-      if (sendEOF) {
-        Map m = new HashMap();
-        m.put("EOF", true);
-        return new Tuple(m);
-      } else {
-        sendEOF = true;
-        Map m = new HashMap();
-        m.put("DaemonOp", message);
-        return new Tuple(m);
-      }
-    }
-  }
-
-  public static class TimerStream extends TupleStream {
-
-    private long begin;
-    private TupleStream tupleStream;
-
-    public TimerStream(TupleStream tupleStream) {
-      this.tupleStream = tupleStream;
-    }
-
-    public StreamComparator getStreamSort() {
-      return this.tupleStream.getStreamSort();
-    }
-
-    public void close() throws IOException {
-      this.tupleStream.close();
-    }
-
-    public void open() throws IOException {
-      this.begin = System.nanoTime();
-      this.tupleStream.open();
-    }
-
-    public void setStreamContext(StreamContext context) {
-      this.tupleStream.setStreamContext(context);
-    }
-
-    public List<TupleStream> children() {
-      return this.tupleStream.children();
-    }
-
-    @Override
-    public Explanation toExplanation(StreamFactory factory) throws IOException {
-
-      return new StreamExplanation(getStreamNodeId().toString())
-          .withFunctionName("timer")
-          .withImplementingClass(this.getClass().getName())
-          .withExpressionType(ExpressionType.STREAM_DECORATOR)
-          .withExpression("--non-expressible--");
-    }
-
-    public Tuple read() throws IOException {
-      Tuple tuple = this.tupleStream.read();
-      if (tuple.EOF) {
-        long totalTime = (System.nanoTime() - begin) / 1000000;
-        tuple.fields.put("RESPONSE_TIME", totalTime);
-      }
-      return tuple;
-    }
-  }
-
-  private Map<String,List<String>> getCollectionShards(SolrParams params) {
-
-    Map<String,List<String>> collectionShards = new HashMap<>();
-    Iterator<String> paramsIt = params.getParameterNamesIterator();
-    while (paramsIt.hasNext()) {
-      String param = paramsIt.next();
-      if (param.indexOf(".shards") > -1) {
-        String collection = param.split("\\.")[0];
-        String shardString = params.get(param);
-        String[] shards = shardString.split(",");
-        List<String> shardList = new ArrayList<>();
-        for (String shard : shards) {
-          shardList.add(shard);
-        }
-        collectionShards.put(collection, shardList);
-      }
-    }
-
-    if (collectionShards.size() > 0) {
-      return collectionShards;
-    } else {
-      return null;
-    }
-  }
-}
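
On the client side, the result-set written by handleRequestBody() is consumed as a
sequence of tuples terminated by an EOF tuple. A minimal SolrJ sketch; the base URL,
collection name and expression are illustrative:

import org.apache.solr.client.solrj.io.Tuple;
import org.apache.solr.client.solrj.io.stream.SolrStream;
import org.apache.solr.common.params.ModifiableSolrParams;

public class StreamClientSketch {
  public static void main(String[] args) throws Exception {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("expr", "search(collection1, q=\"*:*\", fl=\"id\", sort=\"id asc\")");
    params.set("qt", "/stream");  // target the StreamHandler

    SolrStream stream = new SolrStream("http://localhost:8983/solr/collection1", params);
    try {
      stream.open();
      while (true) {
        Tuple tuple = stream.read();
        if (tuple.EOF) break;       // the handler marks the end of the result-set
        System.out.println(tuple.getString("id"));
      }
    } finally {
      stream.close();
    }
  }
}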

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java
deleted file mode 100644
index cbe2cba..0000000
--- a/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.MapSolrParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.params.UpdateParams;
-import org.apache.solr.common.util.ContentStream;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.handler.loader.CSVLoader;
-import org.apache.solr.handler.loader.ContentStreamLoader;
-import org.apache.solr.handler.loader.JavabinLoader;
-import org.apache.solr.handler.loader.JsonLoader;
-import org.apache.solr.handler.loader.XMLLoader;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.security.AuthorizationContext;
-import org.apache.solr.security.PermissionNameProvider;
-import org.apache.solr.update.processor.UpdateRequestProcessor;
-
-import static org.apache.solr.common.params.CommonParams.PATH;
-import static org.apache.solr.security.PermissionNameProvider.Name.UPDATE_PERM;
-
-/**
- * UpdateHandler that uses content-type to pick the right Loader
- */
-public class UpdateRequestHandler extends ContentStreamHandlerBase implements PermissionNameProvider {
-
-  // XML Constants
-  public static final String ADD = "add";
-  public static final String DELETE = "delete";
-  public static final String OPTIMIZE = "optimize";
-  public static final String COMMIT = "commit";
-  public static final String ROLLBACK = "rollback";
-  public static final String WAIT_SEARCHER = "waitSearcher";
-  public static final String SOFT_COMMIT = "softCommit";
-
-  public static final String OVERWRITE = "overwrite";
-
-  public static final String VERSION = "version";
-
-  // NOTE: This constant is for use with the <add> XML tag, not the HTTP param of the same name
-  public static final String COMMIT_WITHIN = "commitWithin";
-
-  Map<String,ContentStreamLoader> loaders = null;
-
-  ContentStreamLoader instance = new ContentStreamLoader() {
-    @Override
-    public void load(SolrQueryRequest req, SolrQueryResponse rsp,
-        ContentStream stream, UpdateRequestProcessor processor) throws Exception {
-
-      ContentStreamLoader loader = pathVsLoaders.get(req.getContext().get(PATH));
-      if(loader == null) {
-        String type = req.getParams().get(UpdateParams.ASSUME_CONTENT_TYPE);
-        if (type == null) {
-          type = stream.getContentType();
-        }
-        if (type == null) { // Normal requests will not get here.
-          throw new SolrException(ErrorCode.UNSUPPORTED_MEDIA_TYPE, "Missing ContentType");
-        }
-        int idx = type.indexOf(';');
-        if (idx > 0) {
-          type = type.substring(0, idx);
-        }
-        loader = loaders.get(type);
-        if (loader == null) {
-          throw new SolrException(ErrorCode.UNSUPPORTED_MEDIA_TYPE, "Unsupported ContentType: "
-              + type + "  Not in: " + loaders.keySet());
-        }
-      }
-
-      if(loader.getDefaultWT()!=null) {
-        setDefaultWT(req,loader);
-      }
-      loader.load(req, rsp, stream, processor);
-    }
-
-    private void setDefaultWT(SolrQueryRequest req, ContentStreamLoader loader) {
-      SolrParams params = req.getParams();
-      if( params.get(CommonParams.WT) == null ) {
-        String wt = loader.getDefaultWT();
-        // Make sure it is a valid writer
-        if(req.getCore().getQueryResponseWriter(wt)!=null) {
-          Map<String,String> map = new HashMap<>(1);
-          map.put(CommonParams.WT, wt);
-          req.setParams(SolrParams.wrapDefaults(params,
-              new MapSolrParams(map)));
-        }
-      }
-    }
-  };
-
-  @Override
-  public void init(NamedList args) {
-    super.init(args);
-
-    // Since it is backed by a non-thread-safe Map, it should not be modifiable
-    loaders = Collections.unmodifiableMap(createDefaultLoaders(args));
-  }
-
-  protected void setAssumeContentType(String ct) {
-    if(invariants==null) {
-      Map<String,String> map = new HashMap<>();
-      map.put(UpdateParams.ASSUME_CONTENT_TYPE,ct);
-      invariants = new MapSolrParams(map);
-    }
-    else {
-      ModifiableSolrParams params = new ModifiableSolrParams(invariants);
-      params.set(UpdateParams.ASSUME_CONTENT_TYPE,ct);
-      invariants = params;
-    }
-  }
-  private Map<String ,ContentStreamLoader> pathVsLoaders = new HashMap<>();
-  protected Map<String,ContentStreamLoader> createDefaultLoaders(NamedList args) {
-    SolrParams p = null;
-    if(args!=null) {
-      p = args.toSolrParams();
-    }
-    Map<String,ContentStreamLoader> registry = new HashMap<>();
-    registry.put("application/xml", new XMLLoader().init(p) );
-    registry.put("application/json", new JsonLoader().init(p) );
-    registry.put("application/csv", new CSVLoader().init(p) );
-    registry.put("application/javabin", new JavabinLoader(instance).init(p) );
-    registry.put("text/csv", registry.get("application/csv") );
-    registry.put("text/xml", registry.get("application/xml") );
-    registry.put("text/json", registry.get("application/json"));
-
-    pathVsLoaders.put(JSON_PATH,registry.get("application/json"));
-    pathVsLoaders.put(DOC_PATH,registry.get("application/json"));
-    pathVsLoaders.put(CSV_PATH,registry.get("application/csv"));
-    pathVsLoaders.put(BIN_PATH,registry.get("application/javabin"));
-    return registry;
-  }
-
-  @Override
-  public PermissionNameProvider.Name getPermissionName(AuthorizationContext ctx) {
-    return UPDATE_PERM;
-  }
-
-  @Override
-  protected ContentStreamLoader newLoader(SolrQueryRequest req, final UpdateRequestProcessor processor) {
-    return instance;
-  }
-
-  //////////////////////// SolrInfoMBeans methods //////////////////////
-
-  @Override
-  public String getDescription() {
-    return "Add documents using XML (with XSLT), CSV, JSON, or javabin";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.UPDATE;
-  }
-
-  public static final String DOC_PATH = "/update/json/docs";
-  public static final String JSON_PATH = "/update/json";
-  public static final String CSV_PATH = "/update/csv";
-  public static final String BIN_PATH = "/update/bin";
-
-}
-
-
-
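
For reference, the dispatch in the anonymous loader above reduces to "strip any charset
suffix from the Content-Type, then consult the type-to-loader registry". A minimal
standalone sketch of that rule (hypothetical class name, string stand-ins for the real
loaders):

    import java.util.HashMap;
    import java.util.Map;

    class LoaderLookupSketch {
      // Strip a charset suffix so "text/xml; charset=UTF-8" matches the registry key.
      static String normalize(String contentType) {
        int idx = contentType.indexOf(';');
        return idx > 0 ? contentType.substring(0, idx) : contentType;
      }

      public static void main(String[] args) {
        Map<String, String> loaders = new HashMap<>();
        loaders.put("application/json", "JsonLoader");
        loaders.put("text/xml", "XMLLoader");
        System.out.println(loaders.get(normalize("text/xml; charset=UTF-8"))); // XMLLoader
      }
    }

If neither the request path nor the normalized type resolves a loader, the handler answers
with UNSUPPORTED_MEDIA_TYPE, as the code above shows.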

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandlerApi.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandlerApi.java b/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandlerApi.java
deleted file mode 100644
index f7bc140..0000000
--- a/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandlerApi.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Map;
-
-import com.google.common.collect.ImmutableMap;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.api.Api;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-
-
-public class UpdateRequestHandlerApi extends UpdateRequestHandler  {
-
-
-  @Override
-  public Collection<Api> getApis() {
-    return Collections.singleton(getApiImpl());
-  }
-
-  private Api getApiImpl() {
-    return new Api(Utils.getSpec("core.Update")) {
-      @Override
-      public void call(SolrQueryRequest req, SolrQueryResponse rsp) {
-        String path = req.getPath();
-        String target = mapping.get(path);
-        if (target != null) req.getContext().put("path", target);
-        try {
-          handleRequest(req, rsp);
-        } catch (RuntimeException e) {
-          throw e;
-        } catch (Exception e){
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,e );
-        }
-      }
-    };
-  }
-
-  @Override
-  public Boolean registerV1() {
-    return Boolean.FALSE;
-  }
-
-  @Override
-  public Boolean registerV2() {
-    return Boolean.TRUE;
-  }
-
-  private static final Map<String, String> mapping = ImmutableMap.<String,String>builder()
-      .put("/update", DOC_PATH)
-      .put(JSON_PATH, DOC_PATH)
-      .put("/update/json/commands", JSON_PATH)
-      .build();
-}
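
The v2 request routing above is just a static path-to-path translation before delegating to
the parent handler. A self-contained sketch of that translation (hypothetical class name;
the target paths are the constants defined above):

    import java.util.Map;

    class PathRemapSketch {
      static final Map<String, String> MAPPING = Map.of(
          "/update", "/update/json/docs",
          "/update/json", "/update/json/docs",
          "/update/json/commands", "/update/json");

      static String target(String requestPath) {
        // Unmapped paths fall through unchanged, mirroring the null check above.
        return MAPPING.getOrDefault(requestPath, requestPath);
      }

      public static void main(String[] args) {
        System.out.println(target("/update"));     // /update/json/docs
        System.out.println(target("/update/bin")); // unchanged
      }
    }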

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/AdminHandlersProxy.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/AdminHandlersProxy.java b/solr/core/src/java/org/apache/solr/handler/admin/AdminHandlersProxy.java
deleted file mode 100644
index ae3e01f..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/AdminHandlersProxy.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.admin;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.net.URL;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.GenericSolrRequest;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.MapSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.Pair;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Static methods to proxy calls to an Admin (GET) API to other nodes in the cluster and return a combined response
- */
-public class AdminHandlersProxy {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private static final String PARAM_NODES = "nodes";
-
-  // Proxy this request to other remote node(s) if the 'nodes' parameter is provided
-  public static boolean maybeProxyToNodes(SolrQueryRequest req, SolrQueryResponse rsp, CoreContainer container)
-      throws IOException, SolrServerException, InterruptedException {
-    String nodeNames = req.getParams().get(PARAM_NODES);
-    if (nodeNames == null || nodeNames.isEmpty()) {
-      return false; // local request
-    }
-
-    if (!container.isZooKeeperAware()) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Parameter " + PARAM_NODES + " only supported in Cloud mode");
-    }
-    
-    Set<String> nodes;
-    String pathStr = req.getPath();
-    
-    Map<String,String> paramsMap = req.getParams().toMap(new HashMap<>());
-    paramsMap.remove(PARAM_NODES);
-    SolrParams params = new MapSolrParams(paramsMap);
-    Set<String> liveNodes = container.getZkController().zkStateReader.getClusterState().getLiveNodes();
-    
-    if (nodeNames.equals("all")) {
-      nodes = liveNodes;
-      log.debug("All live nodes requested");
-    } else {
-      nodes = new HashSet<>(Arrays.asList(nodeNames.split(",")));
-      for (String nodeName : nodes) {
-        if (!nodeName.matches("^[^/:]+:\\d+_[\\w/]+$")) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Parameter " + PARAM_NODES + " has wrong format");
-        }
-
-        if (!liveNodes.contains(nodeName)) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Requested node " + nodeName + " is not part of cluster");
-        }
-      }       
-      log.debug("Nodes requested: {}", nodes);
-    }
-    log.debug(PARAM_NODES + " parameter {} specified on {} request", nodeNames, pathStr);
-    
-    Map<String, Pair<Future<NamedList<Object>>, SolrClient>> responses = new HashMap<>();
-    for (String node : nodes) {
-      responses.put(node, callRemoteNode(node, pathStr, params, container.getZkController()));
-    }
-    
-    for (Map.Entry<String, Pair<Future<NamedList<Object>>, SolrClient>> entry : responses.entrySet()) {
-      try {
-        NamedList<Object> resp = entry.getValue().first().get(10, TimeUnit.SECONDS);
-        entry.getValue().second().close();
-        rsp.add(entry.getKey(), resp);
-      } catch (ExecutionException ee) {
-        log.warn("Exception when fetching result from node {}", entry.getKey(), ee);
-      } catch (TimeoutException te) {
-        log.warn("Timeout when fetching result from node {}", entry.getKey(), te);
-      }
-    }
-    log.info("Fetched response from {} nodes: {}", responses.keySet().size(), responses.keySet());
-    return true;
-  } 
-
-  /**
-   * Makes a remote request and returns a future and the Solr client. The caller is responsible for closing the client.
-   */
-  public static Pair<Future<NamedList<Object>>, SolrClient> callRemoteNode(String nodeName, String endpoint, 
-                                                                           SolrParams params, ZkController zkController) 
-      throws IOException, SolrServerException {
-    log.debug("Proxying {} request to node {}", endpoint, nodeName);
-    URL baseUrl = new URL(zkController.zkStateReader.getBaseUrlForNodeName(nodeName));
-    HttpSolrClient solr = new HttpSolrClient.Builder(baseUrl.toString()).build();
-    SolrRequest proxyReq = new GenericSolrRequest(SolrRequest.METHOD.GET, endpoint, params);
-    HttpSolrClient.HttpUriRequestResponse proxyResp = solr.httpUriRequest(proxyReq);
-    return new Pair<>(proxyResp.future, solr);
-  }
-}
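
From a client's point of view, the proxying above is triggered purely by the "nodes"
parameter. A hedged SolrJ usage sketch (the /admin/metrics endpoint and localhost URL are
illustrative assumptions; any admin GET handler that consults maybeProxyToNodes would do):

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.SolrRequest;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.GenericSolrRequest;
    import org.apache.solr.common.params.ModifiableSolrParams;
    import org.apache.solr.common.util.NamedList;

    public class NodesParamSketch {
      public static void main(String[] args) throws Exception {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("nodes", "all"); // fan the request out to every live node
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
          NamedList<Object> rsp = client.request(
              new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/metrics", params));
          System.out.println(rsp); // one sub-section per contacted node
        }
      }
    }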

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/AutoscalingHistoryHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/AutoscalingHistoryHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/AutoscalingHistoryHandler.java
deleted file mode 100644
index ae99453..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/AutoscalingHistoryHandler.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import java.lang.invoke.MethodHandles;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Optional;
-
-import org.apache.solr.api.Api;
-import org.apache.solr.api.ApiBag;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.cloud.autoscaling.SystemLogListener;
-import org.apache.solr.cloud.autoscaling.TriggerEvent;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.AutoScalingParams;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.security.AuthorizationContext;
-import org.apache.solr.security.PermissionNameProvider;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This handler makes it easier to retrieve a history of autoscaling events from the .system
- * collection.
- */
-public class AutoscalingHistoryHandler extends RequestHandlerBase implements PermissionNameProvider {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String SYSTEM_COLLECTION_PARAM = "systemCollection";
-
-  public static final String ACTION_PARAM = "action";
-  public static final String MESSAGE_PARAM = "message";
-  public static final String TRIGGER_PARAM = AutoScalingParams.TRIGGER;
-  public static final String TYPE_PARAM = "eventType";
-  public static final String NODE_PARAM = "node";
-  public static final String COLLECTION_PARAM = CollectionAdminParams.COLLECTION;
-  public static final String STAGE_PARAM = AutoScalingParams.STAGE;
-  public static final String BEFORE_ACTION_PARAM = AutoScalingParams.BEFORE_ACTION;
-  public static final String AFTER_ACTION_PARAM = AutoScalingParams.AFTER_ACTION;
-
-  private static final String EVENTS_FQ = "{!term f=" + CommonParams.TYPE + "}" + SystemLogListener.DOC_TYPE;
-
-  private static final String ACTION_FQ_FORMAT = "{!term f=" + SystemLogListener.ACTION_FIELD + "}%s";
-  private static final String MESSAGE_FQ_FORMAT = "{!lucene}" + SystemLogListener.MESSAGE_FIELD + ":%s";
-  private static final String TRIGGER_FQ_FORMAT = "{!term f=" + SystemLogListener.EVENT_SOURCE_FIELD + "}%s";
-  private static final String STAGE_FQ_FORMAT = "{!term f=" + SystemLogListener.STAGE_FIELD + "}%s";
-  private static final String COLLECTION_FQ_FORMAT = "{!term f=" + SystemLogListener.COLLECTIONS_FIELD + "}%s";
-  private static final String TYPE_FQ_FORMAT = "{!term f=" + SystemLogListener.EVENT_TYPE_FIELD + "}%s";
-  private static final String NODE_FQ_FORMAT = "{!term f=event.property." + TriggerEvent.NODE_NAMES + "_ss}%s";
-  private static final String BEFORE_ACTION_FQ_FORMAT = "{!term f=" + SystemLogListener.BEFORE_ACTIONS_FIELD + "}%s";
-  private static final String AFTER_ACTION_FQ_FORMAT = "{!term f=" + SystemLogListener.AFTER_ACTIONS_FIELD + "}%s";
-
-  private static final Map<String, String> formats = new HashMap<String, String>() {{
-    put(ACTION_PARAM, ACTION_FQ_FORMAT);
-    put(MESSAGE_PARAM, MESSAGE_FQ_FORMAT);
-    put(TRIGGER_PARAM, TRIGGER_FQ_FORMAT);
-    put(TYPE_PARAM, TYPE_FQ_FORMAT);
-    put(STAGE_PARAM, STAGE_FQ_FORMAT);
-    put(NODE_PARAM, NODE_FQ_FORMAT);
-    put(COLLECTION_PARAM, COLLECTION_FQ_FORMAT);
-    put(BEFORE_ACTION_PARAM, BEFORE_ACTION_FQ_FORMAT);
-    put(AFTER_ACTION_PARAM, AFTER_ACTION_FQ_FORMAT);
-  }};
-
-  private final CoreContainer coreContainer;
-
-
-  public AutoscalingHistoryHandler(CoreContainer coreContainer) {
-    this.coreContainer = coreContainer;
-  }
-
-  @Override
-  public Name getPermissionName(AuthorizationContext request) {
-    return Name.AUTOSCALING_HISTORY_READ_PERM;
-  }
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    ModifiableSolrParams params = new ModifiableSolrParams(req.getParams());
-    String collection = params.get(SYSTEM_COLLECTION_PARAM, CollectionAdminParams.SYSTEM_COLL);
-    params.remove(SYSTEM_COLLECTION_PARAM);
-    params.remove(CommonParams.QT);
-    // ensure there is a main query; default to *:* if absent
-    if (params.get(CommonParams.Q) == null) {
-      params.add(CommonParams.Q, "*:*");
-    }
-    // sort by doc id, which are time-based, unless specified otherwise
-    if (params.get(CommonParams.SORT) == null) {
-      params.add(CommonParams.SORT, "id asc");
-    }
-    // filter query to pick only autoscaling events
-    params.remove(CommonParams.FQ, EVENTS_FQ);
-    params.add(CommonParams.FQ, EVENTS_FQ);
-    // add filters translated from simplified parameters
-    for (Map.Entry<String, String> e : formats.entrySet()) {
-      String[] values = params.remove(e.getKey());
-      if (values != null) {
-        for (String value : values) {
-          params.add(CommonParams.FQ, String.format(Locale.ROOT, e.getValue(), value));
-        }
-      }
-    }
-    try (CloudSolrClient cloudSolrClient = new CloudSolrClient.Builder(Collections.singletonList(coreContainer.getZkController().getZkServerAddress()), Optional.empty())
-        .withHttpClient(coreContainer.getUpdateShardHandler().getDefaultHttpClient())
-        .build()) {
-      QueryResponse qr = cloudSolrClient.query(collection, params);
-      rsp.setAllValues(qr.getResponse());
-    } catch (Exception e) {
-      if ((e instanceof SolrException) && e.getMessage().contains("Collection not found")) {
-        // relatively benign
-        String msg = "Collection " + collection + " does not exist.";
-        log.info(msg);
-        rsp.getValues().add("error", msg);
-      } else {
-        throw e;
-      }
-    }
-  }
-
-  @Override
-  public String getDescription() {
-    return "A handler to return autoscaling event history";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-
-  @Override
-  public Boolean registerV2() {
-    return Boolean.TRUE;
-  }
-
-  @Override
-  public Collection<Api> getApis() {
-    return ApiBag.wrapRequestHandlers(this, "autoscaling.history");
-  }
-
-}
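
Each simplified parameter above is translated into a filter query before the .system
collection is searched. A hedged client-side sketch (the /admin/autoscaling/history path is
the endpoint this handler is conventionally registered at; the trigger name is made up):

    import java.util.Collections;
    import java.util.Optional;

    import org.apache.solr.client.solrj.SolrRequest;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.GenericSolrRequest;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class HistoryQuerySketch {
      public static void main(String[] args) throws Exception {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("trigger", "node_lost_trigger"); // becomes a {!term} fq, per the formats map
        params.set("eventType", "NODELOST");
        try (CloudSolrClient client = new CloudSolrClient.Builder(
            Collections.singletonList("localhost:9983"), Optional.empty()).build()) {
          System.out.println(client.request(
              new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/autoscaling/history", params)));
        }
      }
    }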

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/BackupCoreOp.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/BackupCoreOp.java b/solr/core/src/java/org/apache/solr/handler/admin/BackupCoreOp.java
deleted file mode 100644
index 503eed0..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/BackupCoreOp.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.admin;
-
-import java.net.URI;
-import java.util.Optional;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.backup.repository.BackupRepository;
-import org.apache.solr.handler.SnapShooter;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-
-class BackupCoreOp implements CoreAdminHandler.CoreAdminOp {
-  @Override
-  public void execute(CoreAdminHandler.CallInfo it) throws Exception {
-    final SolrParams params = it.req.getParams();
-
-    String cname = params.required().get(CoreAdminParams.CORE);
-    String name = params.required().get(NAME);
-
-    String repoName = params.get(CoreAdminParams.BACKUP_REPOSITORY);
-    BackupRepository repository = it.handler.coreContainer.newBackupRepository(Optional.ofNullable(repoName));
-
-    String location = repository.getBackupLocation(params.get(CoreAdminParams.BACKUP_LOCATION));
-    if (location == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
-          + " parameter or as a default repository property");
-    }
-
-    // An optional parameter describing the snapshot to be backed up. If this
-    // parameter is not supplied, the latest index commit is backed up.
-    String commitName = params.get(CoreAdminParams.COMMIT_NAME);
-
-    URI locationUri = repository.createURI(location);
-    try (SolrCore core = it.handler.coreContainer.getCore(cname)) {
-      SnapShooter snapShooter = new SnapShooter(repository, core, locationUri, name, commitName);
-      // validateCreateSnapshot will create parent dirs instead of throwing; that choice is dubious
-      //  here, because we want to throw. One reason: when triggered via a collection backup on a
-      //  shared file system, this directory really should (in fact must) already exist. If it
-      //  doesn't, perhaps the FS location isn't shared -- and we want that to be an error.
-      if (!snapShooter.getBackupRepository().exists(snapShooter.getLocation())) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "Directory to contain snapshots doesn't exist: " + snapShooter.getLocation() + ". " +
-            "Note that Backup/Restore of a SolrCloud collection " +
-            "requires a shared file system mounted at the same path on all nodes!");
-      }
-      snapShooter.validateCreateSnapshot();
-      snapShooter.createSnapshot();
-    } catch (Exception e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Failed to backup core=" + cname + " because " + e, e);
-    }
-  }
-}
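
The operation above is reached through the CoreAdmin API. A hedged invocation sketch (core
name, snapshot name, and location are placeholders; the location directory must already
exist, per the check above):

    import org.apache.solr.client.solrj.SolrRequest;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.GenericSolrRequest;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class BackupCoreSketch {
      public static void main(String[] args) throws Exception {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("action", "BACKUP");
        params.set("core", "techproducts");      // required, see params.required() above
        params.set("name", "nightly");           // required snapshot name
        params.set("location", "/backups/solr"); // must already exist on a shared FS
        try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
          System.out.println(client.request(
              new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/cores", params)));
        }
      }
    }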

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/BaseHandlerApiSupport.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/BaseHandlerApiSupport.java b/solr/core/src/java/org/apache/solr/handler/admin/BaseHandlerApiSupport.java
deleted file mode 100644
index 90a2dd2..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/BaseHandlerApiSupport.java
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.admin;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-
-import com.google.common.collect.ImmutableList;
-import org.apache.solr.api.Api;
-import org.apache.solr.api.ApiSupport;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.request.CollectionApiMapping.CommandMeta;
-import org.apache.solr.client.solrj.request.CollectionApiMapping.V2EndPoint;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.CommandOperation;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-
-import static org.apache.solr.client.solrj.SolrRequest.METHOD.POST;
-import static org.apache.solr.common.SolrException.ErrorCode.BAD_REQUEST;
-import static org.apache.solr.common.util.StrUtils.splitSmart;
-
-/**
- * This is a utility class to provide an easy mapping of request handlers which support multiple commands
- * to the V2 API format (core admin api, collections api). This helps in automatically mapping paths
- * to actions and old parameter names to new parameter names
- */
-public abstract class BaseHandlerApiSupport implements ApiSupport {
-  protected final Map<SolrRequest.METHOD, Map<V2EndPoint, List<ApiCommand>>> commandsMapping;
-
-  protected BaseHandlerApiSupport() {
-    commandsMapping = new HashMap<>();
-    for (ApiCommand cmd : getCommands()) {
-      Map<V2EndPoint, List<ApiCommand>> m = commandsMapping.get(cmd.meta().getHttpMethod());
-      if (m == null) commandsMapping.put(cmd.meta().getHttpMethod(), m = new HashMap<>());
-      List<ApiCommand> list = m.get(cmd.meta().getEndPoint());
-      if (list == null) m.put(cmd.meta().getEndPoint(), list = new ArrayList<>());
-      list.add(cmd);
-    }
-  }
-
-  @Override
-  public synchronized Collection<Api> getApis() {
-    ImmutableList.Builder<Api> l = ImmutableList.builder();
-    for (V2EndPoint op : getEndPoints()) l.add(getApi(op));
-    return l.build();
-  }
-
-
-  private Api getApi(final V2EndPoint op) {
-    final BaseHandlerApiSupport apiHandler = this;
-    return new Api(Utils.getSpec(op.getSpecName())) {
-      @Override
-      public void call(SolrQueryRequest req, SolrQueryResponse rsp) {
-        SolrParams params = req.getParams();
-        SolrRequest.METHOD method = SolrRequest.METHOD.valueOf(req.getHttpMethod());
-        List<ApiCommand> commands = commandsMapping.get(method).get(op);
-        try {
-          if (method == POST) {
-            List<CommandOperation> cmds = req.getCommands(true);
-            if (cmds.size() > 1)
-              throw new SolrException(BAD_REQUEST, "Only one command is allowed");
-            CommandOperation c = cmds.size() == 0 ? null : cmds.get(0);
-            ApiCommand command = null;
-            String commandName = c == null ? null : c.name;
-            for (ApiCommand cmd : commands) {
-              if (Objects.equals(cmd.meta().getName(), commandName)) {
-                command = cmd;
-                break;
-              }
-            }
-
-            if (command == null) {
-              throw new SolrException(BAD_REQUEST, " no such command " + c);
-            }
-            wrapParams(req, c, command, false);
-            command.invoke(req, rsp, apiHandler);
-
-          } else {
-            if (commands == null || commands.isEmpty()) {
-              rsp.add("error", "No support for : " + method + " at :" + req.getPath());
-              return;
-            }
-            if (commands.size() > 1) {
-              for (ApiCommand command : commands) {
-                if (command.meta().getName().equals(req.getPath())) {
-                  commands = Collections.singletonList(command);
-                  break;
-                }
-              }
-            }
-            wrapParams(req, new CommandOperation("", Collections.emptyMap()), commands.get(0), true);
-            commands.get(0).invoke(req, rsp, apiHandler);
-          }
-
-        } catch (SolrException e) {
-          throw e;
-        } catch (Exception e) {
-          throw new SolrException(BAD_REQUEST, e); // TODO: BAD_REQUEST is a wild guess; should we flip the default? Fail here to investigate how this happens in tests.
-        } finally {
-          req.setParams(params);
-        }
-
-      }
-    };
-
-  }
-
-  /**
-   * Wraps the request's V2 params in a SolrParams view that exposes them as V1 params.
-   */
-  private static void wrapParams(final SolrQueryRequest req, final CommandOperation co, final ApiCommand cmd, final boolean useRequestParams) {
-    final Map<String, String> pathValues = req.getPathTemplateValues();
-    final Map<String, Object> map = co == null || !(co.getCommandData() instanceof Map) ?
-        Collections.singletonMap("", co.getCommandData()) : co.getDataMap();
-    final SolrParams origParams = req.getParams();
-
-    req.setParams(
-        new SolrParams() {
-          @Override
-          public String get(String param) {
-            Object vals = getParams0(param);
-            if (vals == null) return null;
-            if (vals instanceof String) return (String) vals;
-            if (vals instanceof Boolean || vals instanceof Number) return String.valueOf(vals);
-            if (vals instanceof String[] && ((String[]) vals).length > 0) return ((String[]) vals)[0];
-            return null;
-          }
-
-          private Object getParams0(String param) {
-            param = cmd.meta().getParamSubstitute(param); // v1 -> v2, possibly dotted path
-            Object o = param.indexOf('.') > 0 ?
-                Utils.getObjectByPath(map, true, splitSmart(param, '.')) :
-                map.get(param);
-            if (o == null) o = pathValues.get(param);
-            if (o == null && useRequestParams) o = origParams.getParams(param);
-            if (o instanceof List) {
-              List l = (List) o;
-              return l.toArray(new String[l.size()]);
-            }
-
-            return o;
-          }
-
-          @Override
-          public String[] getParams(String param) {
-            Object vals = getParams0(param);
-            return vals == null || vals instanceof String[] ?
-                (String[]) vals :
-                new String[]{vals.toString()};
-          }
-
-          @Override
-          public Iterator<String> getParameterNamesIterator() {
-            return cmd.meta().getParamNamesIterator(co);
-          }
-
-        });
-
-  }
-
-  protected abstract Collection<ApiCommand> getCommands();
-
-  protected abstract Collection<V2EndPoint> getEndPoints();
-
-
-  public interface ApiCommand  {
-    CommandMeta meta();
-
-    void invoke(SolrQueryRequest req, SolrQueryResponse rsp, BaseHandlerApiSupport apiHandler) throws Exception;
-  }
-
-}
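
The anonymous SolrParams above essentially flattens a parsed v2 command body into the
String/String[] views the v1 code expects. A dependency-free sketch of that flattening
(hypothetical class; only the List-to-array rule from getParams0 is shown):

    import java.util.List;
    import java.util.Map;

    class CommandParamViewSketch {
      static String[] get(Map<String, Object> body, String key) {
        Object v = body.get(key);
        if (v == null) return null;
        if (v instanceof List) { // multi-valued command data becomes String[]
          return ((List<?>) v).stream().map(String::valueOf).toArray(String[]::new);
        }
        return new String[]{String.valueOf(v)};
      }

      public static void main(String[] args) {
        Map<String, Object> body = Map.of("name", "coll1", "shards", List.of("s1", "s2"));
        System.out.println(String.join(",", get(body, "shards"))); // s1,s2
      }
    }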

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/ClusterStatus.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ClusterStatus.java b/solr/core/src/java/org/apache/solr/handler/admin/ClusterStatus.java
deleted file mode 100644
index 9ebac77..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/ClusterStatus.java
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.Aliases;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.Utils;
-import org.apache.zookeeper.KeeperException;
-
-public class ClusterStatus {
-  private final ZkStateReader zkStateReader;
-  private final ZkNodeProps message;
-  private final String collection; // maybe null
-
-  public ClusterStatus(ZkStateReader zkStateReader, ZkNodeProps props) {
-    this.zkStateReader = zkStateReader;
-    this.message = props;
-    collection = props.getStr(ZkStateReader.COLLECTION_PROP);
-  }
-
-  @SuppressWarnings("unchecked")
-  public void getClusterStatus(NamedList results)
-      throws KeeperException, InterruptedException {
-    // read aliases
-    Aliases aliases = zkStateReader.getAliases();
-    Map<String, List<String>> collectionVsAliases = new HashMap<>();
-    Map<String, List<String>> aliasVsCollections = aliases.getCollectionAliasListMap();
-    for (Map.Entry<String, List<String>> entry : aliasVsCollections.entrySet()) {
-      String alias = entry.getKey();
-      List<String> colls = entry.getValue();
-      for (String coll : colls) {
-        if (collection == null || collection.equals(coll))  {
-          List<String> list = collectionVsAliases.computeIfAbsent(coll, k -> new ArrayList<>());
-          list.add(alias);
-        }
-      }
-    }
-
-    Map roles = null;
-    if (zkStateReader.getZkClient().exists(ZkStateReader.ROLES, true)) {
-      roles = (Map) Utils.fromJSON(zkStateReader.getZkClient().getData(ZkStateReader.ROLES, null, null, true));
-    }
-
-    ClusterState clusterState = zkStateReader.getClusterState();
-
-    // convert cluster state into a map of writable types
-    byte[] bytes = Utils.toJSON(clusterState);
-    Map<String, Object> stateMap = (Map<String,Object>) Utils.fromJSON(bytes);
-
-    String routeKey = message.getStr(ShardParams._ROUTE_);
-    String shard = message.getStr(ZkStateReader.SHARD_ID_PROP);
-
-    Map<String, DocCollection> collectionsMap = null;
-    if (collection == null) {
-      collectionsMap = clusterState.getCollectionsMap();
-    } else  {
-      collectionsMap = Collections.singletonMap(collection, clusterState.getCollectionOrNull(collection));
-    }
-
-    NamedList<Object> collectionProps = new SimpleOrderedMap<>();
-
-    for (Map.Entry<String, DocCollection> entry : collectionsMap.entrySet()) {
-      Map<String, Object> collectionStatus;
-      String name = entry.getKey();
-      DocCollection clusterStateCollection = entry.getValue();
-      if (clusterStateCollection == null) {
-        if (collection != null) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection: " + name + " not found");
-        } else {
-          // the collection might have been deleted concurrently
-          continue;
-        }
-      }
-
-      Set<String> requestedShards = new HashSet<>();
-      if (routeKey != null) {
-        DocRouter router = clusterStateCollection.getRouter();
-        Collection<Slice> slices = router.getSearchSlices(routeKey, null, clusterStateCollection);
-        for (Slice slice : slices) {
-          requestedShards.add(slice.getName());
-        }
-      }
-      if (shard != null) {
-        String[] paramShards = shard.split(",");
-        requestedShards.addAll(Arrays.asList(paramShards));
-      }
-
-      if (clusterStateCollection.getStateFormat() > 1) {
-        bytes = Utils.toJSON(clusterStateCollection);
-        Map<String, Object> docCollection = (Map<String, Object>) Utils.fromJSON(bytes);
-        collectionStatus = getCollectionStatus(docCollection, name, requestedShards);
-      } else {
-        collectionStatus = getCollectionStatus((Map<String, Object>) stateMap.get(name), name, requestedShards);
-      }
-
-      collectionStatus.put("znodeVersion", clusterStateCollection.getZNodeVersion());
-      if (collectionVsAliases.containsKey(name) && !collectionVsAliases.get(name).isEmpty()) {
-        collectionStatus.put("aliases", collectionVsAliases.get(name));
-      }
-      try {
-        String configName = zkStateReader.readConfigName(name);
-        collectionStatus.put("configName", configName);
-        collectionProps.add(name, collectionStatus);
-      } catch (SolrException e) {
-        if (e.getCause() instanceof KeeperException.NoNodeException)  {
-          // skip this collection because the collection's znode has been deleted
-          // which can happen during aggressive collection removal, see SOLR-10720
-        } else throw e;
-      }
-    }
-
-    List<String> liveNodes = zkStateReader.getZkClient().getChildren(ZkStateReader.LIVE_NODES_ZKNODE, null, true);
-
-    // now we need to walk the collectionProps tree to cross-check replica state with live nodes
-    crossCheckReplicaStateWithLiveNodes(liveNodes, collectionProps);
-
-    NamedList<Object> clusterStatus = new SimpleOrderedMap<>();
-    clusterStatus.add("collections", collectionProps);
-
-    // read cluster properties
-    Map clusterProps = zkStateReader.getClusterProperties();
-    if (clusterProps != null && !clusterProps.isEmpty())  {
-      clusterStatus.add("properties", clusterProps);
-    }
-
-    // add the alias map too
-    Map<String, String> collectionAliasMap = aliases.getCollectionAliasMap(); // comma delim
-    if (!collectionAliasMap.isEmpty())  {
-      clusterStatus.add("aliases", collectionAliasMap);
-    }
-
-    // add the roles map
-    if (roles != null)  {
-      clusterStatus.add("roles", roles);
-    }
-
-    // add live_nodes
-    clusterStatus.add("live_nodes", liveNodes);
-
-    results.add("cluster", clusterStatus);
-  }
-
-  /**
-   * Get collection status from cluster state.
-   * Optionally limits the returned status to a given set of shards.
-   *
-   * @param collection collection map parsed from JSON-serialized {@link ClusterState}
-   * @param name  collection name
-   * @param requestedShards a set of shards to be returned in the status.
-   *                        An empty or null value indicates <b>all</b> shards.
-   * @return map of collection properties
-   */
-  @SuppressWarnings("unchecked")
-  private Map<String, Object> getCollectionStatus(Map<String, Object> collection, String name, Set<String> requestedShards) {
-    if (collection == null)  {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection: " + name + " not found");
-    }
-    if (requestedShards == null || requestedShards.isEmpty()) {
-      return collection;
-    } else {
-      Map<String, Object> shards = (Map<String, Object>) collection.get("shards");
-      Map<String, Object>  selected = new HashMap<>();
-      for (String selectedShard : requestedShards) {
-        if (!shards.containsKey(selectedShard)) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection: " + name + " shard: " + selectedShard + " not found");
-        }
-        selected.put(selectedShard, shards.get(selectedShard));
-      }
-      collection.put("shards", selected);
-      return collection;
-    }
-  }
-
-  /**
-   * Walks the tree of collection status to verify that any replica not reporting a "down" state
-   * is on a live node; any replica reporting an "active" state whose node is not live is
-   * re-marked as "down". Used by CLUSTERSTATUS.
-   * @param liveNodes List of currently live node names.
-   * @param collectionProps Map of collection status information pulled directly from ZooKeeper.
-   */
-  @SuppressWarnings("unchecked")
-  protected void crossCheckReplicaStateWithLiveNodes(List<String> liveNodes, NamedList<Object> collectionProps) {
-    Iterator<Map.Entry<String,Object>> colls = collectionProps.iterator();
-    while (colls.hasNext()) {
-      Map.Entry<String,Object> next = colls.next();
-      Map<String,Object> collMap = (Map<String,Object>)next.getValue();
-      Map<String,Object> shards = (Map<String,Object>)collMap.get("shards");
-      for (Object nextShard : shards.values()) {
-        Map<String,Object> shardMap = (Map<String,Object>)nextShard;
-        Map<String,Object> replicas = (Map<String,Object>)shardMap.get("replicas");
-        for (Object nextReplica : replicas.values()) {
-          Map<String,Object> replicaMap = (Map<String,Object>)nextReplica;
-          if (Replica.State.getState((String) replicaMap.get(ZkStateReader.STATE_PROP)) != Replica.State.DOWN) {
-            // not down, so verify the node is live
-            String node_name = (String)replicaMap.get(ZkStateReader.NODE_NAME_PROP);
-            if (!liveNodes.contains(node_name)) {
-              // node is not live, so this replica is actually down
-              replicaMap.put(ZkStateReader.STATE_PROP, Replica.State.DOWN.toString());
-            }
-          }
-        }
-      }
-    }
-  }
-
-
-}
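
The cross-check above applies one rule per replica: a replica may only keep a non-"down"
state if its node is in the live set. A stripped-down sketch of that rule on bare maps
(hypothetical class and field values):

    import java.util.List;
    import java.util.Map;

    class ReplicaStateSketch {
      static void crossCheck(Map<String, Object> replica, List<String> liveNodes) {
        // Not claiming "down", but the node is gone: re-mark the replica as down.
        if (!"down".equals(replica.get("state"))
            && !liveNodes.contains(replica.get("node_name"))) {
          replica.put("state", "down");
        }
      }

      public static void main(String[] args) {
        Map<String, Object> replica = new java.util.HashMap<>(
            Map.of("state", "active", "node_name", "10.0.0.5:8983_solr"));
        crossCheck(replica, List.of("10.0.0.7:8983_solr"));
        System.out.println(replica.get("state")); // down
      }
    }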

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/CollectionHandlerApi.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionHandlerApi.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionHandlerApi.java
deleted file mode 100644
index d7d179a..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionHandlerApi.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.admin;
-
-import java.lang.invoke.MethodHandles;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.solr.client.solrj.request.CollectionApiMapping;
-import org.apache.solr.client.solrj.request.CollectionApiMapping.CommandMeta;
-import org.apache.solr.client.solrj.request.CollectionApiMapping.Meta;
-import org.apache.solr.client.solrj.request.CollectionApiMapping.V2EndPoint;
-import org.apache.solr.common.Callable;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterProperties;
-import org.apache.solr.common.util.CommandOperation;
-import org.apache.solr.handler.admin.CollectionsHandler.CollectionOperation;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class CollectionHandlerApi extends BaseHandlerApiSupport {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  final CollectionsHandler handler;
-  static Collection<ApiCommand> apiCommands = createCollMapping();
-
-  private static Collection<ApiCommand> createCollMapping() {
-    Map<Meta, ApiCommand> result = new EnumMap<>(Meta.class);
-
-    for (Meta meta : Meta.values()) {
-      for (CollectionOperation op : CollectionOperation.values()) {
-        if (op.action == meta.action) {
-          result.put(meta, new ApiCommand() {
-            @Override
-            public CommandMeta meta() {
-              return meta;
-            }
-
-            @Override
-            public void invoke(SolrQueryRequest req, SolrQueryResponse rsp, BaseHandlerApiSupport apiHandler) throws Exception {
-              ((CollectionHandlerApi) apiHandler).handler.invokeAction(req, rsp, ((CollectionHandlerApi) apiHandler).handler.coreContainer, op.action, op);
-            }
-          });
-        }
-      }
-    }
-    //The following APIs have only V2 implementations
-    addApi(result, Meta.GET_NODES, params -> params.rsp.add("nodes", ((CollectionHandlerApi) params.apiHandler).handler.coreContainer.getZkController().getClusterState().getLiveNodes()));
-    addApi(result, Meta.SET_CLUSTER_PROPERTY_OBJ, params -> {
-      List<CommandOperation> commands = params.req.getCommands(true);
-      if (commands == null || commands.isEmpty()) throw new RuntimeException("Empty commands");
-      ClusterProperties clusterProperties = new ClusterProperties(((CollectionHandlerApi) params.apiHandler).handler.coreContainer.getZkController().getZkClient());
-
-      try {
-        clusterProperties.setClusterProperties(commands.get(0).getDataMap());
-      } catch (Exception e) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error in API", e);
-      }
-    });
-
-    for (Meta meta : Meta.values()) {
-      if (result.get(meta) == null) {
-        log.error("ERROR_INIT. No corresponding API implementation for : " + meta.commandName);
-      }
-    }
-
-    return result.values();
-  }
-
-  private static void addApi(Map<Meta, ApiCommand> result, Meta metaInfo, Callable<ApiParams> fun) {
-    result.put(metaInfo, new ApiCommand() {
-      @Override
-      public CommandMeta meta() {
-        return metaInfo;
-      }
-
-      @Override
-      public void invoke(SolrQueryRequest req, SolrQueryResponse rsp, BaseHandlerApiSupport apiHandler) throws Exception {
-        fun.call(new ApiParams(req, rsp, apiHandler));
-      }
-    });
-  }
-
-  static class ApiParams {
-    final SolrQueryRequest req;
-    final SolrQueryResponse rsp;
-    final BaseHandlerApiSupport apiHandler;
-
-    ApiParams(SolrQueryRequest req, SolrQueryResponse rsp, BaseHandlerApiSupport apiHandler) {
-      this.req = req;
-      this.rsp = rsp;
-      this.apiHandler = apiHandler;
-    }
-  }
-
-  public CollectionHandlerApi(CollectionsHandler handler) {
-    this.handler = handler;
-  }
-
-  @Override
-  protected Collection<ApiCommand> getCommands() {
-    return apiCommands;
-  }
-
-  @Override
-  protected List<V2EndPoint> getEndPoints() {
-    return Arrays.asList(CollectionApiMapping.EndPoint.values());
-  }
-
-}
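
The EnumMap-plus-audit pattern in createCollMapping() is worth noting: every enum constant
must end up with an implementation, and gaps are logged at init. A tiny sketch of the
pattern (hypothetical enum and command interface):

    import java.util.EnumMap;
    import java.util.Map;

    class CommandRegistrySketch {
      enum Meta { GET_NODES, SET_PROP }
      interface Command { void invoke(); }

      public static void main(String[] args) {
        Map<Meta, Command> result = new EnumMap<>(Meta.class);
        result.put(Meta.GET_NODES, () -> System.out.println("list live nodes"));
        for (Meta m : Meta.values()) { // audit: flag any constant left unimplemented
          if (result.get(m) == null) System.err.println("No implementation for: " + m);
        }
        result.get(Meta.GET_NODES).invoke();
      }
    }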


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/backup/repository/HdfsBackupRepository.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/backup/repository/HdfsBackupRepository.java b/solr/core/src/java/org/apache/solr/core/backup/repository/HdfsBackupRepository.java
deleted file mode 100644
index 99f858a..0000000
--- a/solr/core/src/java/org/apache/solr/core/backup/repository/HdfsBackupRepository.java
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.core.backup.repository;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.Objects;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.NoLockFactory;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.DirectoryFactory;
-import org.apache.solr.core.HdfsDirectoryFactory;
-import org.apache.solr.store.hdfs.HdfsDirectory;
-import org.apache.solr.store.hdfs.HdfsDirectory.HdfsIndexInput;
-
-public class HdfsBackupRepository implements BackupRepository {
-  private static final String HDFS_UMASK_MODE_PARAM = "solr.hdfs.permissions.umask-mode";
-
-  private HdfsDirectoryFactory factory;
-  private Configuration hdfsConfig = null;
-  private FileSystem fileSystem = null;
-  private Path baseHdfsPath = null;
-  private NamedList config = null;
-
-  @SuppressWarnings("rawtypes")
-  @Override
-  public void init(NamedList args) {
-    this.config = args;
-
-    // We don't really need this factory instance. But we want to initialize it here to
-    // make sure that all HDFS related initialization is at one place (and not duplicated here).
-    factory = new HdfsDirectoryFactory();
-    factory.init(args);
-    this.hdfsConfig = factory.getConf();
-
-    // Configure the umask mode if specified.
-    if (args.get(HDFS_UMASK_MODE_PARAM) != null) {
-      String umaskVal = (String)args.get(HDFS_UMASK_MODE_PARAM);
-      this.hdfsConfig.set(FsPermission.UMASK_LABEL, umaskVal);
-    }
-
-    String hdfsSolrHome = (String) Objects.requireNonNull(args.get(HdfsDirectoryFactory.HDFS_HOME),
-        "Please specify " + HdfsDirectoryFactory.HDFS_HOME + " property.");
-    Path path = new Path(hdfsSolrHome);
-    while (path != null) { // Compute the path of root file-system (without requiring an additional system property).
-      baseHdfsPath = path;
-      path = path.getParent();
-    }
-
-    try {
-      this.fileSystem = FileSystem.get(this.baseHdfsPath.toUri(), this.hdfsConfig);
-    } catch (IOException e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, e);
-    }
-  }
-
-  public void close() throws IOException {
-    if (this.fileSystem != null) {
-      this.fileSystem.close();
-    }
-    if (this.factory != null) {
-      this.factory.close();
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  @Override
-  public <T> T getConfigProperty(String name) {
-    return (T) this.config.get(name);
-  }
-
-  @Override
-  public URI createURI(String location) {
-    Objects.requireNonNull(location);
-
-    URI result = null;
-    try {
-      result = new URI(location);
-      if (!result.isAbsolute()) {
-        result = resolve(this.baseHdfsPath.toUri(), location);
-      }
-    } catch (URISyntaxException ex) {
-      result = resolve(this.baseHdfsPath.toUri(), location);
-    }
-
-    return result;
-  }
-
-  @Override
-  public URI resolve(URI baseUri, String... pathComponents) {
-    Preconditions.checkArgument(baseUri.isAbsolute());
-
-    Path result = new Path(baseUri);
-    for (String path : pathComponents) {
-      result = new Path(result, path);
-    }
-
-    return result.toUri();
-  }
-
-  @Override
-  public boolean exists(URI path) throws IOException {
-    return this.fileSystem.exists(new Path(path));
-  }
-
-  @Override
-  public PathType getPathType(URI path) throws IOException {
-    return this.fileSystem.isDirectory(new Path(path)) ? PathType.DIRECTORY : PathType.FILE;
-  }
-
-  @Override
-  public String[] listAll(URI path) throws IOException {
-    FileStatus[] status = this.fileSystem.listStatus(new Path(path));
-    String[] result = new String[status.length];
-    for (int i = 0; i < status.length; i++) {
-      result[i] = status[i].getPath().getName();
-    }
-    return result;
-  }
-
-  @Override
-  public IndexInput openInput(URI dirPath, String fileName, IOContext ctx) throws IOException {
-    Path p = new Path(new Path(dirPath), fileName);
-    return new HdfsIndexInput(fileName, this.fileSystem, p, HdfsDirectory.DEFAULT_BUFFER_SIZE);
-  }
-
-  @Override
-  public OutputStream createOutput(URI path) throws IOException {
-    return this.fileSystem.create(new Path(path));
-  }
-
-  @Override
-  public void createDirectory(URI path) throws IOException {
-    if (!this.fileSystem.mkdirs(new Path(path))) {
-      throw new IOException("Unable to create a directory at following location " + path);
-    }
-  }
-
-  @Override
-  public void deleteDirectory(URI path) throws IOException {
-    if (!this.fileSystem.delete(new Path(path), true)) {
-      throw new IOException("Unable to delete a directory at following location " + path);
-    }
-  }
-
-  @Override
-  public void copyFileFrom(Directory sourceDir, String fileName, URI dest) throws IOException {
-    try (HdfsDirectory dir = new HdfsDirectory(new Path(dest), NoLockFactory.INSTANCE,
-        hdfsConfig, HdfsDirectory.DEFAULT_BUFFER_SIZE)) {
-      dir.copyFrom(sourceDir, fileName, fileName, DirectoryFactory.IOCONTEXT_NO_CACHE);
-    }
-  }
-
-  @Override
-  public void copyFileTo(URI sourceRepo, String fileName, Directory dest) throws IOException {
-    try (HdfsDirectory dir = new HdfsDirectory(new Path(sourceRepo), NoLockFactory.INSTANCE,
-        hdfsConfig, HdfsDirectory.DEFAULT_BUFFER_SIZE)) {
-      dest.copyFrom(dir, fileName, fileName, DirectoryFactory.IOCONTEXT_NO_CACHE);
-    }
-  }
-}
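
The getParent() loop in init() deserves a note: it derives the file-system root from the
configured home path, so no extra system property is needed. An analogous sketch with
java.nio paths (illustrative only; the real code walks Hadoop Path objects):

    import java.nio.file.Path;
    import java.nio.file.Paths;

    class RootPathSketch {
      public static void main(String[] args) {
        Path path = Paths.get("/user/solr/backups");
        Path root = path;
        while (path != null) { // walk up until the parent is null
          root = path;
          path = path.getParent();
        }
        System.out.println(root); // "/"
      }
    }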

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java b/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java
deleted file mode 100644
index 01810f6..0000000
--- a/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.core.backup.repository;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.nio.file.FileVisitResult;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.nio.file.SimpleFileVisitor;
-import java.nio.file.attribute.BasicFileAttributes;
-import java.util.Objects;
-
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.NoLockFactory;
-import org.apache.lucene.store.SimpleFSDirectory;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.DirectoryFactory;
-
-import com.google.common.base.Preconditions;
-
-/**
- * A concrete implementation of the {@linkplain BackupRepository} interface supporting backup/restore of Solr indexes
- * to a local file-system. (Note: this also works for a shared file-system, e.g. NFS, as long as it is exposed via a
- * local file-system interface.)
- */
-public class LocalFileSystemRepository implements BackupRepository {
-  private NamedList config = null;
-
-  @Override
-  public void init(NamedList args) {
-    this.config = args;
-  }
-
-  @SuppressWarnings("unchecked")
-  @Override
-  public <T> T getConfigProperty(String name) {
-    return (T) this.config.get(name);
-  }
-
-  @Override
-  public URI createURI(String location) {
-    Objects.requireNonNull(location);
-
-    URI result = null;
-    try {
-      result = new URI(location);
-      if (!result.isAbsolute()) {
-        result = Paths.get(location).toUri();
-      }
-    } catch (URISyntaxException ex) {
-      result = Paths.get(location).toUri();
-    }
-
-    return result;
-  }
-
-  @Override
-  public URI resolve(URI baseUri, String... pathComponents) {
-    Preconditions.checkArgument(pathComponents.length > 0);
-
-    Path result = Paths.get(baseUri);
-    for (int i = 0; i < pathComponents.length; i++) {
-      result = result.resolve(pathComponents[i]);
-    }
-
-    return result.toUri();
-  }
-
-  @Override
-  public void createDirectory(URI path) throws IOException {
-    Files.createDirectory(Paths.get(path));
-  }
-
-  @Override
-  public void deleteDirectory(URI path) throws IOException {
-    Files.walkFileTree(Paths.get(path), new SimpleFileVisitor<Path>() {
-      @Override
-      public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
-        Files.delete(file);
-        return FileVisitResult.CONTINUE;
-      }
-
-      @Override
-      public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
-        Files.delete(dir);
-        return FileVisitResult.CONTINUE;
-      }
-    });
-  }
-
-  @Override
-  public boolean exists(URI path) throws IOException {
-    return Files.exists(Paths.get(path));
-  }
-
-  @Override
-  public IndexInput openInput(URI dirPath, String fileName, IOContext ctx) throws IOException {
-    try (FSDirectory dir = new SimpleFSDirectory(Paths.get(dirPath), NoLockFactory.INSTANCE)) {
-      return dir.openInput(fileName, ctx);
-    }
-  }
-
-  @Override
-  public OutputStream createOutput(URI path) throws IOException {
-    return Files.newOutputStream(Paths.get(path));
-  }
-
-  @Override
-  public String[] listAll(URI dirPath) throws IOException {
-    try (FSDirectory dir = new SimpleFSDirectory(Paths.get(dirPath), NoLockFactory.INSTANCE)) {
-      return dir.listAll();
-    }
-  }
-
-  @Override
-  public PathType getPathType(URI path) throws IOException {
-    return Files.isDirectory(Paths.get(path)) ? PathType.DIRECTORY : PathType.FILE;
-  }
-
-  @Override
-  public void copyFileFrom(Directory sourceDir, String fileName, URI dest) throws IOException {
-    try (FSDirectory dir = new SimpleFSDirectory(Paths.get(dest), NoLockFactory.INSTANCE)) {
-      dir.copyFrom(sourceDir, fileName, fileName, DirectoryFactory.IOCONTEXT_NO_CACHE);
-    }
-  }
-
-  @Override
-  public void copyFileTo(URI sourceDir, String fileName, Directory dest) throws IOException {
-    try (FSDirectory dir = new SimpleFSDirectory(Paths.get(sourceDir), NoLockFactory.INSTANCE)) {
-      dest.copyFrom(dir, fileName, fileName, DirectoryFactory.IOCONTEXT_NO_CACHE);
-    }
-  }
-
-  @Override
-  public void close() throws IOException {}
-}

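One subtlety in createURI above is worth calling out: a string that parses as an absolute URI is used verbatim, while anything else (a relative path, or a string that is not a valid URI at all) falls back to Paths.get(location).toUri(). A throwaway sketch of the expected behavior; the locations are illustrative:

    import java.net.URI;
    import org.apache.solr.core.backup.repository.LocalFileSystemRepository;

    public class CreateUriSketch {
      public static void main(String[] args) {
        LocalFileSystemRepository repo = new LocalFileSystemRepository();
        URI kept = repo.createURI("file:///backups/coll1"); // absolute URI, kept as-is
        URI resolved = repo.createURI("backups/coll1");     // relative path, resolved
        System.out.println(kept);     // file:///backups/coll1
        System.out.println(resolved); // file: URI under the current working directory
      }
    }
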
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/backup/repository/package-info.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/backup/repository/package-info.java b/solr/core/src/java/org/apache/solr/core/backup/repository/package-info.java
deleted file mode 100644
index fb3cfd5..0000000
--- a/solr/core/src/java/org/apache/solr/core/backup/repository/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-
-/**
-* Provides the {@link org.apache.solr.core.backup.repository.BackupRepository} interface for
-* plugging different storage systems into Solr's backup/restore functionality.
-*/
-package org.apache.solr.core.backup.repository;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/package-info.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/package-info.java b/solr/core/src/java/org/apache/solr/core/package-info.java
deleted file mode 100644
index 0dbb8c2..0000000
--- a/solr/core/src/java/org/apache/solr/core/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- 
-/** 
- * Core classes implementing Solr internals and the management of {@link org.apache.solr.core.SolrCore}s
- */
-package org.apache.solr.core;
-
-

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/snapshots/CollectionSnapshotMetaData.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/snapshots/CollectionSnapshotMetaData.java b/solr/core/src/java/org/apache/solr/core/snapshots/CollectionSnapshotMetaData.java
deleted file mode 100644
index 4170861..0000000
--- a/solr/core/src/java/org/apache/solr/core/snapshots/CollectionSnapshotMetaData.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core.snapshots;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.util.NamedList;
-import org.noggit.JSONWriter;
-
-/**
- * This class defines the meta-data for a collection-level snapshot.
- */
-public class CollectionSnapshotMetaData implements JSONWriter.Writable {
-  public static class CoreSnapshotMetaData implements JSONWriter.Writable {
-    private final String coreName;
-    private final String indexDirPath;
-    private final long generationNumber;
-    private final boolean leader;
-    private final String shardId;
-    private final Collection<String> files;
-
-    public CoreSnapshotMetaData(String coreName, String indexDirPath, long generationNumber, String shardId, boolean leader, Collection<String> files) {
-      this.coreName = coreName;
-      this.indexDirPath = indexDirPath;
-      this.generationNumber = generationNumber;
-      this.shardId = shardId;
-      this.leader = leader;
-      this.files = files;
-    }
-
-    @SuppressWarnings({"unchecked", "rawtypes"})
-    public CoreSnapshotMetaData(NamedList resp) {
-      this.coreName = (String)resp.get(CoreAdminParams.CORE);
-      this.indexDirPath = (String)resp.get(SolrSnapshotManager.INDEX_DIR_PATH);
-      this.generationNumber = (Long)resp.get(SolrSnapshotManager.GENERATION_NUM);
-      this.shardId = (String)resp.get(SolrSnapshotManager.SHARD_ID);
-      this.leader = (Boolean)resp.get(SolrSnapshotManager.LEADER);
-      this.files = (Collection<String>)resp.get(SolrSnapshotManager.FILE_LIST);
-    }
-
-    public String getCoreName() {
-      return coreName;
-    }
-
-    public String getIndexDirPath() {
-      return indexDirPath;
-    }
-
-    public long getGenerationNumber() {
-      return generationNumber;
-    }
-
-    public Collection<String> getFiles() {
-      return files;
-    }
-
-    public String getShardId() {
-      return shardId;
-    }
-
-    public boolean isLeader() {
-      return leader;
-    }
-
-    @Override
-    public void write(JSONWriter arg0) {
-      LinkedHashMap<String, Object> info = new LinkedHashMap<String, Object>();
-      info.put(CoreAdminParams.CORE, getCoreName());
-      info.put(SolrSnapshotManager.INDEX_DIR_PATH, getIndexDirPath());
-      info.put(SolrSnapshotManager.GENERATION_NUM, getGenerationNumber());
-      info.put(SolrSnapshotManager.SHARD_ID, getShardId());
-      info.put(SolrSnapshotManager.LEADER, isLeader());
-      info.put(SolrSnapshotManager.FILE_LIST, getFiles());
-      arg0.write(info);
-    }
-
-    @SuppressWarnings({"rawtypes", "unchecked"})
-    public NamedList toNamedList() {
-      NamedList result = new NamedList();
-      result.add(CoreAdminParams.CORE, getCoreName());
-      result.add(SolrSnapshotManager.INDEX_DIR_PATH, getIndexDirPath());
-      result.add(SolrSnapshotManager.GENERATION_NUM, getGenerationNumber());
-      result.add(SolrSnapshotManager.SHARD_ID, getShardId());
-      result.add(SolrSnapshotManager.LEADER, isLeader());
-      result.add(SolrSnapshotManager.FILE_LIST, getFiles());
-      return result;
-    }
-  }
-
-  public static enum SnapshotStatus {
-    Successful, InProgress, Failed;
-  }
-
-  private final String name;
-  private final SnapshotStatus status;
-  private final Date creationDate;
-  private final List<CoreSnapshotMetaData> replicaSnapshots;
-
-  public CollectionSnapshotMetaData(String name) {
-    this(name, SnapshotStatus.InProgress, new Date(), Collections.<CoreSnapshotMetaData>emptyList());
-  }
-
-  public CollectionSnapshotMetaData(String name, SnapshotStatus status, Date creationTime, List<CoreSnapshotMetaData> replicaSnapshots) {
-    this.name = name;
-    this.status = status;
-    this.creationDate = creationTime;
-    this.replicaSnapshots = replicaSnapshots;
-  }
-
-  @SuppressWarnings("unchecked")
-  public CollectionSnapshotMetaData(Map<String, Object> data) {
-    this.name = (String)data.get(CoreAdminParams.NAME);
-    this.status = SnapshotStatus.valueOf((String)data.get(SolrSnapshotManager.SNAPSHOT_STATUS));
-    this.creationDate = new Date((Long)data.get(SolrSnapshotManager.CREATION_DATE));
-    this.replicaSnapshots = new ArrayList<>();
-
-    List<Object> r = (List<Object>) data.get(SolrSnapshotManager.SNAPSHOT_REPLICAS);
-    for (Object x : r) {
-      Map<String, Object> info = (Map<String, Object>)x;
-      String coreName = (String)info.get(CoreAdminParams.CORE);
-      String indexDirPath = (String)info.get(SolrSnapshotManager.INDEX_DIR_PATH);
-      long generationNumber = (Long) info.get(SolrSnapshotManager.GENERATION_NUM);
-      String shardId = (String)info.get(SolrSnapshotManager.SHARD_ID);
-      boolean leader = (Boolean) info.get(SolrSnapshotManager.LEADER);
-      Collection<String> files = (Collection<String>)info.get(SolrSnapshotManager.FILE_LIST);
-      replicaSnapshots.add(new CoreSnapshotMetaData(coreName, indexDirPath, generationNumber, shardId, leader, files));
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  public CollectionSnapshotMetaData(NamedList<Object> data) {
-    this.name = (String)data.get(CoreAdminParams.NAME);
-    String statusStr = (String)data.get(SolrSnapshotManager.SNAPSHOT_STATUS);
-    this.creationDate = new Date((Long)data.get(SolrSnapshotManager.CREATION_DATE));
-    this.status = SnapshotStatus.valueOf(statusStr);
-    this.replicaSnapshots = new ArrayList<>();
-
-    NamedList<Object> r = (NamedList<Object>) data.get(SolrSnapshotManager.SNAPSHOT_REPLICAS);
-    for (Map.Entry<String,Object> x : r) {
-      NamedList<Object> info = (NamedList<Object>)x.getValue();
-      String coreName = (String)info.get(CoreAdminParams.CORE);
-      String indexDirPath = (String)info.get(SolrSnapshotManager.INDEX_DIR_PATH);
-      long generationNumber = (Long) info.get(SolrSnapshotManager.GENERATION_NUM);
-      String shardId = (String)info.get(SolrSnapshotManager.SHARD_ID);
-      boolean leader = (Boolean) info.get(SolrSnapshotManager.LEADER);
-      Collection<String> files = (Collection<String>)info.get(SolrSnapshotManager.FILE_LIST);
-      replicaSnapshots.add(new CoreSnapshotMetaData(coreName, indexDirPath, generationNumber, shardId, leader, files));
-    }
-  }
-
-  public String getName() {
-    return name;
-  }
-
-  public SnapshotStatus getStatus() {
-    return status;
-  }
-
-  public Date getCreationDate() {
-    return creationDate;
-  }
-
-  public List<CoreSnapshotMetaData> getReplicaSnapshots() {
-    return replicaSnapshots;
-  }
-
-  public List<CoreSnapshotMetaData> getReplicaSnapshotsForShard(String shardId) {
-    List<CoreSnapshotMetaData> result = new ArrayList<>();
-    for (CoreSnapshotMetaData d : replicaSnapshots) {
-      if (d.getShardId().equals(shardId)) {
-        result.add(d);
-      }
-    }
-    return result;
-  }
-
-  public boolean isSnapshotExists(String shardId, Replica r) {
-    for (CoreSnapshotMetaData d : replicaSnapshots) {
-      if (d.getShardId().equals(shardId) && d.getCoreName().equals(r.getCoreName())) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  public Collection<String> getShards() {
-    Set<String> result = new HashSet<>();
-    for (CoreSnapshotMetaData d : replicaSnapshots) {
-      result.add(d.getShardId());
-    }
-    return result;
-  }
-
-  @Override
-  public void write(JSONWriter arg0) {
-    LinkedHashMap<String, Object> result = new LinkedHashMap<>();
-    result.put(CoreAdminParams.NAME, this.name);
-    result.put(SolrSnapshotManager.SNAPSHOT_STATUS, this.status.toString());
-    result.put(SolrSnapshotManager.CREATION_DATE, this.getCreationDate().getTime());
-    result.put(SolrSnapshotManager.SNAPSHOT_REPLICAS, this.replicaSnapshots);
-    arg0.write(result);
-  }
-
-  @SuppressWarnings({"rawtypes", "unchecked"})
-  public NamedList toNamedList() {
-    NamedList result = new NamedList();
-    result.add(CoreAdminParams.NAME, this.name);
-    result.add(SolrSnapshotManager.SNAPSHOT_STATUS, this.status.toString());
-    result.add(SolrSnapshotManager.CREATION_DATE, this.getCreationDate().getTime());
-
-    NamedList replicas = new NamedList();
-    for (CoreSnapshotMetaData x : replicaSnapshots) {
-      replicas.add(x.getCoreName(), x.toNamedList());
-    }
-    result.add(SolrSnapshotManager.SNAPSHOT_REPLICAS, replicas);
-
-    return result;
-  }
-}

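CollectionSnapshotMetaData above supports two serialized forms: JSON (via JSONWriter.Writable, which is what gets stored in ZooKeeper) and NamedList (for core-admin responses). A small JSON round-trip sketch under that assumption; the snapshot name is illustrative:

    import java.util.Map;
    import org.apache.solr.common.util.Utils;
    import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;

    public class MetaDataRoundTripSketch {
      @SuppressWarnings("unchecked")
      public static void main(String[] args) {
        // A freshly created snapshot starts InProgress with no replica entries.
        CollectionSnapshotMetaData meta = new CollectionSnapshotMetaData("weekly-backup");
        byte[] json = Utils.toJSON(meta); // same call SolrSnapshotManager uses for ZK
        CollectionSnapshotMetaData copy =
            new CollectionSnapshotMetaData((Map<String, Object>) Utils.fromJSON(json));
        System.out.println(copy.getName() + " / " + copy.getStatus());
      }
    }
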
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/snapshots/SolrSnapshotManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/snapshots/SolrSnapshotManager.java b/solr/core/src/java/org/apache/solr/core/snapshots/SolrSnapshotManager.java
deleted file mode 100644
index 354307d..0000000
--- a/solr/core/src/java/org/apache/solr/core/snapshots/SolrSnapshotManager.java
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core.snapshots;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-import org.apache.lucene.index.IndexCommit;
-import org.apache.lucene.index.IndexDeletionPolicy;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-import org.apache.lucene.index.NoMergePolicy;
-import org.apache.lucene.store.Directory;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.snapshots.SolrSnapshotMetaDataManager.SnapshotMetaData;
-import org.apache.solr.update.SolrIndexWriter;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class provides functionality required to handle the data files corresponding to Solr snapshots.
- */
-public class SolrSnapshotManager {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String INDEX_DIR_PATH = "indexDirPath";
-  public static final String GENERATION_NUM = "generation";
-  public static final String SNAPSHOT_STATUS = "status";
-  public static final String CREATION_DATE = "creationDate";
-  public static final String SNAPSHOT_REPLICAS = "replicas";
-  public static final String SNAPSHOTS_INFO = "snapshots";
-  public static final String LEADER = "leader";
-  public static final String SHARD_ID = "shard_id";
-  public static final String FILE_LIST = "files";
-
-  /**
-   * This method returns whether a named snapshot exists for the specified collection.
-   *
-   * @param zkClient Zookeeper client
-   * @param collectionName The name of the collection
-   * @param commitName The name of the snapshot
-   * @return true if the named snapshot exists
-   *         false otherwise
-   * @throws KeeperException In case of Zookeeper error
-   * @throws InterruptedException In case of thread interruption.
-   */
-  public static boolean snapshotExists(SolrZkClient zkClient, String collectionName, String commitName)
-      throws KeeperException, InterruptedException {
-    String zkPath = getSnapshotMetaDataZkPath(collectionName, Optional.ofNullable(commitName));
-    return zkClient.exists(zkPath, true);
-  }
-
-  /**
-   * This method creates an entry for the named snapshot for the specified collection in Zookeeper.
-   *
-   * @param zkClient Zookeeper client
-   * @param collectionName The name of the collection
-   * @param meta The {@linkplain CollectionSnapshotMetaData} corresponding to named snapshot
-   * @throws KeeperException In case of Zookeeper error
-   * @throws InterruptedException In case of thread interruption.
-   */
-  public static void createCollectionLevelSnapshot(SolrZkClient zkClient, String collectionName,
-      CollectionSnapshotMetaData meta) throws KeeperException, InterruptedException {
-    String zkPath = getSnapshotMetaDataZkPath(collectionName, Optional.of(meta.getName()));
-    zkClient.makePath(zkPath, Utils.toJSON(meta), CreateMode.PERSISTENT, true);
-  }
-
-  /**
-   * This method updates an entry for the named snapshot for the specified collection in Zookeeper.
-   *
-   * @param zkClient Zookeeper client
-   * @param collectionName  The name of the collection
-   * @param meta The {@linkplain CollectionSnapshotMetaData} corresponding to named snapshot
-   * @throws KeeperException In case of Zookeeper error
-   * @throws InterruptedException In case of thread interruption.
-   */
-  public static void updateCollectionLevelSnapshot(SolrZkClient zkClient, String collectionName,
-      CollectionSnapshotMetaData meta) throws KeeperException, InterruptedException {
-    String zkPath = getSnapshotMetaDataZkPath(collectionName, Optional.of(meta.getName()));
-    zkClient.setData(zkPath, Utils.toJSON(meta), -1, true);
-  }
-
-  /**
-   * This method deletes an entry for the named snapshot for the specified collection in Zookeeper.
-   *
-   * @param zkClient Zookeeper client
-   * @param collectionName The name of the collection
-   * @param commitName  The name of the snapshot
-   * @throws InterruptedException In case of thread interruption.
-   * @throws KeeperException  In case of Zookeeper error
-   */
-  public static void deleteCollectionLevelSnapshot(SolrZkClient zkClient, String collectionName, String commitName)
-      throws InterruptedException, KeeperException {
-    String zkPath = getSnapshotMetaDataZkPath(collectionName, Optional.of(commitName));
-    zkClient.delete(zkPath, -1, true);
-  }
-
-  /**
-   * This method deletes all snapshots for the specified collection in Zookeeper.
-   *
-   * @param zkClient  Zookeeper client
-   * @param collectionName The name of the collection
-   * @throws InterruptedException In case of thread interruption.
-   * @throws KeeperException In case of Zookeeper error
-   */
-  public static void cleanupCollectionLevelSnapshots(SolrZkClient zkClient, String collectionName)
-      throws InterruptedException, KeeperException {
-    String zkPath = getSnapshotMetaDataZkPath(collectionName, Optional.empty());
-    try {
-      // Delete the meta-data for each snapshot.
-      Collection<String> snapshots = zkClient.getChildren(zkPath, null, true);
-      for (String snapshot : snapshots) {
-        String path = getSnapshotMetaDataZkPath(collectionName, Optional.of(snapshot));
-        try {
-          zkClient.delete(path, -1, true);
-        } catch (KeeperException ex) {
-          // Gracefully handle the case when the zk node doesn't exist
-          if ( ex.code() != KeeperException.Code.NONODE ) {
-            throw ex;
-          }
-        }
-      }
-
-      // Delete the parent node.
-      zkClient.delete(zkPath, -1, true);
-    } catch (KeeperException ex) {
-      // Gracefully handle the case when the zk node doesn't exist (e.g. if no snapshots were created for this collection).
-      if ( ex.code() != KeeperException.Code.NONODE ) {
-        throw ex;
-      }
-    }
-  }
-
-  /**
-   * This method returns the {@linkplain CollectionSnapshotMetaData} for the named snapshot for the specified collection in Zookeeper.
-   *
-   * @param zkClient  Zookeeper client
-   * @param collectionName  The name of the collection
-   * @param commitName The name of the snapshot
-   * @return (Optional) the {@linkplain CollectionSnapshotMetaData}
-   * @throws InterruptedException In case of thread interruption.
-   * @throws KeeperException In case of Zookeeper error
-   */
-  public static Optional<CollectionSnapshotMetaData> getCollectionLevelSnapshot(SolrZkClient zkClient, String collectionName, String commitName)
-      throws InterruptedException, KeeperException {
-    String zkPath = getSnapshotMetaDataZkPath(collectionName, Optional.of(commitName));
-    try {
-      Map<String, Object> data = (Map<String, Object>)Utils.fromJSON(zkClient.getData(zkPath, null, null, true));
-      return Optional.of(new CollectionSnapshotMetaData(data));
-    } catch (KeeperException ex) {
-      // Gracefully handle the case when the zk node for a specific
-      // snapshot doesn't exist (e.g. due to a concurrent delete operation).
-      if ( ex.code() == KeeperException.Code.NONODE ) {
-        return Optional.empty();
-      }
-      throw ex;
-    }
-  }
-
-  /**
-   * This method returns the {@linkplain CollectionSnapshotMetaData} for each named snapshot for the specified collection in Zookeeper.
-   *
-   * @param zkClient Zookeeper client
-   * @param collectionName The name of the collection
-   * @return the {@linkplain CollectionSnapshotMetaData} for each named snapshot
-   * @throws InterruptedException In case of thread interruption.
-   * @throws KeeperException In case of Zookeeper error
-   */
-  public static Collection<CollectionSnapshotMetaData> listSnapshots(SolrZkClient zkClient, String collectionName)
-      throws InterruptedException, KeeperException {
-    Collection<CollectionSnapshotMetaData> result = new ArrayList<>();
-    String zkPath = getSnapshotMetaDataZkPath(collectionName, Optional.empty());
-
-    try {
-      Collection<String> snapshots = zkClient.getChildren(zkPath, null, true);
-      for (String snapshot : snapshots) {
-        Optional<CollectionSnapshotMetaData> s = getCollectionLevelSnapshot(zkClient, collectionName, snapshot);
-        if (s.isPresent()) {
-          result.add(s.get());
-        }
-      }
-    } catch (KeeperException ex) {
-      // Gracefully handle the case when the zk node doesn't exist (e.g. due to a concurrent delete collection operation).
-      if ( ex.code() != KeeperException.Code.NONODE ) {
-        throw ex;
-      }
-    }
-    return result;
-  }
-
-
-  /**
-   * This method deletes index files of the {@linkplain IndexCommit} for the specified generation number.
-   *
-   * @param core The Solr core
-   * @param dir The index directory storing the snapshot.
-   * @param gen The generation number of the {@linkplain IndexCommit} to be deleted.
-   * @throws IOException in case of I/O errors.
-   */
-  public static void deleteSnapshotIndexFiles(SolrCore core, Directory dir, final long gen) throws IOException {
-    deleteSnapshotIndexFiles(core, dir, new IndexDeletionPolicy() {
-      @Override
-      public void onInit(List<? extends IndexCommit> commits) throws IOException {
-        for (IndexCommit ic : commits) {
-          if (gen == ic.getGeneration()) {
-            log.info("Deleting non-snapshotted index commit with generation {}", ic.getGeneration());
-            ic.delete();
-          }
-        }
-      }
-
-      @Override
-      public void onCommit(List<? extends IndexCommit> commits)
-          throws IOException {}
-    });
-  }
-
-  /**
-   * This method deletes index files not associated with the specified <code>snapshots</code>.
-   *
-   * @param core The Solr core
-   * @param dir The index directory storing the snapshot.
-   * @param snapshots The snapshots to be preserved.
-   * @throws IOException in case of I/O errors.
-   */
-  public static void deleteNonSnapshotIndexFiles(SolrCore core, Directory dir, Collection<SnapshotMetaData> snapshots) throws IOException {
-    final Set<Long> genNumbers = new HashSet<>();
-    for (SnapshotMetaData m : snapshots) {
-      genNumbers.add(m.getGenerationNumber());
-    }
-
-    deleteSnapshotIndexFiles(core, dir, new IndexDeletionPolicy() {
-      @Override
-      public void onInit(List<? extends IndexCommit> commits) throws IOException {
-        for (IndexCommit ic : commits) {
-          if (!genNumbers.contains(ic.getGeneration())) {
-            log.info("Deleting non-snapshotted index commit with generation {}", ic.getGeneration());
-            ic.delete();
-          }
-        }
-      }
-
-      @Override
-      public void onCommit(List<? extends IndexCommit> commits)
-          throws IOException {}
-    });
-  }
-
-  /**
-   * This method deletes the index files of the {@linkplain IndexCommit}s selected by the specified deletion policy.
-   *
-   * @param core The Solr core
-   * @param dir The index directory storing the snapshot.
-   * @param delPolicy The {@linkplain IndexDeletionPolicy} selecting the commits to be deleted.
-   * @throws IOException in case of I/O errors.
-   */
-  private static void deleteSnapshotIndexFiles(SolrCore core, Directory dir, IndexDeletionPolicy delPolicy) throws IOException {
-    IndexWriterConfig conf = core.getSolrConfig().indexConfig.toIndexWriterConfig(core);
-    conf.setOpenMode(OpenMode.APPEND);
-    conf.setMergePolicy(NoMergePolicy.INSTANCE);//Don't want to merge any commits here!
-    conf.setIndexDeletionPolicy(delPolicy);
-    conf.setCodec(core.getCodec());
-
-    try (SolrIndexWriter iw = new SolrIndexWriter("SolrSnapshotCleaner", dir, conf)) {
-      // Do nothing. The only purpose of opening index writer is to invoke the Lucene IndexDeletionPolicy#onInit
-      // method so that we can cleanup the files associated with specified index commit.
-      // Note the index writer creates a new commit during the close() operation (which is harmless).
-    }
-  }
-
-  private static String getSnapshotMetaDataZkPath(String collectionName, Optional<String> commitName) {
-    if (commitName.isPresent()) {
-      return "/snapshots/"+collectionName+"/"+commitName.get();
-    }
-    return "/snapshots/"+collectionName;
-  }
-}

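SolrSnapshotManager keeps one ZooKeeper node per snapshot under /snapshots/&lt;collection&gt;/&lt;name&gt;. A lifecycle sketch built only from the static methods above; the ensemble address, collection and snapshot names are illustrative, and the SolrZkClient constructor/timeout arguments are assumptions:

    import org.apache.solr.common.cloud.SolrZkClient;
    import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;
    import org.apache.solr.core.snapshots.SolrSnapshotManager;

    public class SnapshotZkSketch {
      public static void main(String[] args) throws Exception {
        try (SolrZkClient zk = new SolrZkClient("localhost:2181", 30000)) {
          SolrSnapshotManager.createCollectionLevelSnapshot(
              zk, "coll1", new CollectionSnapshotMetaData("snap1"));
          // Meta-data now lives at /snapshots/coll1/snap1.
          boolean present = SolrSnapshotManager.snapshotExists(zk, "coll1", "snap1");
          System.out.println("exists: " + present);
          SolrSnapshotManager.deleteCollectionLevelSnapshot(zk, "coll1", "snap1");
        }
      }
    }
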
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/snapshots/SolrSnapshotMetaDataManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/snapshots/SolrSnapshotMetaDataManager.java b/solr/core/src/java/org/apache/solr/core/snapshots/SolrSnapshotMetaDataManager.java
deleted file mode 100644
index 8e4d185..0000000
--- a/solr/core/src/java/org/apache/solr/core/snapshots/SolrSnapshotMetaDataManager.java
+++ /dev/null
@@ -1,416 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core.snapshots;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.stream.Collectors;
-
-import org.apache.lucene.codecs.CodecUtil;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexCommit;
-import org.apache.lucene.index.IndexDeletionPolicy;
-import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.util.IOUtils;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.core.DirectoryFactory;
-import org.apache.solr.core.DirectoryFactory.DirContext;
-import org.apache.solr.core.IndexDeletionPolicyWrapper;
-import org.apache.solr.core.SolrCore;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class is responsible for managing the persistent snapshot meta-data for the Solr indexes. The
- * persistent snapshots are implemented by relying on Lucene {@linkplain IndexDeletionPolicy}
- * abstraction to configure a specific {@linkplain IndexCommit} to be retained. The
- * {@linkplain IndexDeletionPolicyWrapper} in Solr uses this class to create/delete the Solr index
- * snapshots.
- */
-public class SolrSnapshotMetaDataManager {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  public static final String SNAPSHOT_METADATA_DIR = "snapshot_metadata";
-
-  /**
-   * A class defining the meta-data for a specific snapshot.
-   */
-  public static class SnapshotMetaData {
-    private String name;
-    private String indexDirPath;
-    private long generationNumber;
-
-    public SnapshotMetaData(String name, String indexDirPath, long generationNumber) {
-      super();
-      this.name = name;
-      this.indexDirPath = indexDirPath;
-      this.generationNumber = generationNumber;
-    }
-
-    public String getName() {
-      return name;
-    }
-
-    public String getIndexDirPath() {
-      return indexDirPath;
-    }
-
-    public long getGenerationNumber() {
-      return generationNumber;
-    }
-
-    @Override
-    public String toString() {
-      StringBuilder builder = new StringBuilder();
-      builder.append("SnapshotMetaData[name=");
-      builder.append(name);
-      builder.append(", indexDirPath=");
-      builder.append(indexDirPath);
-      builder.append(", generation=");
-      builder.append(generationNumber);
-      builder.append("]");
-      return builder.toString();
-    }
-  }
-
-  /** Prefix used for the save file. */
-  public static final String SNAPSHOTS_PREFIX = "snapshots_";
-  private static final int VERSION_START = 0;
-  private static final int VERSION_CURRENT = VERSION_START;
-  private static final String CODEC_NAME = "solr-snapshots";
-
-  // The generation number to use for the next snapshots_N meta-data file
-  private long nextWriteGen;
-
-  private final Directory dir;
-
-  /** Used to map snapshot name to snapshot meta-data. */
-  protected final Map<String,SnapshotMetaData> nameToDetailsMapping = new LinkedHashMap<>();
-  /** Used to figure out the *current* index data directory path */
-  private final SolrCore solrCore;
-
-  /**
-   * A constructor.
-   *
-   * @param dir The directory where the snapshot meta-data should be stored. Enables updating
-   *            the existing meta-data.
-   * @throws IOException in case of errors.
-   */
-  public SolrSnapshotMetaDataManager(SolrCore solrCore, Directory dir) throws IOException {
-    this(solrCore, dir, OpenMode.CREATE_OR_APPEND);
-  }
-
-  /**
-   * A constructor.
-   *
-   * @param dir The directory where the snapshot meta-data is stored.
-   * @param mode CREATE If previous meta-data should be erased.
-   *             APPEND If previous meta-data should be read and updated.
-   *             CREATE_OR_APPEND Creates a new meta-data structure if one does not exist
-   *                              Updates the existing structure if one exists.
-   * @throws IOException in case of errors.
-   */
-  public SolrSnapshotMetaDataManager(SolrCore solrCore, Directory dir, OpenMode mode) throws IOException {
-    this.solrCore = solrCore;
-    this.dir = dir;
-
-    if (mode == OpenMode.CREATE) {
-      deleteSnapshotMetadataFiles();
-    }
-
-    loadFromSnapshotMetadataFile();
-
-    if (mode == OpenMode.APPEND && nextWriteGen == 0) {
-      throw new IllegalStateException("no snapshots stored in this directory");
-    }
-  }
-
-  /**
-   * @return The snapshot meta-data directory
-   */
-  public Directory getSnapshotsDir() {
-    return dir;
-  }
-
-  /**
-   * This method creates a new snapshot meta-data entry.
-   *
-   * @param name The name of the snapshot.
-   * @param indexDirPath The directory path where the index files are stored.
-   * @param gen The generation number for the {@linkplain IndexCommit} being snapshotted.
-   * @throws IOException in case of I/O errors.
-   */
-  public synchronized void snapshot(String name, String indexDirPath, long gen) throws IOException {
-    Objects.requireNonNull(name);
-
-    log.info("Creating the snapshot named {} for core {} associated with index commit with generation {} in directory {}"
-        , name, solrCore.getName(), gen, indexDirPath);
-
-    if(nameToDetailsMapping.containsKey(name)) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "A snapshot with name " + name + " already exists");
-    }
-
-    SnapshotMetaData d = new SnapshotMetaData(name, indexDirPath, gen);
-    nameToDetailsMapping.put(name, d);
-
-    boolean success = false;
-    try {
-      persist();
-      success = true;
-    } finally {
-      if (!success) {
-        try {
-          release(name);
-        } catch (Exception e) {
-          // Suppress so we keep throwing original exception
-        }
-      }
-    }
-  }
-
-  /**
-   * This method deletes a previously created snapshot (if any).
-   *
-   * @param name The name of the snapshot to be deleted.
-   * @return The snapshot meta-data if the snapshot with the snapshot name exists.
-   * @throws IOException in case of I/O error
-   */
-  public synchronized Optional<SnapshotMetaData> release(String name) throws IOException {
-    log.info("Deleting the snapshot named {} for core {}", name, solrCore.getName());
-    SnapshotMetaData result = nameToDetailsMapping.remove(Objects.requireNonNull(name));
-    if(result != null) {
-      boolean success = false;
-      try {
-        persist();
-        success = true;
-      } finally {
-        if (!success) {
-          nameToDetailsMapping.put(name, result);
-        }
-      }
-    }
-    return Optional.ofNullable(result);
-  }
-
-  /**
-   * This method returns whether a snapshot exists for the specified generation number in
-   * the *current* index directory.
-   *
-   * @param genNumber The generation number for the {@linkplain IndexCommit} to be checked.
-   * @return true if the snapshot is created.
-   *         false otherwise.
-   */
-  public synchronized boolean isSnapshotted(long genNumber) {
-    return !nameToDetailsMapping.isEmpty() && isSnapshotted(solrCore.getIndexDir(), genNumber);
-  }
-
-  /**
-   * This method returns whether a snapshot exists for the specified generation number in
-   * the specified index directory.
-   *
-   * @param indexDirPath The index directory path to be checked.
-   * @param genNumber The generation number for the {@linkplain IndexCommit} to be checked.
-   * @return true if the snapshot is created.
-   *         false otherwise.
-   */
-  public synchronized boolean isSnapshotted(String indexDirPath, long genNumber) {
-    return !nameToDetailsMapping.isEmpty()
-        && nameToDetailsMapping.values().stream()
-           .anyMatch(entry -> entry.getIndexDirPath().equals(indexDirPath) && entry.getGenerationNumber() == genNumber);
-  }
-
-  /**
-   * This method returns the snapshot meta-data for the specified name (if it exists).
-   *
-   * @param name The name of the snapshot
-   * @return The snapshot meta-data if exists.
-   */
-  public synchronized Optional<SnapshotMetaData> getSnapshotMetaData(String name) {
-    return Optional.ofNullable(nameToDetailsMapping.get(name));
-  }
-
-  /**
-   * @return A list of snapshots created so far.
-   */
-  public synchronized List<String> listSnapshots() {
-    // We create a copy for thread safety.
-    return new ArrayList<>(nameToDetailsMapping.keySet());
-  }
-
-  /**
-   * This method returns a list of snapshots created in a specified index directory.
-   *
-   * @param indexDirPath The index directory path.
-   * @return a list of snapshots stored in the specified directory.
-   */
-  public synchronized Collection<SnapshotMetaData> listSnapshotsInIndexDir(String indexDirPath) {
-    return nameToDetailsMapping.values().stream()
-        .filter(entry -> indexDirPath.equals(entry.getIndexDirPath()))
-        .collect(Collectors.toList());
-  }
-
-  /**
-   * This method returns the {@linkplain IndexCommit} associated with the specified
-   * <code>commitName</code>. A snapshot with specified <code>commitName</code> must
-   * be created before invoking this method.
-   *
-   * @param commitName The name of the persisted commit
-   * @return the {@linkplain IndexCommit}
-   * @throws IOException in case of I/O error.
-   */
-  public Optional<IndexCommit> getIndexCommitByName(String commitName) throws IOException {
-    Optional<IndexCommit> result = Optional.empty();
-    Optional<SnapshotMetaData> metaData = getSnapshotMetaData(commitName);
-    if (metaData.isPresent()) {
-      String indexDirPath = metaData.get().getIndexDirPath();
-      long gen = metaData.get().getGenerationNumber();
-
-      Directory d = solrCore.getDirectoryFactory().get(indexDirPath, DirContext.DEFAULT, DirectoryFactory.LOCK_TYPE_NONE);
-      try {
-        result = DirectoryReader.listCommits(d)
-                                .stream()
-                                .filter(ic -> ic.getGeneration() == gen)
-                                .findAny();
-
-        if (!result.isPresent()) {
-          log.warn("Unable to find commit with generation {} in the directory {}", gen, indexDirPath);
-        }
-
-      } finally {
-        solrCore.getDirectoryFactory().release(d);
-      }
-    } else {
-      log.warn("Commit with name {} is not persisted for core {}", commitName, solrCore.getName());
-    }
-
-    return result;
-  }
-
-  private synchronized void persist() throws IOException {
-    String fileName = SNAPSHOTS_PREFIX + nextWriteGen;
-    IndexOutput out = dir.createOutput(fileName, IOContext.DEFAULT);
-    boolean success = false;
-    try {
-      CodecUtil.writeHeader(out, CODEC_NAME, VERSION_CURRENT);
-      out.writeVInt(nameToDetailsMapping.size());
-      for(Entry<String,SnapshotMetaData> ent : nameToDetailsMapping.entrySet()) {
-        out.writeString(ent.getKey());
-        out.writeString(ent.getValue().getIndexDirPath());
-        out.writeVLong(ent.getValue().getGenerationNumber());
-      }
-      success = true;
-    } finally {
-      if (!success) {
-        IOUtils.closeWhileHandlingException(out);
-        IOUtils.deleteFilesIgnoringExceptions(dir, fileName);
-      } else {
-        IOUtils.close(out);
-      }
-    }
-
-    dir.sync(Collections.singletonList(fileName));
-
-    if (nextWriteGen > 0) {
-      String lastSaveFile = SNAPSHOTS_PREFIX + (nextWriteGen-1);
-      // exception OK: likely it didn't exist
-      IOUtils.deleteFilesIgnoringExceptions(dir, lastSaveFile);
-    }
-
-    nextWriteGen++;
-  }
-
-  private synchronized void deleteSnapshotMetadataFiles() throws IOException {
-    for(String file : dir.listAll()) {
-      if (file.startsWith(SNAPSHOTS_PREFIX)) {
-        dir.deleteFile(file);
-      }
-    }
-  }
-
-  /**
-   * Reads the snapshot meta-data information from the given {@link Directory}.
-   */
-  private synchronized void loadFromSnapshotMetadataFile() throws IOException {
-    log.debug("Loading from snapshot metadata file...");
-    long genLoaded = -1;
-    IOException ioe = null;
-    List<String> snapshotFiles = new ArrayList<>();
-    for(String file : dir.listAll()) {
-      if (file.startsWith(SNAPSHOTS_PREFIX)) {
-        long gen = Long.parseLong(file.substring(SNAPSHOTS_PREFIX.length()));
-        if (genLoaded == -1 || gen > genLoaded) {
-          snapshotFiles.add(file);
-          Map<String, SnapshotMetaData> snapshotMetaDataMapping = new HashMap<>();
-          IndexInput in = dir.openInput(file, IOContext.DEFAULT);
-          try {
-            CodecUtil.checkHeader(in, CODEC_NAME, VERSION_START, VERSION_START);
-            int count = in.readVInt();
-            for(int i=0;i<count;i++) {
-              String name = in.readString();
-              String indexDirPath = in.readString();
-              long commitGen = in.readVLong();
-              snapshotMetaDataMapping.put(name, new SnapshotMetaData(name, indexDirPath, commitGen));
-            }
-          } catch (IOException ioe2) {
-            // Save first exception & throw in the end
-            if (ioe == null) {
-              ioe = ioe2;
-            }
-          } finally {
-            in.close();
-          }
-
-          genLoaded = gen;
-          nameToDetailsMapping.clear();
-          nameToDetailsMapping.putAll(snapshotMetaDataMapping);
-        }
-      }
-    }
-
-    if (genLoaded == -1) {
-      // Nothing was loaded...
-      if (ioe != null) {
-        // ... not for lack of trying:
-        throw ioe;
-      }
-    } else {
-      if (snapshotFiles.size() > 1) {
-        // Remove any broken / old snapshot files:
-        String curFileName = SNAPSHOTS_PREFIX + genLoaded;
-        for(String file : snapshotFiles) {
-          if (!curFileName.equals(file)) {
-            IOUtils.deleteFilesIgnoringExceptions(dir, file);
-          }
-        }
-      }
-      nextWriteGen = 1+genLoaded;
-    }
-  }
-}

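The persist/load pair above pins down a small on-disk format for each snapshots_N file: a CodecUtil header (codec "solr-snapshots", version 0), a VInt entry count, then (name, indexDirPath, VLong generation) triples. A standalone dump sketch under those assumptions; the directory path and file generation are illustrative:

    import java.nio.file.Paths;
    import org.apache.lucene.codecs.CodecUtil;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.store.IOContext;
    import org.apache.lucene.store.IndexInput;

    public class SnapshotFileDumpSketch {
      public static void main(String[] args) throws Exception {
        try (FSDirectory dir = FSDirectory.open(
                 Paths.get("/var/solr/data/coll1/snapshot_metadata"));
             IndexInput in = dir.openInput("snapshots_3", IOContext.DEFAULT)) {
          CodecUtil.checkHeader(in, "solr-snapshots", 0, 0);
          int count = in.readVInt();
          for (int i = 0; i < count; i++) {
            // Entries are written as (snapshot name, index dir path, generation).
            System.out.printf("%s -> %s @ gen %d%n",
                in.readString(), in.readString(), in.readVLong());
          }
        }
      }
    }
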
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/snapshots/SolrSnapshotsTool.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/snapshots/SolrSnapshotsTool.java b/solr/core/src/java/org/apache/solr/core/snapshots/SolrSnapshotsTool.java
deleted file mode 100644
index 062f434..0000000
--- a/solr/core/src/java/org/apache/solr/core/snapshots/SolrSnapshotsTool.java
+++ /dev/null
@@ -1,467 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.core.snapshots;
-
-import java.io.Closeable;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.io.Writer;
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.nio.charset.StandardCharsets;
-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Optional;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.PosixParser;
-import org.apache.hadoop.fs.Path;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-
-/**
- * This class provides the utility functions required for Solr's snapshots functionality.
- */
-public class SolrSnapshotsTool implements Closeable {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private static final DateFormat dateFormat = new SimpleDateFormat("EEE, d MMM yyyy HH:mm:ss z", Locale.getDefault());
-
-  private static final String CREATE = "create";
-  private static final String DELETE = "delete";
-  private static final String LIST = "list";
-  private static final String DESCRIBE = "describe";
-  private static final String PREPARE_FOR_EXPORT = "prepare-snapshot-export";
-  private static final String EXPORT_SNAPSHOT = "export";
-  private static final String HELP = "help";
-  private static final String COLLECTION = "c";
-  private static final String TEMP_DIR = "t";
-  private static final String DEST_DIR = "d";
-  private static final String SOLR_ZK_ENSEMBLE = "z";
-  private static final String HDFS_PATH_PREFIX = "p";
-  private static final String BACKUP_REPO_NAME = "r";
-  private static final String ASYNC_REQ_ID = "i";
-  private static final List<String> OPTION_HELP_ORDER = Arrays.asList(CREATE, DELETE, LIST, DESCRIBE,
-      PREPARE_FOR_EXPORT, EXPORT_SNAPSHOT, HELP, SOLR_ZK_ENSEMBLE, COLLECTION, DEST_DIR, BACKUP_REPO_NAME,
-      ASYNC_REQ_ID, TEMP_DIR, HDFS_PATH_PREFIX);
-
-  private final CloudSolrClient solrClient;
-
-  public SolrSnapshotsTool(String solrZkEnsemble) {
-    solrClient = new CloudSolrClient.Builder(Collections.singletonList(solrZkEnsemble), Optional.empty()).build();
-  }
-
-  @Override
-  public void close() throws IOException {
-    if (solrClient != null) {
-      solrClient.close();
-    }
-  }
-
-  public void createSnapshot(String collectionName, String snapshotName) {
-    CollectionAdminRequest.CreateSnapshot createSnap = new CollectionAdminRequest.CreateSnapshot(collectionName, snapshotName);
-    CollectionAdminResponse resp;
-    try {
-      resp = createSnap.process(solrClient);
-      Preconditions.checkState(resp.getStatus() == 0, "The CREATESNAPSHOT request failed. The status code is " + resp.getStatus());
-      System.out.println("Successfully created snapshot with name " + snapshotName + " for collection " + collectionName);
-
-    } catch (Exception e) {
-      log.error("Failed to create a snapshot with name " + snapshotName + " for collection " + collectionName, e);
-      System.out.println("Failed to create a snapshot with name " + snapshotName + " for collection " + collectionName
-          +" due to following error : "+e.getLocalizedMessage());
-    }
-  }
-
-  public void deleteSnapshot(String collectionName, String snapshotName) {
-    CollectionAdminRequest.DeleteSnapshot deleteSnap = new CollectionAdminRequest.DeleteSnapshot(collectionName, snapshotName);
-    CollectionAdminResponse resp;
-    try {
-      resp = deleteSnap.process(solrClient);
-      Preconditions.checkState(resp.getStatus() == 0, "The DELETESNAPSHOT request failed. The status code is " + resp.getStatus());
-      System.out.println("Successfully deleted snapshot with name " + snapshotName + " for collection " + collectionName);
-
-    } catch (Exception e) {
-      log.error("Failed to delete a snapshot with name " + snapshotName + " for collection " + collectionName, e);
-      System.out.println("Failed to delete a snapshot with name " + snapshotName + " for collection " + collectionName
-          +" due to following error : "+e.getLocalizedMessage());
-    }
-  }
-
-  @SuppressWarnings("rawtypes")
-  public void listSnapshots(String collectionName) {
-    CollectionAdminRequest.ListSnapshots listSnaps = new CollectionAdminRequest.ListSnapshots(collectionName);
-    CollectionAdminResponse resp;
-    try {
-      resp = listSnaps.process(solrClient);
-      Preconditions.checkState(resp.getStatus() == 0, "The LISTSNAPSHOTS request failed. The status code is " + resp.getStatus());
-
-      NamedList apiResult = (NamedList) resp.getResponse().get(SolrSnapshotManager.SNAPSHOTS_INFO);
-      for (int i = 0; i < apiResult.size(); i++) {
-        System.out.println(apiResult.getName(i));
-      }
-
-    } catch (Exception e) {
-      log.error("Failed to list snapshots for collection " + collectionName, e);
-      System.out.println("Failed to list snapshots for collection " + collectionName
-          +" due to following error : "+e.getLocalizedMessage());
-    }
-  }
-
-  public void describeSnapshot(String collectionName, String snapshotName) {
-    try {
-      Collection<CollectionSnapshotMetaData> snaps = listCollectionSnapshots(collectionName);
-      for (CollectionSnapshotMetaData m : snaps) {
-        if (snapshotName.equals(m.getName())) {
-          System.out.println("Name: " + m.getName());
-          System.out.println("Status: " + m.getStatus());
-          System.out.println("Time of creation: " + dateFormat.format(m.getCreationDate()));
-          System.out.println("Total number of cores with snapshot: " + m.getReplicaSnapshots().size());
-          System.out.println("-----------------------------------");
-          for (CoreSnapshotMetaData n : m.getReplicaSnapshots()) {
-            StringBuilder builder = new StringBuilder();
-            builder.append("Core [name=");
-            builder.append(n.getCoreName());
-            builder.append(", leader=");
-            builder.append(n.isLeader());
-            builder.append(", generation=");
-            builder.append(n.getGenerationNumber());
-            builder.append(", indexDirPath=");
-            builder.append(n.getIndexDirPath());
-            builder.append("]\n");
-            System.out.println(builder.toString());
-          }
-        }
-      }
-    } catch (Exception e) {
-      log.error("Failed to fetch snapshot details", e);
-      System.out.println("Failed to fetch snapshot details due to following error : " + e.getLocalizedMessage());
-    }
-  }
-
-  public Map<String, List<String>> getIndexFilesPathForSnapshot(String collectionName,  String snapshotName, Optional<String> pathPrefix)
-      throws SolrServerException, IOException {
-    Map<String, List<String>> result = new HashMap<>();
-
-    Collection<CollectionSnapshotMetaData> snaps = listCollectionSnapshots(collectionName);
-    Optional<CollectionSnapshotMetaData> meta = Optional.empty();
-    for (CollectionSnapshotMetaData m : snaps) {
-      if (snapshotName.equals(m.getName())) {
-        meta = Optional.of(m);
-      }
-    }
-
-    if (!meta.isPresent()) {
-      throw new IllegalArgumentException("The snapshot named " + snapshotName
-          + " is not found for collection " + collectionName);
-    }
-
-    DocCollection collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
-    for (Slice s : collectionState.getSlices()) {
-      List<CoreSnapshotMetaData> replicaSnaps = meta.get().getReplicaSnapshotsForShard(s.getName());
-      // Prepare a list of *existing* replicas (since one or more replicas could have been deleted after the snapshot creation).
-      List<CoreSnapshotMetaData> availableReplicas = new ArrayList<>();
-      for (CoreSnapshotMetaData m : replicaSnaps) {
-        if (isReplicaAvailable(s, m.getCoreName())) {
-          availableReplicas.add(m);
-        }
-      }
-
-      if (availableReplicas.isEmpty()) {
-        throw new IllegalArgumentException(
-            "The snapshot named " + snapshotName + " not found for shard "
-                + s.getName() + " of collection " + collectionName);
-      }
-
-      // Prefer a leader replica (at the time when the snapshot was created).
-      CoreSnapshotMetaData coreSnap = availableReplicas.get(0);
-      for (CoreSnapshotMetaData m : availableReplicas) {
-        if (m.isLeader()) {
-          coreSnap = m;
-        }
-      }
-
-      String indexDirPath = coreSnap.getIndexDirPath();
-      if (pathPrefix.isPresent()) {
-        // If the path prefix is specified, rebuild the path to the index directory.
-        Path t = new Path(coreSnap.getIndexDirPath());
-        indexDirPath = (new Path(pathPrefix.get(), t.toUri().getPath())).toString();
-      }
-
-      List<String> paths = new ArrayList<>();
-      for (String fileName : coreSnap.getFiles()) {
-        Path p = new Path(indexDirPath, fileName);
-        paths.add(p.toString());
-      }
-
-      result.put(s.getName(), paths);
-    }
-
-    return result;
-  }
-
-  public void buildCopyListings(String collectionName, String snapshotName, String localFsPath, Optional<String> pathPrefix)
-      throws SolrServerException, IOException {
-    Map<String, List<String>> paths = getIndexFilesPathForSnapshot(collectionName, snapshotName, pathPrefix);
-    for (Map.Entry<String,List<String>> entry : paths.entrySet()) {
-      StringBuilder filesBuilder = new StringBuilder();
-      for (String filePath : entry.getValue()) {
-        filesBuilder.append(filePath);
-        filesBuilder.append("\n");
-      }
-
-      String files = filesBuilder.toString().trim();
-      try (Writer w = new OutputStreamWriter(new FileOutputStream(new File(localFsPath, entry.getKey())), StandardCharsets.UTF_8)) {
-        w.write(files);
-      }
-    }
-  }
-
-  public void backupCollectionMetaData(String collectionName, String snapshotName, String backupLoc) throws SolrServerException, IOException {
-    // Back up the collection meta-data
-    CollectionAdminRequest.Backup backup = new CollectionAdminRequest.Backup(collectionName, snapshotName);
-    backup.setIndexBackupStrategy(CollectionAdminParams.NO_INDEX_BACKUP_STRATEGY);
-    backup.setLocation(backupLoc);
-    CollectionAdminResponse resp = backup.process(solrClient);
-    Preconditions.checkState(resp.getStatus() == 0, "The request failed. The status code is " + resp.getStatus());
-  }
-
-  public void prepareForExport(String collectionName, String snapshotName, String localFsPath, Optional<String> pathPrefix, String destPath) {
-    try {
-      buildCopyListings(collectionName, snapshotName, localFsPath, pathPrefix);
-      System.out.println("Successfully prepared copylisting for the snapshot export.");
-    } catch (Exception e) {
-      log.error("Failed to prepare a copylisting for snapshot with name " + snapshotName + " for collection "
-      + collectionName, e);
-      System.out.println("Failed to prepare a copylisting for snapshot with name " + snapshotName + " for collection "
-      + collectionName + " due to following error : " + e.getLocalizedMessage());
-      System.exit(1);
-    }
-
-    try {
-      backupCollectionMetaData(collectionName, snapshotName, destPath);
-      System.out.println("Successfully backed up collection meta-data");
-    } catch (Exception e) {
-      log.error("Failed to backup collection meta-data for collection " + collectionName, e);
-      System.out.println("Failed to backup collection meta-data for collection " + collectionName
-          + " due to following error : " + e.getLocalizedMessage());
-      System.exit(1);
-    }
-  }
-
-  public void exportSnapshot(String collectionName, String snapshotName, String destPath, Optional<String> backupRepo,
-      Optional<String> asyncReqId) {
-    try {
-      CollectionAdminRequest.Backup backup = new CollectionAdminRequest.Backup(collectionName, snapshotName);
-      backup.setCommitName(snapshotName);
-      backup.setIndexBackupStrategy(CollectionAdminParams.COPY_FILES_STRATEGY);
-      backup.setLocation(destPath);
-      if (backupRepo.isPresent()) {
-        backup.setRepositoryName(backupRepo.get());
-      }
-      // if asyncId is null, processAsync will block and throw an Exception with any error
-      backup.processAsync(asyncReqId.orElse(null), solrClient);
-    } catch (Exception e) {
-      log.error("Failed to backup collection meta-data for collection " + collectionName, e);
-      System.out.println("Failed to backup collection meta-data for collection " + collectionName
-          + " due to following error : " + e.getLocalizedMessage());
-      System.exit(1);
-    }
-  }
-
-  public static void main(String[] args) throws IOException {
-    CommandLineParser parser = new PosixParser();
-    Options options = new Options();
-
-    options.addOption(null, CREATE, true, "This command will create a snapshot with the specified name.");
-    options.addOption(null, DELETE, true, "This command will delete a snapshot with the specified name.");
-    options.addOption(null, LIST, false, "This command will list all the named snapshots for the specified collection.");
-    options.addOption(null, DESCRIBE, true, "This command will print details for a named snapshot for the specified collection.");
-    options.addOption(null, PREPARE_FOR_EXPORT, true, "This command will prepare copy listings for the specified snapshot."
-        + " This command should be used only if Solr is deployed with Hadoop and the collection index files are stored on a shared"
-        + " file-system, e.g. HDFS.");
-    options.addOption(null, EXPORT_SNAPSHOT, true, "This command will create a backup for the specified snapshot.");
-    options.addOption(null, HELP, false, "This command will print the help message for the snapshots related commands.");
-    options.addOption(TEMP_DIR, true, "This parameter specifies the path of a temporary directory on the local filesystem"
-        + " used during the prepare-snapshot-export command.");
-    options.addOption(DEST_DIR, true, "This parameter specifies the path on the shared file-system (e.g. HDFS) where the snapshot-related"
-        + " information should be stored.");
-    options.addOption(COLLECTION, true, "This parameter specifies the name of the collection to be used during the snapshot operation.");
-    options.addOption(SOLR_ZK_ENSEMBLE, true, "This parameter specifies the Solr ZooKeeper ensemble address.");
-    options.addOption(HDFS_PATH_PREFIX, true, "This parameter specifies the HDFS URI prefix to be used"
-        + " during snapshot export preparation. This is applicable only if the Solr collection index files are stored on HDFS.");
-    options.addOption(BACKUP_REPO_NAME, true, "This parameter specifies the name of the backup repository to be used"
-        + " during snapshot export.");
-    options.addOption(ASYNC_REQ_ID, true, "This parameter specifies the async request identifier to be used"
-        + " during snapshot export.");
-
-    CommandLine cmd = null;
-    try {
-      cmd = parser.parse(options, args);
-    } catch (ParseException e) {
-      System.out.println(e.getLocalizedMessage());
-      printHelp(options);
-      System.exit(1);
-    }
-
-    if (cmd.hasOption(CREATE) || cmd.hasOption(DELETE) || cmd.hasOption(LIST) || cmd.hasOption(DESCRIBE)
-        || cmd.hasOption(PREPARE_FOR_EXPORT) || cmd.hasOption(EXPORT_SNAPSHOT)) {
-      try (SolrSnapshotsTool tool = new SolrSnapshotsTool(requiredArg(options, cmd, SOLR_ZK_ENSEMBLE))) {
-        if (cmd.hasOption(CREATE)) {
-          String snapshotName = cmd.getOptionValue(CREATE);
-          String collectionName = requiredArg(options, cmd, COLLECTION);
-          tool.createSnapshot(collectionName, snapshotName);
-
-        } else if (cmd.hasOption(DELETE)) {
-          String snapshotName = cmd.getOptionValue(DELETE);
-          String collectionName = requiredArg(options, cmd, COLLECTION);
-          tool.deleteSnapshot(collectionName, snapshotName);
-
-        } else if (cmd.hasOption(LIST)) {
-          String collectionName = requiredArg(options, cmd, COLLECTION);
-          tool.listSnapshots(collectionName);
-
-        } else if (cmd.hasOption(DESCRIBE)) {
-          String snapshotName = cmd.getOptionValue(DESCRIBE);
-          String collectionName = requiredArg(options, cmd, COLLECTION);
-          tool.describeSnapshot(collectionName, snapshotName);
-
-        } else if (cmd.hasOption(PREPARE_FOR_EXPORT)) {
-          String snapshotName = cmd.getOptionValue(PREPARE_FOR_EXPORT);
-          String collectionName = requiredArg(options, cmd, COLLECTION);
-          String localFsDir = requiredArg(options, cmd, TEMP_DIR);
-          String hdfsOpDir = requiredArg(options, cmd, DEST_DIR);
-          Optional<String> pathPrefix = Optional.ofNullable(cmd.getOptionValue(HDFS_PATH_PREFIX));
-
-          if (pathPrefix.isPresent()) {
-            try {
-              new URI(pathPrefix.get());
-            } catch (URISyntaxException e) {
-              System.out.println(
-                  "The specified file system path prefix " + pathPrefix.get()
-                      + " is invalid. The error is " + e.getLocalizedMessage());
-              System.exit(1);
-            }
-          }
-          tool.prepareForExport(collectionName, snapshotName, localFsDir, pathPrefix, hdfsOpDir);
-
-        } else if (cmd.hasOption(EXPORT_SNAPSHOT)) {
-          String snapshotName = cmd.getOptionValue(EXPORT_SNAPSHOT);
-          String collectionName = requiredArg(options, cmd, COLLECTION);
-          String destDir = requiredArg(options, cmd, DEST_DIR);
-          Optional<String> backupRepo = Optional.ofNullable(cmd.getOptionValue(BACKUP_REPO_NAME));
-          Optional<String> asyncReqId = Optional.ofNullable(cmd.getOptionValue(ASYNC_REQ_ID));
-
-          tool.exportSnapshot(collectionName, snapshotName, destDir, backupRepo, asyncReqId);
-        }
-      }
-    } else if (cmd.hasOption(HELP))  {
-      printHelp(options);
-    } else {
-      System.out.println("Unknown command specified.");
-      printHelp(options);
-    }
-  }
-
-  private static String requiredArg(Options options, CommandLine cmd, String optVal) {
-    if (!cmd.hasOption(optVal)) {
-      System.out.println("Please specify the value for option " + optVal);
-      printHelp(options);
-      System.exit(1);
-    }
-    return cmd.getOptionValue(optVal);
-  }
-
-  private static boolean isReplicaAvailable(Slice s, String coreName) {
-    for (Replica r: s.getReplicas()) {
-      if (coreName.equals(r.getCoreName())) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  private Collection<CollectionSnapshotMetaData> listCollectionSnapshots(String collectionName)
-      throws SolrServerException, IOException {
-    CollectionAdminRequest.ListSnapshots listSnapshots = new CollectionAdminRequest.ListSnapshots(collectionName);
-    CollectionAdminResponse resp = listSnapshots.process(solrClient);
-
-    Preconditions.checkState(resp.getStatus() == 0);
-
-    NamedList apiResult = (NamedList) resp.getResponse().get(SolrSnapshotManager.SNAPSHOTS_INFO);
-
-    Collection<CollectionSnapshotMetaData> result = new ArrayList<>();
-    for (int i = 0; i < apiResult.size(); i++) {
-      result.add(new CollectionSnapshotMetaData((NamedList<Object>)apiResult.getVal(i)));
-    }
-
-    return result;
-  }
-
-  private static void printHelp(Options options) {
-    StringBuilder helpFooter = new StringBuilder();
-    helpFooter.append("Examples: \n");
-    helpFooter.append("snapshotscli.sh --create snapshot-1 -c books -z localhost:2181 \n");
-    helpFooter.append("snapshotscli.sh --list -c books -z localhost:2181 \n");
-    helpFooter.append("snapshotscli.sh --describe snapshot-1 -c books -z localhost:2181 \n");
-    helpFooter.append("snapshotscli.sh --export snapshot-1 -c books -z localhost:2181 -b repo -l backupPath -i req_0 \n");
-    helpFooter.append("snapshotscli.sh --delete snapshot-1 -c books -z localhost:2181 \n");
-
-    HelpFormatter formatter = new HelpFormatter();
-    formatter.setOptionComparator(new OptionComparator<>());
-    formatter.printHelp("SolrSnapshotsTool", null, options, helpFooter.toString(), false);
-  }
-
-  private static class OptionComparator<T extends Option> implements Comparator<T> {
-
-    @Override
-    public int compare(T o1, T o2) {
-      String s1 = o1.hasLongOpt() ? o1.getLongOpt() : o1.getOpt();
-      String s2 = o2.hasLongOpt() ? o2.getLongOpt() : o2.getOpt();
-      return OPTION_HELP_ORDER.indexOf(s1) - OPTION_HELP_ORDER.indexOf(s2);
-    }
-  }
-
-}
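
For orientation, here is a minimal sketch of driving the tool above programmatically instead of through main(). The ZooKeeper address, collection name, snapshot name, and destination path are placeholder assumptions; the constructor, try-with-resources usage, and method signatures are the ones shown in this file.

    import java.util.List;
    import java.util.Map;
    import java.util.Optional;

    public class SnapshotExportExample {
      public static void main(String[] args) throws Exception {
        // The tool is AutoCloseable (see the try-with-resources in main() above).
        try (SolrSnapshotsTool tool = new SolrSnapshotsTool("localhost:2181")) {
          // Per-shard index file paths captured by the snapshot (no path prefix).
          Map<String, List<String>> files =
              tool.getIndexFilesPathForSnapshot("books", "snapshot-1", Optional.empty());
          files.forEach((shard, paths) -> System.out.println(shard + " -> " + paths));

          // Export the snapshot; a present async request id makes processAsync non-blocking.
          tool.exportSnapshot("books", "snapshot-1", "/backups/books",
              Optional.empty(), Optional.of("req_0"));
        }
      }
    }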

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/snapshots/package-info.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/snapshots/package-info.java b/solr/core/src/java/org/apache/solr/core/snapshots/package-info.java
deleted file mode 100644
index 3242cd3..0000000
--- a/solr/core/src/java/org/apache/solr/core/snapshots/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-
-/**
- * Core classes for Solr's persistent snapshots functionality
- */
-package org.apache.solr.core.snapshots;
\ No newline at end of file


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/rule/ReplicaAssigner.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/rule/ReplicaAssigner.java b/solr/core/src/java/org/apache/solr/cloud/rule/ReplicaAssigner.java
deleted file mode 100644
index d40f342..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/rule/ReplicaAssigner.java
+++ /dev/null
@@ -1,447 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.rule;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ReplicaPosition;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.rule.ImplicitSnitch;
-import org.apache.solr.common.cloud.rule.Snitch;
-import org.apache.solr.common.cloud.rule.SnitchContext;
-import org.apache.solr.common.util.Utils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static java.util.Collections.singletonList;
-import static org.apache.solr.cloud.rule.Rule.MatchStatus.NODE_CAN_BE_ASSIGNED;
-import static org.apache.solr.cloud.rule.Rule.MatchStatus.NOT_APPLICABLE;
-import static org.apache.solr.cloud.rule.Rule.Phase.ASSIGN;
-import static org.apache.solr.cloud.rule.Rule.Phase.FUZZY_ASSIGN;
-import static org.apache.solr.cloud.rule.Rule.Phase.FUZZY_VERIFY;
-import static org.apache.solr.cloud.rule.Rule.Phase.VERIFY;
-import static org.apache.solr.common.util.Utils.getDeepCopy;
-
-public class ReplicaAssigner {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  List<Rule> rules;
-  Map<String, Integer> shardVsReplicaCount;
-  Map<String, Map<String, Object>> nodeVsTags;
-  Map<String, HashMap<String, Integer>> shardVsNodes;
-  List<String> participatingLiveNodes;
-  Set<String> tagNames = new HashSet<>();
-  private Map<String, AtomicInteger> nodeVsCores = new HashMap<>();
-
-
-  /**
-   * @param shardVsReplicaCount shard names vs the number of replicas required for each of those shards
-   * @param snitches            snitch details
-   * @param shardVsNodes        the current state of the system; may be an empty map if no replicas
-   *                            have been created in this collection so far
-   */
-  public ReplicaAssigner(List<Rule> rules,
-                         Map<String, Integer> shardVsReplicaCount,
-                         List snitches,
-                         Map<String, Map<String, Integer>> shardVsNodes,
-                         List<String> participatingLiveNodes,
-                         SolrCloudManager cloudManager, ClusterState clusterState) {
-    this.rules = rules;
-    for (Rule rule : rules) tagNames.add(rule.tag.name);
-    this.shardVsReplicaCount = shardVsReplicaCount;
-    this.participatingLiveNodes = new ArrayList<>(participatingLiveNodes);
-    this.nodeVsTags = getTagsForNodes(cloudManager, snitches);
-    this.shardVsNodes = getDeepCopy(shardVsNodes, 2);
-
-    if (clusterState != null) {
-      Map<String, DocCollection> collections = clusterState.getCollectionsMap();
-      for (Map.Entry<String, DocCollection> entry : collections.entrySet()) {
-        DocCollection coll = entry.getValue();
-        for (Slice slice : coll.getSlices()) {
-          for (Replica replica : slice.getReplicas()) {
-            AtomicInteger count = nodeVsCores.get(replica.getNodeName());
-            if (count == null) nodeVsCores.put(replica.getNodeName(), count = new AtomicInteger());
-            count.incrementAndGet();
-          }
-        }
-      }
-    }
-  }
-
-  public Map<String, Map<String, Object>> getNodeVsTags() {
-    return nodeVsTags;
-
-  }
-
-
-  /**
-   * For each shard return a new set of nodes where the replicas need to be created satisfying
-   * the specified rule
-   */
-  public Map<ReplicaPosition, String> getNodeMappings() {
-    Map<ReplicaPosition, String> result = getNodeMappings0();
-    if (result == null) {
-      String msg = "Could not identify nodes matching the rules " + rules;
-      if (!failedNodes.isEmpty()) {
-        Map<String, String> failedNodeErrors = new HashMap<>();
-        for (Map.Entry<String, SnitchContext> e : this.failedNodes.entrySet()) {
-          failedNodeErrors.put(e.getKey(), e.getValue().getErrMsg());
-        }
-        msg += " Some nodes were excluded from replica assignment because tags could not be obtained from them: " + failedNodeErrors;
-      }
-      msg += "\nTag values: " + Utils.toJSONString(getNodeVsTags());
-      if (!shardVsNodes.isEmpty()) {
-        msg += "\nInitial state for the collection: " + Utils.toJSONString(shardVsNodes);
-      }
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, msg);
-    }
-    return result;
-
-  }
-
-  Map<ReplicaPosition, String> getNodeMappings0() {
-    List<String> shardNames = new ArrayList<>(shardVsReplicaCount.keySet());
-    int[] shardOrder = new int[shardNames.size()];
-    for (int i = 0; i < shardNames.size(); i++) shardOrder[i] = i;
-
-    boolean hasFuzzyRules = false;
-    int nonWildCardShardRules = 0;
-    for (Rule r : rules) {
-      if (r.isFuzzy()) hasFuzzyRules = true;
-      if (!r.shard.isWildCard()) {
-        nonWildCardShardRules++;
-        //we will have to try all combinations
-        if (shardNames.size() > 10) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "Max 10 shards allowed if there is a non wild card shard specified in rule");
-        }
-      }
-    }
-
-    Map<ReplicaPosition, String> result = tryAllPermutations(shardNames, shardOrder, nonWildCardShardRules, false);
-    if (result == null && hasFuzzyRules) {
-      result = tryAllPermutations(shardNames, shardOrder, nonWildCardShardRules, true);
-    }
-    return result;
-  }
-
-  private Map<ReplicaPosition, String> tryAllPermutations(List<String> shardNames,
-                                                          int[] shardOrder,
-                                                          int nonWildCardShardRules,
-                                                          boolean fuzzyPhase) {
-
-
-    Iterator<int[]> shardPermutations = nonWildCardShardRules > 0 ?
-        permutations(shardNames.size()) :
-        singletonList(shardOrder).iterator();
-
-    while (shardPermutations.hasNext()) {
-      int[] p = shardPermutations.next();
-      List<ReplicaPosition> replicaPositions = new ArrayList<>();
-      for (int pos : p) {
-        for (int j = 0; j < shardVsReplicaCount.get(shardNames.get(pos)); j++) {
-          replicaPositions.add(new ReplicaPosition(shardNames.get(pos), j, Replica.Type.NRT));
-        }
-      }
-      Collections.sort(replicaPositions);
-      for (Iterator<int[]> it = permutations(rules.size()); it.hasNext(); ) {
-        int[] permutation = it.next();
-        Map<ReplicaPosition, String> result = tryAPermutationOfRules(permutation, replicaPositions, fuzzyPhase);
-        if (result != null) return result;
-      }
-    }
-
-    return null;
-  }
-
-
-  private Map<ReplicaPosition, String> tryAPermutationOfRules(int[] rulePermutation, List<ReplicaPosition> replicaPositions, boolean fuzzyPhase) {
-    Map<String, Map<String, Object>> nodeVsTagsCopy = getDeepCopy(nodeVsTags, 2);
-    Map<ReplicaPosition, String> result = new LinkedHashMap<>();
-    int startPosition = 0;
-    Map<String, Map<String, Integer>> copyOfCurrentState = getDeepCopy(shardVsNodes, 2);
-    List<String> sortedLiveNodes = new ArrayList<>(this.participatingLiveNodes);
-    Collections.sort(sortedLiveNodes, (String n1, String n2) -> {
-      int result1 = 0;
-      for (int i = 0; i < rulePermutation.length; i++) {
-        Rule rule = rules.get(rulePermutation[i]);
-        int val = rule.compare(n1, n2, nodeVsTagsCopy, copyOfCurrentState);
-        if (val != 0) {//at least one comparison was non-zero, so stop here
-          result1 = val;
-          break;
-        }
-        if (result1 == 0) {//if all else is equal, prefer nodes with fewer cores
-          AtomicInteger n1Count = nodeVsCores.get(n1);
-          AtomicInteger n2Count = nodeVsCores.get(n2);
-          int a = n1Count == null ? 0 : n1Count.get();
-          int b = n2Count == null ? 0 : n2Count.get();
-          result1 = Integer.compare(a, b);
-        }
-
-      }
-      return result1;
-    });
-    forEachPosition:
-    for (ReplicaPosition replicaPosition : replicaPositions) {
-      //trying to assign a node by verifying each rule in this rulePermutation
-      forEachNode:
-      for (int j = 0; j < sortedLiveNodes.size(); j++) {
-        String liveNode = sortedLiveNodes.get(startPosition % sortedLiveNodes.size());
-        startPosition++;
-        for (int i = 0; i < rulePermutation.length; i++) {
-          Rule rule = rules.get(rulePermutation[i]);
-          //trying to assign a replica into this node in this shard
-          Rule.MatchStatus status = rule.tryAssignNodeToShard(liveNode,
-              copyOfCurrentState, nodeVsTagsCopy, replicaPosition.shard, fuzzyPhase ? FUZZY_ASSIGN : ASSIGN);
-          if (status == Rule.MatchStatus.CANNOT_ASSIGN_FAIL) {
-            continue forEachNode;//try another node for this position
-          }
-        }
-        //Reaching this point means every rule accepted this node for this position,
-        //so record the assignment and update the current state
-        result.put(replicaPosition, liveNode);
-        Map<String, Integer> nodeNames = copyOfCurrentState.get(replicaPosition.shard);
-        if (nodeNames == null) copyOfCurrentState.put(replicaPosition.shard, nodeNames = new HashMap<>());
-        Integer n = nodeNames.get(liveNode);
-        n = n == null ? 1 : n + 1;
-        nodeNames.put(liveNode, n);
-        Map<String, Object> tagsMap = nodeVsTagsCopy.get(liveNode);
-        Number coreCount = tagsMap == null ? null: (Number) tagsMap.get(ImplicitSnitch.CORES);
-        if (coreCount != null) {
-          nodeVsTagsCopy.get(liveNode).put(ImplicitSnitch.CORES, coreCount.intValue() + 1);
-        }
-
-        continue forEachPosition;
-      }
-      //if it reached here, we could not find a node for this position
-      return null;
-    }
-
-    if (replicaPositions.size() > result.size()) {
-      return null;
-    }
-
-    for (Map.Entry<ReplicaPosition, String> e : result.entrySet()) {
-      for (int i = 0; i < rulePermutation.length; i++) {
-        Rule rule = rules.get(rulePermutation[i]);
-        Rule.MatchStatus matchStatus = rule.tryAssignNodeToShard(e.getValue(),
-            copyOfCurrentState, nodeVsTagsCopy, e.getKey().shard, fuzzyPhase ? FUZZY_VERIFY : VERIFY);
-        if (matchStatus != NODE_CAN_BE_ASSIGNED && matchStatus != NOT_APPLICABLE) return null;
-      }
-    }
-    return result;
-  }
-
-  /**
-   * get all permutations for the int[] whose items are 0..level
-   */
-  public static Iterator<int[]> permutations(final int level) {
-    return new Iterator<int[]>() {
-      int i = 0;
-      int[] next;
-
-      @Override
-      public boolean hasNext() {
-        AtomicReference<int[]> nthval = new AtomicReference<>();
-        permute(0, new int[level], new BitSet(level), nthval, i, new AtomicInteger());
-        i++;
-        next = nthval.get();
-        return next != null;
-      }
-
-      @Override
-      public int[] next() {
-        return next;
-      }
-    };
-
-  }
-
-
-  private static void permute(int level, int[] permuted, BitSet used, AtomicReference<int[]> nthval,
-                              int requestedIdx, AtomicInteger seenSoFar) {
-    if (level == permuted.length) {
-      if (seenSoFar.get() == requestedIdx) nthval.set(permuted);
-      else seenSoFar.incrementAndGet();
-    } else {
-      for (int i = 0; i < permuted.length; i++) {
-        if (!used.get(i)) {
-          used.set(i);
-          permuted[level] = i;
-          permute(level + 1, permuted, used, nthval, requestedIdx, seenSoFar);
-          if (nthval.get() != null) break;
-          used.set(i, false);
-        }
-      }
-    }
-  }
-
-
-  public Map<String, SnitchContext> failedNodes = new HashMap<>();
-
-  static class SnitchInfoImpl extends SnitchContext.SnitchInfo {
-    final Snitch snitch;
-    final Set<String> myTags = new HashSet<>();
-    final Map<String, SnitchContext> nodeVsContext = new HashMap<>();
-    private final SolrCloudManager cloudManager;
-
-    SnitchInfoImpl(Map<String, Object> conf, Snitch snitch, SolrCloudManager cloudManager) {
-      super(conf);
-      this.snitch = snitch;
-      this.cloudManager = cloudManager;
-    }
-
-    @Override
-    public Set<String> getTagNames() {
-      return myTags;
-    }
-
-
-  }
-
-  /**
-   * Uses the snitches to fetch the tags for all the nodes
-   */
-  private Map<String, Map<String, Object>> getTagsForNodes(final SolrCloudManager cloudManager, List snitchConf) {
-
-    Map<Class, SnitchInfoImpl> snitches = getSnitchInfos(cloudManager, snitchConf);
-    for (Class c : Snitch.WELL_KNOWN_SNITCHES) {
-      if (snitches.containsKey(c)) continue; // already specified explicitly, skip it
-      try {
-        snitches.put(c, new SnitchInfoImpl(Collections.EMPTY_MAP, (Snitch) c.newInstance(), cloudManager));
-      } catch (Exception e) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Error instantiating Snitch " + c.getName());
-      }
-    }
-    for (String tagName : tagNames) {
-      //identify which snitch is going to provide values for a given tag
-      boolean foundProvider = false;
-      for (SnitchInfoImpl info : snitches.values()) {
-        if (info.snitch.isKnownTag(tagName)) {
-          foundProvider = true;
-          info.myTags.add(tagName);
-          break;
-        }
-      }
-      if (!foundProvider)
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown tag in rules " + tagName);
-    }
-
-
-    for (String node : participatingLiveNodes) {
-      //now use the Snitch to get the tags
-      for (SnitchInfoImpl info : snitches.values()) {
-        if (!info.myTags.isEmpty()) {
-          SnitchContext context = getSnitchCtx(node, info, cloudManager);
-          info.nodeVsContext.put(node, context);
-          try {
-            info.snitch.getTags(node, info.myTags, context);
-          } catch (Exception e) {
-            context.exception = e;
-          }
-        }
-      }
-    }
-
-    Map<String, Map<String, Object>> result = new HashMap<>();
-    for (SnitchInfoImpl info : snitches.values()) {
-      for (Map.Entry<String, SnitchContext> e : info.nodeVsContext.entrySet()) {
-        SnitchContext context = e.getValue();
-        String node = e.getKey();
-        if (context.exception != null) {
-          failedNodes.put(node, context);
-          participatingLiveNodes.remove(node);
-          log.warn("Not all tags were obtained from node " + node, context.exception);
-          context.exception = new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-              "Not all tags were obtained from node " + node);
-        } else {
-          Map<String, Object> tags = result.get(node);
-          if (tags == null) {
-            tags = new HashMap<>();
-            result.put(node, tags);
-          }
-          tags.putAll(context.getTags());
-        }
-      }
-    }
-
-    if (participatingLiveNodes.isEmpty()) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Could not get all tags for any nodes");
-
-    }
-    return result;
-
-  }
-
-  private Map<String, Object> snitchSession = new HashMap<>();
-
-  protected SnitchContext getSnitchCtx(String node, SnitchInfoImpl info, SolrCloudManager cloudManager) {
-    return new ServerSnitchContext(info, node, snitchSession, cloudManager);
-  }
-
-  public static void verifySnitchConf(SolrCloudManager cloudManager, List snitchConf) {
-    getSnitchInfos(cloudManager, snitchConf);
-  }
-
-
-  static Map<Class, SnitchInfoImpl> getSnitchInfos(SolrCloudManager cloudManager, List snitchConf) {
-    if (snitchConf == null) snitchConf = Collections.emptyList();
-    Map<Class, SnitchInfoImpl> snitches = new LinkedHashMap<>();
-    for (Object o : snitchConf) {
-      //instantiating explicitly specified snitches
-      String klas = null;
-      Map map = Collections.emptyMap();
-      if (o instanceof Map) {//it can be a Map
-        map = (Map) o;
-        klas = (String) map.get("class");
-        if (klas == null) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "snitch must have  a class attribute");
-        }
-      } else { //or just the snitch name
-        klas = o.toString();
-      }
-      try {
-        if (klas.indexOf('.') == -1) klas = Snitch.class.getPackage().getName() + "." + klas;
-        Snitch inst =
-            (Snitch) Snitch.class.getClassLoader().loadClass(klas).newInstance() ;
-        snitches.put(inst.getClass(), new SnitchInfoImpl(map, inst, cloudManager));
-      } catch (Exception e) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
-
-      }
-
-    }
-    return snitches;
-  }
-
-}
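
As a concrete illustration of the permutations iterator above, the following sketch enumerates all orderings of three items. The class and method names are the ones defined in this file; the output order follows the permute recursion, i.e. lexicographic.

    import java.util.Arrays;
    import java.util.Iterator;

    public class PermutationsExample {
      public static void main(String[] args) {
        // Enumerates all 3! = 6 orderings of {0, 1, 2}.
        // Note: hasNext() computes the next permutation, so always pair it with next().
        Iterator<int[]> it = ReplicaAssigner.permutations(3);
        while (it.hasNext()) {
          System.out.println(Arrays.toString(it.next()));  // [0, 1, 2], [0, 2, 1], ...
        }
      }
    }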

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/rule/Rule.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/rule/Rule.java b/solr/core/src/java/org/apache/solr/cloud/rule/Rule.java
deleted file mode 100644
index e54f5a0..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/rule/Rule.java
+++ /dev/null
@@ -1,386 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.rule;
-
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-
-import static org.apache.solr.cloud.rule.Rule.MatchStatus.CANNOT_ASSIGN_FAIL;
-import static org.apache.solr.cloud.rule.Rule.MatchStatus.NODE_CAN_BE_ASSIGNED;
-import static org.apache.solr.cloud.rule.Rule.MatchStatus.NOT_APPLICABLE;
-import static org.apache.solr.cloud.rule.Rule.Operand.EQUAL;
-import static org.apache.solr.cloud.rule.Rule.Operand.GREATER_THAN;
-import static org.apache.solr.cloud.rule.Rule.Operand.LESS_THAN;
-import static org.apache.solr.cloud.rule.Rule.Operand.NOT_EQUAL;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.cloud.rule.ImplicitSnitch.CORES;
-
-
-public class Rule {
-  public static final String WILD_CARD = "*";
-  public static final String WILD_WILD_CARD = "**";
-  static final Condition SHARD_DEFAULT = new Rule.Condition(SHARD_ID_PROP, WILD_WILD_CARD);
-  static final Condition REPLICA_DEFAULT = new Rule.Condition(REPLICA_PROP, WILD_CARD);
-  Condition shard;
-  Condition replica;
-  Condition tag;
-
-  public Rule(Map m) {
-    for (Object o : m.entrySet()) {
-      Map.Entry e = (Map.Entry) o;
-      Condition condition = new Condition(String.valueOf(e.getKey()), String.valueOf(e.getValue()));
-      if (condition.name.equals(SHARD_ID_PROP)) shard = condition;
-      else if (condition.name.equals(REPLICA_PROP)) replica = condition;
-      else {
-        if (tag != null) {
-          throw new RuntimeException("There can be only one and only one tag other than 'shard' and 'replica' in rule " + m);
-        }
-        tag = condition;
-      }
-
-    }
-    if (shard == null) shard = SHARD_DEFAULT;
-    if (replica == null) replica = REPLICA_DEFAULT;
-    if (tag == null) throw new RuntimeException("There should be a tag other than 'shard' and 'replica'");
-    if (replica.isWildCard() && tag.isWildCard()) {
-      throw new RuntimeException("Both replica and tag cannot be wild cards");
-    }
-
-  }
-
-  static Object parseObj(Object o, Class typ) {
-    if (o == null) return o;
-    if (typ == String.class) return String.valueOf(o);
-    if (typ == Integer.class) {
-      Double v = Double.parseDouble(String.valueOf(o));
-      return v.intValue();
-    }
-    return o;
-  }
-
-  public static Map parseRule(String s) {
-    Map<String, String> result = new LinkedHashMap<>();
-    s = s.trim();
-    List<String> keyVals = StrUtils.splitSmart(s, ',');
-    for (String kv : keyVals) {
-      List<String> keyVal = StrUtils.splitSmart(kv, ':');
-      if (keyVal.size() != 2) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid rule. should have only key and val in : " + kv);
-      }
-      if (keyVal.get(0).trim().length() == 0 || keyVal.get(1).trim().length() == 0) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid rule. should have key and val in : " + kv);
-      }
-      result.put(keyVal.get(0).trim(), keyVal.get(1).trim());
-    }
-    return result;
-  }
-
-
-  @Override
-  public String toString() {
-    Map map = new LinkedHashMap();
-    if (shard != SHARD_DEFAULT) map.put(shard.name, shard.operand.toStr(shard.val));
-    if (replica != REPLICA_DEFAULT) map.put(replica.name, replica.operand.toStr(replica.val));
-    map.put(tag.name, tag.operand.toStr(tag.val));
-    return Utils.toJSONString(map);
-  }
-
-  /**
-   * Check if it is possible to assign this node as a replica of the given shard
-   * without violating this rule
-   *
-   * @param testNode       The node in question
-   * @param shardVsNodeSet Set of nodes for every shard 
-   * @param nodeVsTags     The pre-fetched tags for all the nodes
-   * @param shardName      The shard to which assignment of this node is being attempted
-   * @return MatchStatus
-   */
-  MatchStatus tryAssignNodeToShard(String testNode,
-                                   Map<String, Map<String,Integer>> shardVsNodeSet,
-                                   Map<String, Map<String, Object>> nodeVsTags,
-                                   String shardName, Phase phase) {
-
-    if (tag.isWildCard()) {
-      //this ensures uniqueness across a given tag value
-      //e.g. rack:r168
-      if (!shard.isWildCard() && shardName.equals(shard.val)) return NOT_APPLICABLE;
-      Object tagValueForThisNode = nodeVsTags.get(testNode).get(tag.name);
-      int v = getNumberOfNodesWithSameTagVal(shard, nodeVsTags, shardVsNodeSet,
-          shardName, new Condition(tag.name, tagValueForThisNode, EQUAL), phase);
-      if (phase == Phase.ASSIGN || phase == Phase.FUZZY_ASSIGN)
-        v++;//including this node, the count becomes v+1 during ASSIGN
-      return replica.canMatch(v, phase) ?
-          NODE_CAN_BE_ASSIGNED :
-          CANNOT_ASSIGN_FAIL;
-    } else {
-      if (!shard.isWildCard() && !shardName.equals(shard.val)) return NOT_APPLICABLE;
-      if (replica.isWildCard()) {
-        //this means for each replica, the value must match
-        //shard match is already tested
-        Map<String, Object> tags = nodeVsTags.get(testNode);
-        if (tag.canMatch(tags == null ? null : tags.get(tag.name), phase)) return NODE_CAN_BE_ASSIGNED;
-        else return CANNOT_ASSIGN_FAIL;
-      } else {
-        int v = getNumberOfNodesWithSameTagVal(shard, nodeVsTags, shardVsNodeSet, shardName, tag, phase);
-        return replica.canMatch(v, phase) ? NODE_CAN_BE_ASSIGNED : CANNOT_ASSIGN_FAIL;
-
-      }
-
-    }
-  }
-
-  private int getNumberOfNodesWithSameTagVal(Condition shardCondition,
-                                             Map<String, Map<String, Object>> nodeVsTags,
-                                             Map<String, Map<String,Integer>> shardVsNodeSet,
-                                             String shardName,
-                                             Condition tagCondition,
-                                             Phase phase) {
-
-    int countMatchingThisTagValue = 0;
-    for (Map.Entry<String, Map<String,Integer>> entry : shardVsNodeSet.entrySet()) {
-      //check if this shard is relevant: either the rule uses the ANY wildcard (**)
-      //or this shard is the same as the shard in question
-      if (shardCondition.val.equals(WILD_WILD_CARD) || entry.getKey().equals(shardName)) {
-        Map<String,Integer> nodesInThisShard = shardVsNodeSet.get(shardCondition.val.equals(WILD_WILD_CARD) ? entry.getKey() : shardName);
-        if (nodesInThisShard != null) {
-          for (Map.Entry<String,Integer> aNode : nodesInThisShard.entrySet()) {
-            Map<String, Object> tagValues = nodeVsTags.get(aNode.getKey());
-            if (tagValues == null) continue;
-            Object obj = tagValues.get(tag.name);
-            if (tagCondition.canMatch(obj, phase)) countMatchingThisTagValue += aNode.getValue();
-          }
-        }
-      }
-    }
-    return countMatchingThisTagValue;
-  }
-
-  public int compare(String n1, String n2,
-                     Map<String, Map<String, Object>> nodeVsTags,
-                     Map<String, Map<String,Integer>> currentState) {
-    return tag.compare(n1, n2, nodeVsTags);
-  }
-
-  public boolean isFuzzy() {
-    return shard.fuzzy || replica.fuzzy || tag.fuzzy;
-  }
-
-  public enum Operand {
-    EQUAL(""),
-    NOT_EQUAL("!") {
-      @Override
-      public boolean canMatch(Object ruleVal, Object testVal) {
-        return !super.canMatch(ruleVal, testVal);
-      }
-    },
-    GREATER_THAN(">") {
-      @Override
-      public Object match(String val) {
-        return checkNumeric(super.match(val));
-      }
-
-
-      @Override
-      public boolean canMatch(Object ruleVal, Object testVal) {
-        return testVal != null && compareNum(ruleVal, testVal) == 1;
-      }
-
-    },
-    LESS_THAN("<") {
-      @Override
-      public int compare(Object n1Val, Object n2Val) {
-        return GREATER_THAN.compare(n1Val, n2Val) * -1;
-      }
-
-      @Override
-      public boolean canMatch(Object ruleVal, Object testVal) {
-        return testVal != null && compareNum(ruleVal, testVal) == -1;
-      }
-
-      @Override
-      public Object match(String val) {
-        return checkNumeric(super.match(val));
-      }
-    };
-    public final String operand;
-
-    Operand(String val) {
-      this.operand = val;
-    }
-
-    public String toStr(Object expectedVal) {
-      return operand + expectedVal.toString();
-    }
-
-    Object checkNumeric(Object val) {
-      if (val == null) return null;
-      try {
-        return Integer.parseInt(val.toString());
-      } catch (NumberFormatException e) {
-        throw new RuntimeException("for operand " + operand + " the value must be numeric");
-      }
-    }
-
-    public Object match(String val) {
-      if (operand.isEmpty()) return val;
-      return val.startsWith(operand) ? val.substring(1) : null;
-    }
-
-    public boolean canMatch(Object ruleVal, Object testVal) {
-      return Objects.equals(String.valueOf(ruleVal), String.valueOf(testVal));
-    }
-
-
-    public int compare(Object n1Val, Object n2Val) {
-      return 0;
-    }
-
-    public int compareNum(Object n1Val, Object n2Val) {
-      Integer n1 = (Integer) parseObj(n1Val, Integer.class);
-      Integer n2 = (Integer) parseObj(n2Val, Integer.class);
-      return n1 > n2 ? -1 : Objects.equals(n1, n2) ? 0 : 1;
-    }
-  }
-
-  enum MatchStatus {
-    NODE_CAN_BE_ASSIGNED,
-    CANNOT_ASSIGN_GO_AHEAD,
-    NOT_APPLICABLE,
-    CANNOT_ASSIGN_FAIL
-  }
-
-  enum Phase {
-    ASSIGN, VERIFY, FUZZY_ASSIGN, FUZZY_VERIFY
-  }
-
-  public static class Condition {
-    public final String name;
-    final Object val;
-    public final Operand operand;
-    final boolean fuzzy;
-
-    Condition(String name, Object val, Operand op) {
-      this.name = name;
-      this.val = val;
-      this.operand = op;
-      fuzzy = false;
-    }
-
-    Condition(String key, Object val) {
-      Object expectedVal;
-      boolean fuzzy = false;
-      if (val == null) throw new RuntimeException("value of  a tag cannot be null for key " + key);
-      try {
-        this.name = key.trim();
-        String value = val.toString().trim();
-        if (value.endsWith("~")) {
-          fuzzy = true;
-          value = value.substring(0, value.length() - 1);
-        }
-        if ((expectedVal = NOT_EQUAL.match(value)) != null) {
-          operand = NOT_EQUAL;
-        } else if ((expectedVal = GREATER_THAN.match(value)) != null) {
-          operand = GREATER_THAN;
-        } else if ((expectedVal = LESS_THAN.match(value)) != null) {
-          operand = LESS_THAN;
-        } else {
-          operand = EQUAL;
-          expectedVal = value;
-        }
-
-        if (name.equals(REPLICA_PROP)) {
-          if (!WILD_CARD.equals(expectedVal)) {
-            try {
-              expectedVal = Integer.parseInt(expectedVal.toString());
-            } catch (NumberFormatException e) {
-              throw new RuntimeException("The replica tag value can only be '*' or an integer");
-            }
-          }
-        }
-
-      } catch (Exception e) {
-        throw new IllegalArgumentException("Invalid condition : " + key + ":" + val, e);
-      }
-      this.val = expectedVal;
-      this.fuzzy = fuzzy;
-
-    }
-
-    public boolean isWildCard() {
-      return val.equals(WILD_CARD) || val.equals(WILD_WILD_CARD);
-    }
-
-    boolean canMatch(Object testVal, Phase phase) {
-      if (phase == Phase.FUZZY_ASSIGN || phase == Phase.FUZZY_VERIFY) return true;
-      if (phase == Phase.ASSIGN) {
-        if ((name.equals(REPLICA_PROP) || name.equals(CORES)) &&
-            (operand == GREATER_THAN || operand == NOT_EQUAL)) {
-            //the number of replicas or cores increases as assignment proceeds,
-            //so these conditions are meaningful only after assignment completes
-            //and are therefore checked during the verify phase
-          return true;
-        }
-      }
-
-      return operand.canMatch(val, testVal);
-    }
-
-
-    @Override
-    public boolean equals(Object obj) {
-      if (obj instanceof Condition) {
-        Condition that = (Condition) obj;
-        return Objects.equals(name, that.name) &&
-            Objects.equals(operand, that.operand) &&
-            Objects.equals(val, that.val);
-
-      }
-      return false;
-    }
-
-    @Override
-    public String toString() {
-      return name + ":" + operand.toStr(val) + (fuzzy ? "~" : "");
-    }
-
-    public Integer getInt() {
-      return (Integer) val;
-    }
-
-    public int compare(String n1, String n2, Map<String, Map<String, Object>> nodeVsTags) {
-      Map<String, Object> tags = nodeVsTags.get(n1);
-      Object n1Val = tags == null ? null : tags.get(name);
-      tags = nodeVsTags.get(n2);
-      Object n2Val = tags == null ? null : tags.get(name);
-      if (n1Val == null || n2Val == null) return -1;
-      return isWildCard() ? 0 : operand.compare(n1Val, n2Val);
-    }
-
-  }
-
-
-}
-
-
-
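
To make the Condition parsing above concrete, here is a small sketch; the rule string is only an illustrative example of the shard/replica/tag syntax this class accepts.

    import java.util.Map;

    public class RuleParseExample {
      public static void main(String[] args) {
        // "fewer than 2 replicas of any shard per rack": '<' selects Operand.LESS_THAN,
        // '*' is the single wildcard, and shard defaults to the '**' wildcard when omitted.
        Map parsed = Rule.parseRule("replica:<2,rack:*");
        Rule rule = new Rule(parsed);
        System.out.println(rule);  // expected: {"replica":"<2","rack":"*"}
      }
    }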

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/rule/ServerSnitchContext.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/rule/ServerSnitchContext.java b/solr/core/src/java/org/apache/solr/cloud/rule/ServerSnitchContext.java
deleted file mode 100644
index 3656011..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/rule/ServerSnitchContext.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.rule;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.Collection;
-import java.util.Map;
-
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.common.cloud.rule.SnitchContext;
-import org.apache.solr.common.util.Utils;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ServerSnitchContext extends SnitchContext {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  SolrCloudManager cloudManager;
-  public ServerSnitchContext(SnitchInfo perSnitch,
-                             String node, Map<String, Object> session,
-                             SolrCloudManager cloudManager) {
-    super(perSnitch, node, session);
-    this.cloudManager = cloudManager;
-  }
-
-
-  public Map getZkJson(String path) throws KeeperException, InterruptedException {
-    try {
-      return Utils.getJson(cloudManager.getDistribStateManager(), path) ;
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-
-  }
-
-  public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
-    return cloudManager.getNodeStateProvider().getNodeValues(node, tags);
-  }
-
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/rule/package-info.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/rule/package-info.java b/solr/core/src/java/org/apache/solr/cloud/rule/package-info.java
deleted file mode 100644
index f4f0dd0..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/rule/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- 
-/** 
- * Classes for managing Replica placement strategy when operating in <a href="http://wiki.apache.org/solr/SolrCloud">SolrCloud</a> mode.
- */
-package org.apache.solr.cloud.rule;
-
-

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/AbstractSolrEventListener.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/AbstractSolrEventListener.java b/solr/core/src/java/org/apache/solr/core/AbstractSolrEventListener.java
deleted file mode 100644
index 83b2a93..0000000
--- a/solr/core/src/java/org/apache/solr/core/AbstractSolrEventListener.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.params.EventParams;
-import org.apache.solr.search.SolrIndexSearcher;
-
-/**
- * Convenience base class for {@link SolrEventListener} implementations; every callback
- * throws {@link UnsupportedOperationException} unless overridden by a subclass.
- */
-public class AbstractSolrEventListener implements SolrEventListener {
-  private final SolrCore core;
-  public SolrCore getCore() { return core; }
-
-  public AbstractSolrEventListener(SolrCore core) {
-    this.core = core;
-  }
-  private NamedList args;
-  public NamedList getArgs() { return args; }
-
-  @Override
-  public void init(NamedList args) {
-    this.args = args.clone();
-  }
-
-  @Override
-  public void postCommit() {
-    throw new UnsupportedOperationException();
-  }
-  
-  @Override
-  public void postSoftCommit() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void newSearcher(SolrIndexSearcher newSearcher, SolrIndexSearcher currentSearcher) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public String toString() {
-    return getClass().getName() + args;
-  }
-
-  /**
-   * Add the {@link org.apache.solr.common.params.EventParams#EVENT} with either the {@link org.apache.solr.common.params.EventParams#NEW_SEARCHER}
-   * or {@link org.apache.solr.common.params.EventParams#FIRST_SEARCHER} values depending on the value of currentSearcher.
-   * <p>
-   * Makes a copy of NamedList and then adds the parameters.
-   *
-   *
-   * @param currentSearcher If null, add FIRST_SEARCHER, otherwise NEW_SEARCHER
-   * @param nlst The named list to add the EVENT value to
-   */
-  protected NamedList addEventParms(SolrIndexSearcher currentSearcher, NamedList nlst) {
-    NamedList result = new NamedList();
-    result.addAll(nlst);
-    if (currentSearcher != null) {
-      result.add(EventParams.EVENT, EventParams.NEW_SEARCHER);
-    } else {
-      result.add(EventParams.EVENT, EventParams.FIRST_SEARCHER);
-    }
-    return result;
-  }
-}
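
The extension pattern implied above, as a hedged sketch: subclasses override only the callbacks they need, since the base implementations throw UnsupportedOperationException. The listener name and body are illustrative.

    public class SearcherOpenLogger extends AbstractSolrEventListener {
      public SearcherOpenLogger(SolrCore core) {
        super(core);
      }

      @Override
      public void newSearcher(SolrIndexSearcher newSearcher, SolrIndexSearcher currentSearcher) {
        // A null currentSearcher signals the first searcher after startup,
        // mirroring the FIRST_SEARCHER/NEW_SEARCHER distinction in addEventParms above.
        System.out.println("Searcher opened on core " + getCore().getName());
      }
    }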

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/BlobRepository.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/BlobRepository.java b/solr/core/src/java/org/apache/solr/core/BlobRepository.java
deleted file mode 100644
index 0cf6dfb..0000000
--- a/solr/core/src/java/org/apache/solr/core/BlobRepository.java
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import static org.apache.solr.common.SolrException.ErrorCode.SERVICE_UNAVAILABLE;
-import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
-
-import java.io.InputStream;
-import java.lang.invoke.MethodHandles;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.regex.Pattern;
-
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.HttpClient;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.util.SimplePostTool;
-import org.apache.zookeeper.server.ByteBufferInputStream;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * The purpose of this class is to store the jars loaded in memory and to keep only one copy of each jar on a single node.
- */
-public class BlobRepository {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  static final Random RANDOM;
-  static final Pattern BLOB_KEY_PATTERN_CHECKER = Pattern.compile(".*/\\d+");
-
-  static {
-    // We try to make things reproducible in the context of our tests by initializing the random instance
-    // based on the current seed
-    String seed = System.getProperty("tests.seed");
-    if (seed == null) {
-      RANDOM = new Random();
-    } else {
-      RANDOM = new Random(seed.hashCode());
-    }
-  }
-
-  private final CoreContainer coreContainer;
-  private Map<String, BlobContent> blobs = createMap();
-
-  // for unit tests to override
-  ConcurrentHashMap<String, BlobContent> createMap() {
-    return new ConcurrentHashMap<>();
-  }
-
-  public BlobRepository(CoreContainer coreContainer) {
-    this.coreContainer = coreContainer;
-  }
-
-  // I wanted to use {@link SolrCore#loadDecodeAndCacheBlob(String, Decoder)} below, but precommit complains
-  /**
-   * Returns the contents of a blob containing a ByteBuffer and increments its reference count. Please return the
-   * same object to decrease the refcount. This is normally used for storing jar files and raw binary data.
-   * If you are caching Java objects you should use {@code SolrCore#loadDecodeAndCacheBlob(String, Decoder)}.
-   *
-   * @param key a combination of blob name and version, e.g. blobName/version
-   * @return a reference to the blob
-   */
-  public BlobContentRef<ByteBuffer> getBlobIncRef(String key) {
-   return getBlobIncRef(key, () -> addBlob(key));
-  }
-  
-  /**
-   * Internal method that returns the contents of a blob and increments a reference count. Please return the same 
-   * object to decrease the refcount. Only the decoded content will be cached when this method is used. Component 
-   * authors attempting to share objects across cores should use 
-   * {@code SolrCore#loadDecodeAndCacheBlob(String, Decoder)} which ensures that a proper close hook is also created.
-   *
-   * @param key a combination of blob name and version, e.g. blobName/version
-   * @param decoder a decoder that knows how to interpret the bytes from the blob
-   * @return a reference to the blob
-   */
-  BlobContentRef<Object> getBlobIncRef(String key, Decoder<Object> decoder) {
-    return getBlobIncRef(key.concat(decoder.getName()), () -> addBlob(key,decoder));
-  }
-
-  // do the actual work returning the appropriate type...
-  private <T> BlobContentRef<T> getBlobIncRef(String key, Callable<BlobContent<T>> blobCreator) {
-    BlobContent<T> aBlob;
-    if (this.coreContainer.isZooKeeperAware()) {
-      synchronized (blobs) {
-        aBlob = blobs.get(key);
-        if (aBlob == null) {
-          try {
-            aBlob = blobCreator.call();
-          } catch (Exception e) {
-            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Blob loading failed: "+e.getMessage(), e);
-          }
-        }
-      }
-    } else {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Blob loading is not supported in non-cloud mode");
-      // todo
-    }
-    BlobContentRef<T> ref = new BlobContentRef<>(aBlob);
-    synchronized (aBlob.references) {
-      aBlob.references.add(ref);
-    }
-    return ref;
-  }
-
-  // For use cases sharing raw bytes
-  private BlobContent<ByteBuffer> addBlob(String key) {
-    ByteBuffer b = fetchBlob(key);
-    BlobContent<ByteBuffer> aBlob  = new BlobContent<>(key, b);
-    blobs.put(key, aBlob);
-    return aBlob;
-  }
-
-  // for use cases sharing java objects
-  private BlobContent<Object> addBlob(String key, Decoder<Object> decoder) {
-    ByteBuffer b = fetchBlob(key);
-    String  keyPlusName = key + decoder.getName();
-    BlobContent<Object> aBlob = new BlobContent<>(keyPlusName, b, decoder);
-    blobs.put(keyPlusName, aBlob);
-    return aBlob;
-  }
-  
-  /**
-   * Package local, for unit tests only; please do not use elsewhere.
-   */
-  ByteBuffer fetchBlob(String key) {
-    Replica replica = getSystemCollReplica();
-    String url = replica.getStr(BASE_URL_PROP) + "/" + CollectionAdminParams.SYSTEM_COLL + "/blob/" + key + "?wt=filestream";
-
-    HttpClient httpClient = coreContainer.getUpdateShardHandler().getDefaultHttpClient();
-    HttpGet httpGet = new HttpGet(url);
-    ByteBuffer b;
-    HttpResponse response = null;
-    HttpEntity entity = null;
-    try {
-      response = httpClient.execute(httpGet);
-      entity = response.getEntity();
-      int statusCode = response.getStatusLine().getStatusCode();
-      if (statusCode != 200) {
-        throw new SolrException(SolrException.ErrorCode.NOT_FOUND, "no such blob or version available: " + key);
-      }
-
-      try (InputStream is = entity.getContent()) {
-        b = SimplePostTool.inputStreamToByteArray(is);
-      }
-    } catch (Exception e) {
-      if (e instanceof SolrException) {
-        throw (SolrException) e;
-      } else {
-        throw new SolrException(SolrException.ErrorCode.NOT_FOUND, "could not load : " + key, e);
-      }
-    } finally {
-      Utils.consumeFully(entity);
-    }
-    return b;
-  }
-
-  private Replica getSystemCollReplica() {
-    ZkStateReader zkStateReader = this.coreContainer.getZkController().getZkStateReader();
-    ClusterState cs = zkStateReader.getClusterState();
-    DocCollection coll = cs.getCollectionOrNull(CollectionAdminParams.SYSTEM_COLL);
-    if (coll == null) throw new SolrException(SERVICE_UNAVAILABLE, CollectionAdminParams.SYSTEM_COLL + " collection not available");
-    ArrayList<Slice> slices = new ArrayList<>(coll.getActiveSlices());
-    if (slices.isEmpty()) throw new SolrException(SERVICE_UNAVAILABLE, "No active slices for " + CollectionAdminParams.SYSTEM_COLL + " collection");
-    Collections.shuffle(slices, RANDOM); //do load balancing
-
-    Replica replica = null;
-    for (Slice slice : slices) {
-      List<Replica> replicas = new ArrayList<>(slice.getReplicasMap().values());
-      Collections.shuffle(replicas, RANDOM);
-      for (Replica r : replicas) {
-        if (r.getState() == Replica.State.ACTIVE) {
-          if(zkStateReader.getClusterState().getLiveNodes().contains(r.get(ZkStateReader.NODE_NAME_PROP))){
-            replica = r;
-            break;
-          } else {
-            log.info("replica {} says it is active but not a member of live nodes", r.get(ZkStateReader.NODE_NAME_PROP));
-          }
-        }
-      }
-    }
-    if (replica == null) {
-      throw new SolrException(SERVICE_UNAVAILABLE, "No active replica available for " + CollectionAdminParams.SYSTEM_COLL + " collection");
-    }
-    return replica;
-  }
-
-  /**
-   * Decrements a blob's reference count.
-   *
-   * @param ref the reference obtained earlier. Calling this multiple times with the same ref is harmless (beyond a logged error).
-   */
-  public void decrementBlobRefCount(BlobContentRef ref) {
-    if (ref == null) return;
-    synchronized (ref.blob.references) {
-      if (!ref.blob.references.remove(ref)) {
-        log.error("Multiple releases for the same reference");
-      }
-      if (ref.blob.references.isEmpty()) {
-        blobs.remove(ref.blob.key);
-      }
-    }
-  }
-
-  public static class BlobContent<T> {
-    public final String key;
-    private final T content; // holds the byte buffer or the cached object; holding both would waste memory
-    // ref counting mechanism
-    private final Set<BlobContentRef> references = new HashSet<>();
-
-    public BlobContent(String key, ByteBuffer buffer, Decoder<T> decoder) {
-      this.key = key;
-      this.content = decoder.decode(new ByteBufferInputStream(buffer));
-    }
-
-    @SuppressWarnings("unchecked")
-    public BlobContent(String key, ByteBuffer buffer) {
-      this.key = key;
-      this.content = (T) buffer; 
-    }
-
-    /**
-     * Get the cached object. 
-     * 
-     * @return the object representing the content that is cached.
-     */
-    public T get() {
-      return this.content;
-    }
-
-  }
-
-  public interface Decoder<T> {
-
-    /**
-     * A name by which to distinguish this decoding. This only needs to be implemented if you want to support
-     * decoding the same blob content with more than one decoder.
-     * 
-     * @return The name of the decoding, defaults to empty string.
-     */
-    default String getName() { return ""; }
-
-    /**
-     * A routine that knows how to convert the stream of bytes from the blob into a Java object.
-     * 
-     * @param inputStream the bytes from a blob
-     * @return A Java object of the specified type.
-     */
-    T decode(InputStream inputStream);
-  }
-
-
-  public static class BlobContentRef<T> {
-    public final BlobContent<T> blob;
-
-    private BlobContentRef(BlobContent<T> blob) {
-      this.blob = blob;
-    }
-  }
-
-}

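As a usage note, a hedged sketch of the refcount contract described in the Javadocs above. How one obtains the repository instance is an assumption (shown via a hypothetical coreContainer accessor), but keys of the form blobName/version and the get/decrement pairing come straight from the class:

    BlobRepository repo = coreContainer.getBlobRepository(); // assumed accessor, not from this patch
    BlobRepository.BlobContentRef<ByteBuffer> ref = repo.getBlobIncRef("myLib/1");
    try {
      ByteBuffer bytes = ref.blob.get(); // the raw jar/binary content
      // ... use the bytes ...
    } finally {
      // return the very same ref to drop the refcount
      repo.decrementBlobRefCount(ref);
    }
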
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
deleted file mode 100644
index 8b8c740..0000000
--- a/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
+++ /dev/null
@@ -1,524 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.io.File;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.IdentityHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.lucene.store.AlreadyClosedException;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.LockFactory;
-import org.apache.lucene.util.IOUtils;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.ObjectReleaseTracker;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A {@link DirectoryFactory} impl base class for caching Directory instances
- * per path. Most DirectoryFactory implementations will want to extend this
- * class and simply implement {@link DirectoryFactory#create(String, LockFactory, DirContext)}.
- * 
- * This is an expert class and these APIs are subject to change.
- * 
- */
-public abstract class CachingDirectoryFactory extends DirectoryFactory {
-  protected static class CacheValue {
-    final public String path;
-    final public Directory directory;
-    // for debug
-    //final Exception originTrace;
-    // use the setter!
-    private boolean deleteOnClose = false;
-    
-    public CacheValue(String path, Directory directory) {
-      this.path = path;
-      this.directory = directory;
-      this.closeEntries.add(this);
-      // for debug
-      // this.originTrace = new RuntimeException("Originated from:");
-    }
-    public int refCnt = 1;
-    // has doneWithDirectory(Directory) been called on this?
-    public boolean closeCacheValueCalled = false;
-    public boolean doneWithDir = false;
-    private boolean deleteAfterCoreClose = false;
-    public Set<CacheValue> removeEntries = new HashSet<>();
-    public Set<CacheValue> closeEntries = new HashSet<>();
-
-    public void setDeleteOnClose(boolean deleteOnClose, boolean deleteAfterCoreClose) {
-      if (deleteOnClose) {
-        removeEntries.add(this);
-      }
-      this.deleteOnClose = deleteOnClose;
-      this.deleteAfterCoreClose = deleteAfterCoreClose;
-    }
-    
-    @Override
-    public String toString() {
-      return "CachedDir<<" + "refCount=" + refCnt + ";path=" + path + ";done=" + doneWithDir + ">>";
-    }
-  }
-  
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  protected Map<String,CacheValue> byPathCache = new HashMap<>();
-  
-  protected Map<Directory,CacheValue> byDirectoryCache = new IdentityHashMap<>();
-  
-  protected Map<Directory,List<CloseListener>> closeListeners = new HashMap<>();
-  
-  protected Set<CacheValue> removeEntries = new HashSet<>();
-
-  private Double maxWriteMBPerSecFlush;
-
-  private Double maxWriteMBPerSecMerge;
-
-  private Double maxWriteMBPerSecRead;
-
-  private Double maxWriteMBPerSecDefault;
-
-  private boolean closed;
-  
-  public interface CloseListener {
-    public void postClose();
-
-    public void preClose();
-  }
-  
-  @Override
-  public void addCloseListener(Directory dir, CloseListener closeListener) {
-    synchronized (this) {
-      if (!byDirectoryCache.containsKey(dir)) {
-        throw new IllegalArgumentException("Unknown directory: " + dir
-            + " " + byDirectoryCache);
-      }
-      List<CloseListener> listeners = closeListeners.get(dir);
-      if (listeners == null) {
-        listeners = new ArrayList<>();
-        closeListeners.put(dir, listeners);
-      }
-      listeners.add(closeListener);
-    }
-  }
-  
-  @Override
-  public void doneWithDirectory(Directory directory) throws IOException {
-    synchronized (this) {
-      CacheValue cacheValue = byDirectoryCache.get(directory);
-      if (cacheValue == null) {
-        throw new IllegalArgumentException("Unknown directory: " + directory
-            + " " + byDirectoryCache);
-      }
-      cacheValue.doneWithDir = true;
-      log.debug("Done with dir: {}", cacheValue);
-      if (cacheValue.refCnt == 0 && !closed) {
-        boolean cl = closeCacheValue(cacheValue);
-        if (cl) {
-          removeFromCache(cacheValue);
-        }
-      }
-    }
-  }
-  
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.solr.core.DirectoryFactory#close()
-   */
-  @Override
-  public void close() throws IOException {
-    synchronized (this) {
-      log.debug("Closing {} - {} directories currently being tracked", this.getClass().getSimpleName(), byDirectoryCache.size());
-      this.closed = true;
-      Collection<CacheValue> values = byDirectoryCache.values();
-      for (CacheValue val : values) {
-        log.debug("Closing {} - currently tracking: {}", 
-                  this.getClass().getSimpleName(), val);
-        try {
-          // if there are still refs out, we have to wait for them
-          assert val.refCnt > -1 : val.refCnt;
-          int cnt = 0;
-          while(val.refCnt != 0) {
-            wait(100);
-            
-            if (cnt++ >= 120) {
-              String msg = "Timeout waiting for all directory ref counts to be released - gave up waiting on " + val;
-              log.error(msg);
-              // debug
-              // val.originTrace.printStackTrace();
-              throw new SolrException(ErrorCode.SERVER_ERROR, msg);
-            }
-          }
-          assert val.refCnt == 0 : val.refCnt;
-        } catch (Exception e) {
-          SolrException.log(log, "Error closing directory", e);
-        }
-      }
-      
-      values = byDirectoryCache.values();
-      Set<CacheValue> closedDirs = new HashSet<>();
-      for (CacheValue val : values) {
-        try {
-          for (CacheValue v : val.closeEntries) {
-            assert v.refCnt == 0 : v.refCnt;
-            log.debug("Closing directory when closing factory: " + v.path);
-            boolean cl = closeCacheValue(v);
-            if (cl) {
-              closedDirs.add(v);
-            }
-          }
-        } catch (Exception e) {
-          SolrException.log(log, "Error closing directory", e);
-        }
-      }
-
-      for (CacheValue val : removeEntries) {
-        log.debug("Removing directory after core close: " + val.path);
-        try {
-          removeDirectory(val);
-        } catch (Exception e) {
-          SolrException.log(log, "Error removing directory", e);
-        }
-      }
-      
-      for (CacheValue v : closedDirs) {
-        removeFromCache(v);
-      }
-    }
-  }
-
-  private void removeFromCache(CacheValue v) {
-    log.debug("Removing from cache: {}", v);
-    byDirectoryCache.remove(v.directory);
-    byPathCache.remove(v.path);
-  }
-
-  // be sure this is called with the this sync lock
-  // returns true if we closed the cacheValue, false if it will be closed later
-  private boolean closeCacheValue(CacheValue cacheValue) {
-    log.debug("looking to close {} {}", cacheValue.path, cacheValue.closeEntries.toString());
-    List<CloseListener> listeners = closeListeners.remove(cacheValue.directory);
-    if (listeners != null) {
-      for (CloseListener listener : listeners) {
-        try {
-          listener.preClose();
-        } catch (Exception e) {
-          SolrException.log(log, "Error executing preClose for directory", e);
-        }
-      }
-    }
-    cacheValue.closeCacheValueCalled = true;
-    if (cacheValue.deleteOnClose) {
-      // see if we are a subpath
-      Collection<CacheValue> values = byPathCache.values();
-      
-      Collection<CacheValue> cacheValues = new ArrayList<>(values);
-      cacheValues.remove(cacheValue);
-      for (CacheValue otherCacheValue : cacheValues) {
-        // if we are a parent path and a sub path is not already closed, get a sub path to close us later
-        if (isSubPath(cacheValue, otherCacheValue) && !otherCacheValue.closeCacheValueCalled) {
-          // we let the sub dir remove and close us
-          if (!otherCacheValue.deleteAfterCoreClose && cacheValue.deleteAfterCoreClose) {
-            otherCacheValue.deleteAfterCoreClose = true;
-          }
-          otherCacheValue.removeEntries.addAll(cacheValue.removeEntries);
-          otherCacheValue.closeEntries.addAll(cacheValue.closeEntries);
-          cacheValue.closeEntries.clear();
-          cacheValue.removeEntries.clear();
-          return false;
-        }
-      }
-    }
-
-    boolean cl = false;
-    for (CacheValue val : cacheValue.closeEntries) {
-      close(val);
-      if (val == cacheValue) {
-        cl = true;
-      }
-    }
-
-    for (CacheValue val : cacheValue.removeEntries) {
-      if (!val.deleteAfterCoreClose) {
-        log.debug("Removing directory before core close: " + val.path);
-        try {
-          removeDirectory(val);
-        } catch (Exception e) {
-          SolrException.log(log, "Error removing directory " + val.path + " before core close", e);
-        }
-      } else {
-        removeEntries.add(val);
-      }
-    }
-    
-    if (listeners != null) {
-      for (CloseListener listener : listeners) {
-        try {
-          listener.postClose();
-        } catch (Exception e) {
-          SolrException.log(log, "Error executing postClose for directory", e);
-        }
-      }
-    }
-    return cl;
-  }
-
-  private void close(CacheValue val) {
-    log.debug("Closing directory, CoreContainer#isShutdown={}", coreContainer != null ? coreContainer.isShutDown() : "null");
-    try {
-      if (coreContainer != null && coreContainer.isShutDown() && val.directory instanceof ShutdownAwareDirectory) {
-        log.debug("Closing directory on shutdown: " + val.path);
-        ((ShutdownAwareDirectory) val.directory).closeOnShutdown();
-      } else {
-        log.debug("Closing directory: " + val.path);
-        val.directory.close();
-      }
-      assert ObjectReleaseTracker.release(val.directory);
-    } catch (Exception e) {
-      SolrException.log(log, "Error closing directory", e);
-    }
-  }
-
-  private boolean isSubPath(CacheValue cacheValue, CacheValue otherCacheValue) {
-    int one = cacheValue.path.lastIndexOf('/');
-    int two = otherCacheValue.path.lastIndexOf('/');
-    
-    return otherCacheValue.path.startsWith(cacheValue.path + "/") && two > one;
-  }
-  
-  @Override
-  public boolean exists(String path) throws IOException {
-    // back compat behavior
-    File dirFile = new File(path);
-    return dirFile.canRead() && dirFile.list().length > 0;
-  }
-  
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.solr.core.DirectoryFactory#get(java.lang.String,
-   * java.lang.String, boolean)
-   */
-  @Override
-  public final Directory get(String path,  DirContext dirContext, String rawLockType)
-      throws IOException {
-    String fullPath = normalize(path);
-    synchronized (this) {
-      if (closed) {
-        throw new AlreadyClosedException("Already closed");
-      }
-      
-      final CacheValue cacheValue = byPathCache.get(fullPath);
-      Directory directory = null;
-      if (cacheValue != null) {
-        directory = cacheValue.directory;
-      }
-      
-      if (directory == null) {
-        directory = create(fullPath, createLockFactory(rawLockType), dirContext);
-        assert ObjectReleaseTracker.track(directory);
-        boolean success = false;
-        try {
-          CacheValue newCacheValue = new CacheValue(fullPath, directory);
-          byDirectoryCache.put(directory, newCacheValue);
-          byPathCache.put(fullPath, newCacheValue);
-          log.debug("return new directory for {}", fullPath);
-          success = true;
-        } finally {
-          if (!success) {
-            IOUtils.closeWhileHandlingException(directory);
-          }
-        }
-      } else {
-        cacheValue.refCnt++;
-        log.debug("Reusing cached directory: {}", cacheValue);
-      }
-      
-      return directory;
-    }
-  }
-
-  /*
-   * (non-Javadoc)
-   * 
-   * @see
-   * org.apache.solr.core.DirectoryFactory#incRef(org.apache.lucene.store.Directory
-   * )
-   */
-  @Override
-  public void incRef(Directory directory) {
-    synchronized (this) {
-      if (closed) {
-        throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Already closed");
-      }
-      CacheValue cacheValue = byDirectoryCache.get(directory);
-      if (cacheValue == null) {
-        throw new IllegalArgumentException("Unknown directory: " + directory);
-      }
-      
-      cacheValue.refCnt++;
-      log.debug("incRef'ed: {}", cacheValue);
-    }
-  }
-  
-  @Override
-  public void init(NamedList args) {
-    maxWriteMBPerSecFlush = (Double) args.get("maxWriteMBPerSecFlush");
-    maxWriteMBPerSecMerge = (Double) args.get("maxWriteMBPerSecMerge");
-    maxWriteMBPerSecRead = (Double) args.get("maxWriteMBPerSecRead");
-    maxWriteMBPerSecDefault = (Double) args.get("maxWriteMBPerSecDefault");
-
-    // override global config
-    if (args.get(SolrXmlConfig.SOLR_DATA_HOME) != null) {
-      dataHomePath = Paths.get((String) args.get(SolrXmlConfig.SOLR_DATA_HOME));
-    }
-    if (dataHomePath != null) {
-      log.info(SolrXmlConfig.SOLR_DATA_HOME + "=" + dataHomePath);
-    }
-  }
-  
-  /*
-   * (non-Javadoc)
-   * 
-   * @see
-   * org.apache.solr.core.DirectoryFactory#release(org.apache.lucene.store.Directory
-   * )
-   */
-  @Override
-  public void release(Directory directory) throws IOException {
-    if (directory == null) {
-      throw new NullPointerException();
-    }
-    synchronized (this) {
-      // don't check if already closed here - we need to be able to release
-      // while #close() waits.
-      
-      CacheValue cacheValue = byDirectoryCache.get(directory);
-      if (cacheValue == null) {
-        throw new IllegalArgumentException("Unknown directory: " + directory
-            + " " + byDirectoryCache);
-      }
-      log.debug("Releasing directory: " + cacheValue.path + " " + (cacheValue.refCnt - 1) + " " + cacheValue.doneWithDir);
-
-      cacheValue.refCnt--;
-      
-      assert cacheValue.refCnt >= 0 : cacheValue.refCnt;
-
-      if (cacheValue.refCnt == 0 && cacheValue.doneWithDir && !closed) {
-        boolean cl = closeCacheValue(cacheValue);
-        if (cl) {
-          removeFromCache(cacheValue);
-        }
-      }
-    }
-  }
-  
-  @Override
-  public void remove(String path) throws IOException {
-    remove(path, false);
-  }
-  
-  @Override
-  public void remove(Directory dir) throws IOException {
-    remove(dir, false);
-  }
-  
-  @Override
-  public void remove(String path, boolean deleteAfterCoreClose) throws IOException {
-    synchronized (this) {
-      CacheValue val = byPathCache.get(normalize(path));
-      if (val == null) {
-        throw new IllegalArgumentException("Unknown directory " + path);
-      }
-      val.setDeleteOnClose(true, deleteAfterCoreClose);
-    }
-  }
-  
-  @Override
-  public void remove(Directory dir, boolean deleteAfterCoreClose) throws IOException {
-    synchronized (this) {
-      CacheValue val = byDirectoryCache.get(dir);
-      if (val == null) {
-        throw new IllegalArgumentException("Unknown directory " + dir);
-      }
-      val.setDeleteOnClose(true, deleteAfterCoreClose);
-    }
-  }
-  
-  protected synchronized void removeDirectory(CacheValue cacheValue) throws IOException {
-     // intentionally a no-op; subclasses override this to actually remove the directory
-  }
-  
-  @Override
-  public String normalize(String path) throws IOException {
-    path = stripTrailingSlash(path);
-    return path;
-  }
-  
-  protected String stripTrailingSlash(String path) {
-    if (path.endsWith("/")) {
-      path = path.substring(0, path.length() - 1);
-    }
-    return path;
-  }
-  
-  /**
-   * Method for inspecting the cache
-   * @return paths in the cache which have not been marked "done"
-   *
-   * @see #doneWithDirectory
-   */
-  public synchronized Set<String> getLivePaths() {
-    HashSet<String> livePaths = new HashSet<>();
-    for (CacheValue val : byPathCache.values()) {
-      if (!val.doneWithDir) {
-        livePaths.add(val.path);
-      }
-    }
-    return livePaths;
-  }
-  
-  @Override
-  protected boolean deleteOldIndexDirectory(String oldDirPath) throws IOException {
-    Set<String> livePaths = getLivePaths();
-    if (livePaths.contains(oldDirPath)) {
-      log.warn("Cannot delete directory {} as it is still being referenced in the cache!", oldDirPath);
-      return false;
-    }
-
-    return super.deleteOldIndexDirectory(oldDirPath);
-  }
-  
-  protected synchronized String getPath(Directory directory) {
-    return byDirectoryCache.get(directory).path;
-  }
-}

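A hedged sketch of the caching contract implemented above (the concrete factory instance and path are illustrative): every get() or incRef() must be balanced by a release(), and doneWithDirectory() marks the cached entry closable once its refcount drops to zero.

    Directory dir = factory.get("/var/solr/data/core1/index",
        DirectoryFactory.DirContext.DEFAULT, DirectoryFactory.LOCK_TYPE_NATIVE);
    try {
      // ... read or write through the directory ...
    } finally {
      factory.doneWithDirectory(dir); // no further get() calls expected for this path
      factory.release(dir);           // refCnt hits 0 -> the cached entry is closed and evicted
    }
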
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/CloseHook.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CloseHook.java b/solr/core/src/java/org/apache/solr/core/CloseHook.java
deleted file mode 100644
index 812d89a..0000000
--- a/solr/core/src/java/org/apache/solr/core/CloseHook.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-/**
- * Used to request notification when the core is closed.
- * <p>
- * Call {@link org.apache.solr.core.SolrCore#addCloseHook(org.apache.solr.core.CloseHook)} during the {@link org.apache.solr.util.plugin.SolrCoreAware#inform(SolrCore)} method to
- * add a close hook to your object.
- * <p>
- * The close hook can be useful for releasing objects related to the request handler (for instance, if you have a JDBC DataSource or something like that)
- */
-
-public abstract class CloseHook {
-
-  /**
-   * Method called when the given SolrCore object is closing / shutting down but before the update handler and
-   * searcher(s) are actually closed
-   * <br>
-   * <b>Important:</b> Keep the method implementation as short as possible. If it performs any heavy I/O or
-   * opens network connections, it might be better to launch the work in a separate Thread so as not to block
-   * the process of shutting down a given SolrCore instance.
-   *
-   * @param core SolrCore object that is shutting down / closing
-   */
-  public abstract void preClose(SolrCore core);
-
-  /**
-   * Method called when the given SolrCore object has been shut down and update handlers and searchers are closed
-   * <br>
-   * Use this method for post-close clean-up operations, e.g. deleting the index from disk.
-   * <br>
-   * <b>The core passed to the method is already closed; therefore its update handler or searcher should *NOT* be used</b>
-   *
-   * <b>Important:</b> Keep the method implementation as short as possible. If it performs any heavy I/O or
-   * opens network connections, it might be better to launch the work in a separate Thread so as not to block
-   * the process of shutting down a given SolrCore instance.
-   */
-  public abstract void postClose(SolrCore core);
-}

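Following the Javadoc above, a hedged sketch of a SolrCoreAware component registering a hook; the component class and its executor are illustrative, while addCloseHook/inform are the calls the Javadoc names:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import org.apache.solr.core.CloseHook;
    import org.apache.solr.core.SolrCore;
    import org.apache.solr.util.plugin.SolrCoreAware;

    public class MyComponent implements SolrCoreAware {
      private final ExecutorService pool = Executors.newSingleThreadExecutor();

      @Override
      public void inform(SolrCore core) {
        core.addCloseHook(new CloseHook() {
          @Override
          public void preClose(SolrCore c) {
            // the core is about to close; keep this short
          }
          @Override
          public void postClose(SolrCore c) {
            // the core (and its searcher) are closed; free external resources only
            pool.shutdown();
          }
        });
      }
    }
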
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/CloudConfig.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CloudConfig.java b/solr/core/src/java/org/apache/solr/core/CloudConfig.java
deleted file mode 100644
index 6248b45..0000000
--- a/solr/core/src/java/org/apache/solr/core/CloudConfig.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import org.apache.solr.common.SolrException;
-
-public class CloudConfig {
-
-  private final String zkHost;
-
-  private final int zkClientTimeout;
-
-  private final int hostPort;
-
-  private final String hostName;
-
-  private final String hostContext;
-
-  private final boolean useGenericCoreNames;
-
-  private final int leaderVoteWait;
-
-  private final int leaderConflictResolveWait;
-
-  private final int autoReplicaFailoverWaitAfterExpiration;
-
-  private final String zkCredentialsProviderClass;
-
-  private final String zkACLProviderClass;
-  
-  private final int createCollectionWaitTimeTillActive;
-  
-  private final boolean createCollectionCheckLeaderActive;
-
-  CloudConfig(String zkHost, int zkClientTimeout, int hostPort, String hostName, String hostContext, boolean useGenericCoreNames, 
-              int leaderVoteWait, int leaderConflictResolveWait, int autoReplicaFailoverWaitAfterExpiration,
-              String zkCredentialsProviderClass, String zkACLProviderClass, int createCollectionWaitTimeTillActive,
-              boolean createCollectionCheckLeaderActive) {
-    this.zkHost = zkHost;
-    this.zkClientTimeout = zkClientTimeout;
-    this.hostPort = hostPort;
-    this.hostName = hostName;
-    this.hostContext = hostContext;
-    this.useGenericCoreNames = useGenericCoreNames;
-    this.leaderVoteWait = leaderVoteWait;
-    this.leaderConflictResolveWait = leaderConflictResolveWait;
-    this.autoReplicaFailoverWaitAfterExpiration = autoReplicaFailoverWaitAfterExpiration;
-    this.zkCredentialsProviderClass = zkCredentialsProviderClass;
-    this.zkACLProviderClass = zkACLProviderClass;
-    this.createCollectionWaitTimeTillActive = createCollectionWaitTimeTillActive;
-    this.createCollectionCheckLeaderActive = createCollectionCheckLeaderActive;
-
-    if (this.hostPort == -1)
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "'hostPort' must be configured to run SolrCloud");
-    if (this.hostContext == null)
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "'hostContext' must be configured to run SolrCloud");
-  }
-
-  public String getZkHost() {
-    return zkHost;
-  }
-
-  public int getZkClientTimeout() {
-    return zkClientTimeout;
-  }
-
-  public int getSolrHostPort() {
-    return hostPort;
-  }
-
-  public String getSolrHostContext() {
-    return hostContext;
-  }
-
-  public String getHost() {
-    return hostName;
-  }
-
-  public String getZkCredentialsProviderClass() {
-    return zkCredentialsProviderClass;
-  }
-
-  public String getZkACLProviderClass() {
-    return zkACLProviderClass;
-  }
-
-  public int getLeaderVoteWait() {
-    return leaderVoteWait;
-  }
-
-  public int getLeaderConflictResolveWait() {
-    return leaderConflictResolveWait;
-  }
-
-  public int getAutoReplicaFailoverWaitAfterExpiration() {
-    return autoReplicaFailoverWaitAfterExpiration;
-  }
-
-  public boolean getGenericCoreNodeNames() {
-    return useGenericCoreNames;
-  }
-
-  public int getCreateCollectionWaitTimeTillActive() {
-    return createCollectionWaitTimeTillActive;
-  }
-
-  public boolean isCreateCollectionCheckLeaderActive() {
-    return createCollectionCheckLeaderActive;
-  }
-
-  public static class CloudConfigBuilder {
-
-    private static final int DEFAULT_ZK_CLIENT_TIMEOUT = 15000;
-    private static final int DEFAULT_LEADER_VOTE_WAIT = 180000;  // 3 minutes
-    private static final int DEFAULT_LEADER_CONFLICT_RESOLVE_WAIT = 180000;
-    private static final int DEFAULT_CREATE_COLLECTION_ACTIVE_WAIT = 30;  // 30 seconds
-    private static final boolean DEFAULT_CREATE_COLLECTION_CHECK_LEADER_ACTIVE = false; 
- 
-    private static final int DEFAULT_AUTO_REPLICA_FAILOVER_WAIT_AFTER_EXPIRATION = 120000;
-
-    private String zkHost = System.getProperty("zkHost");
-    private int zkClientTimeout = Integer.getInteger("zkClientTimeout", DEFAULT_ZK_CLIENT_TIMEOUT);
-    private final int hostPort;
-    private final String hostName;
-    private final String hostContext;
-    private boolean useGenericCoreNames;
-    private int leaderVoteWait = DEFAULT_LEADER_VOTE_WAIT;
-    private int leaderConflictResolveWait = DEFAULT_LEADER_CONFLICT_RESOLVE_WAIT;
-    private int autoReplicaFailoverWaitAfterExpiration = DEFAULT_AUTO_REPLICA_FAILOVER_WAIT_AFTER_EXPIRATION;
-    private String zkCredentialsProviderClass;
-    private String zkACLProviderClass;
-    private int createCollectionWaitTimeTillActive = DEFAULT_CREATE_COLLECTION_ACTIVE_WAIT;
-    private boolean createCollectionCheckLeaderActive = DEFAULT_CREATE_COLLECTION_CHECK_LEADER_ACTIVE;
-
-    public CloudConfigBuilder(String hostName, int hostPort) {
-      this(hostName, hostPort, null);
-    }
-
-    public CloudConfigBuilder(String hostName, int hostPort, String hostContext) {
-      this.hostName = hostName;
-      this.hostPort = hostPort;
-      this.hostContext = hostContext;
-    }
-
-    public CloudConfigBuilder setZkHost(String zkHost) {
-      this.zkHost = zkHost;
-      return this;
-    }
-
-    public CloudConfigBuilder setZkClientTimeout(int zkClientTimeout) {
-      this.zkClientTimeout = zkClientTimeout;
-      return this;
-    }
-
-    public CloudConfigBuilder setUseGenericCoreNames(boolean useGenericCoreNames) {
-      this.useGenericCoreNames = useGenericCoreNames;
-      return this;
-    }
-
-    public CloudConfigBuilder setLeaderVoteWait(int leaderVoteWait) {
-      this.leaderVoteWait = leaderVoteWait;
-      return this;
-    }
-
-    public CloudConfigBuilder setLeaderConflictResolveWait(int leaderConflictResolveWait) {
-      this.leaderConflictResolveWait = leaderConflictResolveWait;
-      return this;
-    }
-
-    public CloudConfigBuilder setAutoReplicaFailoverWaitAfterExpiration(int autoReplicaFailoverWaitAfterExpiration) {
-      this.autoReplicaFailoverWaitAfterExpiration = autoReplicaFailoverWaitAfterExpiration;
-      return this;
-    }
-
-    public CloudConfigBuilder setZkCredentialsProviderClass(String zkCredentialsProviderClass) {
-      this.zkCredentialsProviderClass = zkCredentialsProviderClass;
-      return this;
-    }
-
-    public CloudConfigBuilder setZkACLProviderClass(String zkACLProviderClass) {
-      this.zkACLProviderClass = zkACLProviderClass;
-      return this;
-    }
-
-    public CloudConfigBuilder setCreateCollectionWaitTimeTillActive(int createCollectionWaitTimeTillActive) {
-      this.createCollectionWaitTimeTillActive = createCollectionWaitTimeTillActive;
-      return this;
-    }
-    
-    public CloudConfigBuilder setCreateCollectionCheckLeaderActive(boolean createCollectionCheckLeaderActive) {
-      this.createCollectionCheckLeaderActive = createCollectionCheckLeaderActive;
-      return this;
-    }
-    
-    public CloudConfig build() {
-      return new CloudConfig(zkHost, zkClientTimeout, hostPort, hostName, hostContext, useGenericCoreNames, leaderVoteWait, 
-                             leaderConflictResolveWait, autoReplicaFailoverWaitAfterExpiration, zkCredentialsProviderClass, zkACLProviderClass, createCollectionWaitTimeTillActive,
-                             createCollectionCheckLeaderActive);
-    }
-  }
-}

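A hedged sketch of building a CloudConfig through its builder; the host, port, and ZooKeeper ensemble are illustrative values, the API is exactly the one removed above:

    CloudConfig cfg = new CloudConfig.CloudConfigBuilder("host1.example.com", 8983, "solr")
        .setZkHost("zk1:2181,zk2:2181,zk3:2181/solr")
        .setZkClientTimeout(30000)
        .setLeaderVoteWait(180000)
        .build();
    // build() enforces the invariants above: hostPort != -1 and hostContext != null
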
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/CodecFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CodecFactory.java b/solr/core/src/java/org/apache/solr/core/CodecFactory.java
deleted file mode 100644
index 36c67eb..0000000
--- a/solr/core/src/java/org/apache/solr/core/CodecFactory.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import org.apache.lucene.codecs.Codec;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.util.plugin.NamedListInitializedPlugin;
-
-/**
- * Factory for plugging in a custom {@link Codec}
- */
-public abstract class CodecFactory implements NamedListInitializedPlugin {
-  @Override
-  public void init(NamedList args) {  
-  }
-  
-  public abstract Codec getCodec();
-}

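For context, a hedged sketch of a minimal CodecFactory plugin; the class name is illustrative, and it simply delegates to Lucene's default codec (real factories such as SchemaCodecFactory also consult the schema):

    import org.apache.lucene.codecs.Codec;

    public class DefaultCodecFactory extends CodecFactory {
      @Override
      public Codec getCodec() {
        return Codec.getDefault(); // whatever Lucene currently ships as the default codec
      }
    }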

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/ExecutePlanAction.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ExecutePlanAction.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/ExecutePlanAction.java
deleted file mode 100644
index 6179bcc..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ExecutePlanAction.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.commons.lang3.exception.ExceptionUtils;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
-import org.apache.solr.client.solrj.cloud.DistribStateManager;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.response.RequestStatusState;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.Utils;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class is responsible for executing cluster operations read from the {@link ActionContext}'s properties
- * with the key name "operations".
- */
-public class ExecutePlanAction extends TriggerActionBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private static final String PREFIX = "op-";
-
-  static final int DEFAULT_TASK_TIMEOUT_SECONDS = 120;
-
-  @Override
-  public void process(TriggerEvent event, ActionContext context) throws Exception {
-    log.debug("-- processing event: {} with context properties: {}", event, context.getProperties());
-    SolrCloudManager cloudManager = context.getCloudManager();
-    List<SolrRequest> operations = (List<SolrRequest>) context.getProperty("operations");
-    if (operations == null || operations.isEmpty()) {
-      log.info("No operations to execute for event: {}", event);
-      return;
-    }
-    try {
-      for (SolrRequest operation : operations) {
-        log.debug("Executing operation: {}", operation.getParams());
-        try {
-          SolrResponse response = null;
-          int counter = 0;
-          if (operation instanceof CollectionAdminRequest.AsyncCollectionAdminRequest) {
-            CollectionAdminRequest.AsyncCollectionAdminRequest req = (CollectionAdminRequest.AsyncCollectionAdminRequest) operation;
-            // waitForFinalState so that the end effects of operations are visible
-            req.setWaitForFinalState(true);
-            String asyncId = event.getSource() + '/' + event.getId() + '/' + counter;
-            String znode = saveAsyncId(cloudManager.getDistribStateManager(), event, asyncId);
-            log.trace("Saved requestId: {} in znode: {}", asyncId, znode);
-            // TODO: find a better way of using async calls using dataProvider API !!!
-            req.setAsyncId(asyncId);
-            SolrResponse asyncResponse = cloudManager.request(req);
-            if (asyncResponse.getResponse().get("error") != null) {
-              throw new IOException("" + asyncResponse.getResponse().get("error"));
-            }
-            asyncId = (String)asyncResponse.getResponse().get("requestid");
-            CollectionAdminRequest.RequestStatusResponse statusResponse = waitForTaskToFinish(cloudManager, asyncId,
-                DEFAULT_TASK_TIMEOUT_SECONDS, TimeUnit.SECONDS);
-            if (statusResponse != null) {
-              RequestStatusState state = statusResponse.getRequestStatus();
-              if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED || state == RequestStatusState.NOT_FOUND) {
-                try {
-                  cloudManager.getDistribStateManager().removeData(znode, -1);
-                } catch (Exception e) {
-                  log.warn("Unexpected exception while trying to delete znode: " + znode, e);
-                }
-              }
-              response = statusResponse;
-            }
-          } else {
-            response = cloudManager.request(operation);
-          }
-          NamedList<Object> result = response.getResponse();
-          context.getProperties().compute("responses", (s, o) -> {
-            List<NamedList<Object>> responses = (List<NamedList<Object>>) o;
-            if (responses == null)  responses = new ArrayList<>(operations.size());
-            responses.add(result);
-            return responses;
-          });
-        } catch (IOException e) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-              "Unexpected exception executing operation: " + operation.getParams(), e);
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "ExecutePlanAction was interrupted", e);
-        } catch (Exception e) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-              "Unexpected exception executing operation: " + operation.getParams(), e);
-        }
-      }
-    } catch (Exception e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Unexpected exception while processing event: " + event, e);
-    }
-  }
-
-
-  static CollectionAdminRequest.RequestStatusResponse waitForTaskToFinish(SolrCloudManager cloudManager, String requestId, long duration, TimeUnit timeUnit) throws IOException, InterruptedException {
-    long timeoutSeconds = timeUnit.toSeconds(duration);
-    RequestStatusState state = RequestStatusState.NOT_FOUND;
-    CollectionAdminRequest.RequestStatusResponse statusResponse = null;
-    for (int i = 0; i < timeoutSeconds; i++) {
-      try {
-        statusResponse = (CollectionAdminRequest.RequestStatusResponse)cloudManager.request(CollectionAdminRequest.requestStatus(requestId));
-        state = statusResponse.getRequestStatus();
-        if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED) {
-          log.trace("Task with requestId={} finished with state={} in {}s", requestId, state, i * 5);
-          cloudManager.request(CollectionAdminRequest.deleteAsyncId(requestId));
-          return statusResponse;
-        } else if (state == RequestStatusState.NOT_FOUND) {
-          // the request for this id was never actually submitted! no harm done, just bail out
-          log.warn("Task with requestId={} was not found on overseer", requestId);
-          cloudManager.request(CollectionAdminRequest.deleteAsyncId(requestId));
-          return statusResponse;
-        }
-      } catch (Exception e) {
-        Throwable rootCause = ExceptionUtils.getRootCause(e);
-        if (rootCause instanceof IllegalStateException && rootCause.getMessage().contains("Connection pool shut down"))  {
-          throw e;
-        }
-        if (rootCause instanceof TimeoutException && rootCause.getMessage().contains("Could not connect to ZooKeeper")) {
-          throw e;
-        }
-        if (rootCause instanceof SolrServerException) {
-          throw e;
-        }
-        log.error("Unexpected Exception while querying status of requestId=" + requestId, e);
-        throw e;
-      }
-      if (i > 0 && i % 5 == 0) {
-        log.trace("Task with requestId={} still not complete after {}s. Last state={}", requestId, i * 5, state);
-      }
-      cloudManager.getTimeSource().sleep(5000);
-    }
-    log.debug("Task with requestId={} did not complete within 5 minutes. Last state={}", requestId, state);
-    return statusResponse;
-  }
-
-  /**
-   * Saves the given asyncId in ZK as a persistent sequential node.
-   *
-   * @return the path of the newly created node in ZooKeeper
-   */
-  private String saveAsyncId(DistribStateManager stateManager, TriggerEvent event, String asyncId) throws InterruptedException, AlreadyExistsException, IOException, KeeperException {
-    String parentPath = ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH + "/" + event.getSource() + "/" + getName();
-    try {
-      stateManager.makePath(parentPath);
-    } catch (AlreadyExistsException e) {
-      // ignore
-    }
-    return stateManager.createData(parentPath + "/" + PREFIX, Utils.toJSON(Collections.singletonMap("requestid", asyncId)), CreateMode.PERSISTENT_SEQUENTIAL);
-  }
-
-}

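For context, a hedged sketch (assumed to live inside a method declared to throw Exception, with a SolrJ client in scope) of the async submit-and-poll pattern the action above automates; the collection, shard, and async id are illustrative:

    String asyncId = CollectionAdminRequest.splitShard("collection1")
        .setShardName("shard1")
        .processAsync("op-0001", solrClient); // returns the async id
    RequestStatusState state;
    do {
      Thread.sleep(5000); // same 5-second cadence used by waitForTaskToFinish
      state = CollectionAdminRequest.requestStatus(asyncId)
          .process(solrClient).getRequestStatus();
    } while (state == RequestStatusState.RUNNING || state == RequestStatusState.SUBMITTED);
    CollectionAdminRequest.deleteAsyncId(asyncId).process(solrClient); // clean up tracking state
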
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/HttpTriggerListener.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/HttpTriggerListener.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/HttpTriggerListener.java
deleted file mode 100644
index b4f9bf0..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/HttpTriggerListener.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-import java.util.StringJoiner;
-
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventProcessorStage;
-import org.apache.solr.client.solrj.impl.HttpClientUtil;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.solr.util.PropertiesUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Simple HTTP callback that POSTs event data to a URL.
- * URL, payload and headers may contain property substitution patterns, with the following properties available:
- * <ul>
- *   <li>config.* - listener configuration</li>
- *   <li>event.* - event properties</li>
- *   <li>stage - current stage of event processing</li>
- *   <li>actionName - optional current action name</li>
- *   <li>context.* - optional {@link ActionContext} properties</li>
- *   <li>error - optional error string (from {@link Throwable#toString()})</li>
- *   <li>message - optional message</li>
- * </ul>
- * The following listener configuration is supported:
- * <ul>
- *   <li>url - a URL template</li>
- *   <li>payload - string, optional payload template. If absent, a JSON map of all properties listed above will be used.</li>
- *   <li>contentType - string, optional payload content type. If absent, <code>application/json</code> will be used.</li>
- *   <li>header.* - string, optional header template(s). The name of the property without "header." prefix defines the literal header name.</li>
- *   <li>timeout - int, optional connection and socket timeout in milliseconds. Default is 60 seconds.</li>
- *   <li>followRedirects - boolean, optional setting to follow redirects. Default is false.</li>
- * </ul>
- */
-public class HttpTriggerListener extends TriggerListenerBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private String urlTemplate;
-  private String payloadTemplate;
-  private String contentType;
-  private Map<String, String> headerTemplates = new HashMap<>();
-  private int timeout = HttpClientUtil.DEFAULT_CONNECT_TIMEOUT;
-  private boolean followRedirects;
-
-  public HttpTriggerListener() {
-    super();
-    TriggerUtils.requiredProperties(requiredProperties, validProperties, "url");
-    TriggerUtils.validProperties(validProperties, "payload", "contentType", "timeout", "followRedirects");
-    validPropertyPrefixes.add("header.");
-  }
-
-  @Override
-  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, AutoScalingConfig.TriggerListenerConfig config) throws TriggerValidationException {
-    super.configure(loader, cloudManager, config);
-    urlTemplate = (String)config.properties.get("url");
-    payloadTemplate = (String)config.properties.get("payload");
-    contentType = (String)config.properties.get("contentType");
-    config.properties.forEach((k, v) -> {
-      if (k.startsWith("header.")) {
-        headerTemplates.put(k.substring(7), String.valueOf(v));
-      }
-    });
-    timeout = PropertiesUtil.toInteger(String.valueOf(config.properties.get("timeout")), HttpClientUtil.DEFAULT_CONNECT_TIMEOUT);
-    followRedirects = PropertiesUtil.toBoolean(String.valueOf(config.properties.get("followRedirects")));
-  }
-
-  @Override
-  public void onEvent(TriggerEvent event, TriggerEventProcessorStage stage, String actionName, ActionContext context, Throwable error, String message) {
-    Properties properties = new Properties();
-    properties.setProperty("stage", stage.toString());
-    // if the configuration used "actionName" but we're in a non-action-related stage then PropertiesUtil would
-    // throw an exception on the missing value - so replace it with an empty string
-    if (actionName == null) {
-      actionName = "";
-    }
-    properties.setProperty("actionName", actionName);
-    if (context != null) {
-      context.getProperties().forEach((k, v) -> {
-        properties.setProperty("context." + k, String.valueOf(v));
-      });
-    }
-    if (error != null) {
-      properties.setProperty("error", error.toString());
-    } else {
-      properties.setProperty("error", "");
-    }
-    if (message != null) {
-      properties.setProperty("message", message);
-    } else {
-      properties.setProperty("message", "");
-    }
-    // add event properties
-    properties.setProperty("event.id", event.getId());
-    properties.setProperty("event.source", event.getSource());
-    properties.setProperty("event.eventTime", String.valueOf(event.eventTime));
-    properties.setProperty("event.eventType", event.getEventType().toString());
-    event.getProperties().forEach((k, v) -> {
-      properties.setProperty("event.properties." + k, String.valueOf(v));
-    });
-    // add config properties
-    properties.setProperty("config.name", config.name);
-    properties.setProperty("config.trigger", config.trigger);
-    properties.setProperty("config.listenerClass", config.listenerClass);
-    properties.setProperty("config.beforeActions", String.join(",", config.beforeActions));
-    properties.setProperty("config.afterActions", String.join(",", config.afterActions));
-    StringJoiner joiner = new StringJoiner(",");
-    config.stages.forEach(s -> joiner.add(s.toString()));
-    properties.setProperty("config.stages", joiner.toString());
-    config.properties.forEach((k, v) -> {
-      properties.setProperty("config.properties." + k, String.valueOf(v));
-    });
-    String url = PropertiesUtil.substituteProperty(urlTemplate, properties);
-    String payload;
-    String type;
-    if (payloadTemplate != null) {
-      payload = PropertiesUtil.substituteProperty(payloadTemplate, properties);
-      if (contentType != null) {
-        type = contentType;
-      } else {
-        type = "application/json";
-      }
-    } else {
-      payload = Utils.toJSONString(properties);
-      type = "application/json";
-    }
-    Map<String, String> headers = new HashMap<>();
-    headerTemplates.forEach((k, v) -> {
-      String headerVal = PropertiesUtil.substituteProperty(v, properties);
-      if (!headerVal.isEmpty()) {
-        headers.put(k, headerVal);
-      }
-    });
-    headers.put("Content-Type", type);
-    try {
-      cloudManager.httpRequest(url, SolrRequest.METHOD.POST, headers, payload, timeout, followRedirects);
-    } catch (IOException e) {
-      log.warn("Exception sending request for event " + event, e);
-    }
-  }
-}

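The listener above builds its URL, payload, and headers by substituting ${...} placeholders from a java.util.Properties map assembled per event. A minimal, self-contained sketch of that substitution idea, using only JDK classes (the substitute helper below is a hypothetical stand-in for Solr's PropertiesUtil.substituteProperty; note that, per the comment in onEvent, the real utility throws on a missing value, while this sketch substitutes an empty string):

    import java.util.Properties;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class TemplateSubstitutionSketch {
      private static final Pattern PLACEHOLDER = Pattern.compile("\\$\\{([^}]+)\\}");

      // Replace ${key} placeholders with values from props; unknown keys become "".
      static String substitute(String template, Properties props) {
        Matcher m = PLACEHOLDER.matcher(template);
        StringBuffer sb = new StringBuffer();
        while (m.find()) {
          m.appendReplacement(sb, Matcher.quoteReplacement(props.getProperty(m.group(1), "")));
        }
        m.appendTail(sb);
        return sb.toString();
      }

      public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("config.name", "foo");
        props.setProperty("stage", "SUCCEEDED");
        System.out.println(substitute(
            "http://example.com/hook?listener=${config.name}&stage=${stage}", props));
        // -> http://example.com/hook?listener=foo&stage=SUCCEEDED
      }
    }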
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/InactiveShardPlanAction.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/InactiveShardPlanAction.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/InactiveShardPlanAction.java
deleted file mode 100644
index 6fca29a..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/InactiveShardPlanAction.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.autoscaling;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.SolrResourceLoader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class checks whether there are shards that have been inactive for a long
- * time (which usually means they are leftovers from shard splitting) and requests their removal
- * after their cleanup TTL period has elapsed.
- * <p>Shard delete requests are put into the {@link ActionContext}'s properties
- * with the key name "operations". The value is a List of SolrRequest objects.</p>
- */
-public class InactiveShardPlanAction extends TriggerActionBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String TTL_PROP = "ttl";
-
-  public static final int DEFAULT_TTL_SECONDS = 3600 * 24 * 2;
-
-  private int cleanupTTL;
-
-  public InactiveShardPlanAction() {
-    super();
-    TriggerUtils.validProperties(validProperties, TTL_PROP);
-  }
-
-  @Override
-  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException {
-    super.configure(loader, cloudManager, properties);
-    String cleanupStr = String.valueOf(properties.getOrDefault(TTL_PROP, String.valueOf(DEFAULT_TTL_SECONDS)));
-    try {
-      cleanupTTL = Integer.parseInt(cleanupStr);
-    } catch (Exception e) {
-      throw new TriggerValidationException(getName(), TTL_PROP, "invalid value '" + cleanupStr + "': " + e.toString());
-    }
-    if (cleanupTTL < 0) {
-      throw new TriggerValidationException(getName(), TTL_PROP, "invalid value '" + cleanupStr + "', should be > 0. ");
-    }
-  }
-
-  @Override
-  public void process(TriggerEvent event, ActionContext context) throws Exception {
-    SolrCloudManager cloudManager = context.getCloudManager();
-    ClusterState state = cloudManager.getClusterStateProvider().getClusterState();
-    Map<String, List<String>> cleanup = new LinkedHashMap<>();
-    Map<String, List<String>> inactive = new LinkedHashMap<>();
-    Map<String, Map<String, Object>> staleLocks = new LinkedHashMap<>();
-    state.forEachCollection(coll ->
-      coll.getSlices().forEach(s -> {
-        if (Slice.State.INACTIVE.equals(s.getState())) {
-          inactive.computeIfAbsent(coll.getName(), c -> new ArrayList<>()).add(s.getName());
-          String tstampStr = s.getStr(ZkStateReader.STATE_TIMESTAMP_PROP);
-          if (tstampStr == null || tstampStr.isEmpty()) {
-            return;
-          }
-          long timestamp = Long.parseLong(tstampStr);
-          // this timestamp uses epoch time
-          long currentTime = cloudManager.getTimeSource().getEpochTimeNs();
-          long delta = TimeUnit.NANOSECONDS.toSeconds(currentTime - timestamp);
-          log.debug("{}/{}: tstamp={}, time={}, delta={}", coll.getName(), s.getName(), timestamp, currentTime, delta);
-          if (delta > cleanupTTL) {
-            log.debug("-- delete inactive {} / {}", coll.getName(), s.getName());
-            List<SolrRequest> operations = (List<SolrRequest>)context.getProperties().computeIfAbsent("operations", k -> new ArrayList<>());
-            operations.add(CollectionAdminRequest.deleteShard(coll.getName(), s.getName()));
-            cleanup.computeIfAbsent(coll.getName(), c -> new ArrayList<>()).add(s.getName());
-          }
-        }
-        // check for stale shard split locks
-        String parentPath = ZkStateReader.COLLECTIONS_ZKNODE + "/" + coll.getName();
-        List<String> locks;
-        try {
-          locks = cloudManager.getDistribStateManager().listData(parentPath).stream()
-              .filter(name -> name.endsWith("-splitting"))
-              .collect(Collectors.toList());
-          for (String lock : locks) {
-            try {
-              String lockPath = parentPath + "/" + lock;
-              Map<String, Object> lockData = Utils.getJson(cloudManager.getDistribStateManager(), lockPath);
-              String tstampStr = (String)lockData.get(ZkStateReader.STATE_TIMESTAMP_PROP);
-              if (tstampStr == null || tstampStr.isEmpty()) {
-                return;
-              }
-              long timestamp = Long.parseLong(tstampStr);
-              // this timestamp uses epoch time
-              long currentTime = cloudManager.getTimeSource().getEpochTimeNs();
-              long delta = TimeUnit.NANOSECONDS.toSeconds(currentTime - timestamp);
-              log.debug("{}/{}: locktstamp={}, time={}, delta={}", coll.getName(), lock, timestamp, currentTime, delta);
-              if (delta > cleanupTTL) {
-                log.debug("-- delete inactive split lock for {}/{}, delta={}", coll.getName(), lock, delta);
-                cloudManager.getDistribStateManager().removeData(lockPath, -1);
-                lockData.put("currentTimeNs", currentTime);
-                lockData.put("deltaSec", delta);
-                lockData.put("ttlSec", cleanupTTL);
-                staleLocks.put(coll.getName() + "/" + lock, lockData);
-              } else {
-                log.debug("-- lock " + coll.getName() + "/" + lock + " still active (delta=" + delta + ")");
-              }
-            } catch (NoSuchElementException nse) {
-              // already removed by someone else - ignore
-            }
-          }
-        } catch (Exception e) {
-          log.warn("Exception checking for inactive shard split locks in " + parentPath, e);
-        }
-      })
-    );
-    Map<String, Object> results = new LinkedHashMap<>();
-    if (!cleanup.isEmpty()) {
-      results.put("inactive", inactive);
-      results.put("cleanup", cleanup);
-    }
-    if (!staleLocks.isEmpty()) {
-      results.put("staleLocks", staleLocks);
-    }
-    if (!results.isEmpty()) {
-      context.getProperties().put(getName(), results);
-    }
-  }
-}
\ No newline at end of file

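The TTL check in process() above reduces to one computation: the shard's state timestamp is stored in epoch nanoseconds, and the action compares the elapsed seconds against the configured TTL. A hedged sketch of that arithmetic with illustrative values (JDK only; the names are made up for the example):

    import java.util.concurrent.TimeUnit;

    public class TtlCheckSketch {
      // True when more than ttlSeconds have elapsed since timestampNs (both epoch-based).
      static boolean expired(long timestampNs, long nowNs, long ttlSeconds) {
        long deltaSec = TimeUnit.NANOSECONDS.toSeconds(nowNs - timestampNs);
        return deltaSec > ttlSeconds;
      }

      public static void main(String[] args) {
        long nowNs = System.currentTimeMillis() * 1_000_000L; // epoch time in ns
        long threeDaysAgoNs = nowNs - TimeUnit.DAYS.toNanos(3);
        // the action's default TTL is two days (3600 * 24 * 2 seconds)
        System.out.println(expired(threeDaysAgoNs, nowNs, 3600 * 24 * 2)); // true
      }
    }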
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
deleted file mode 100644
index 25083ae..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
+++ /dev/null
@@ -1,479 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.ReplicaInfo;
-import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.util.Pair;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.solr.metrics.SolrCoreMetricManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Trigger that fires {@link TriggerEventType#INDEXSIZE} events when the index size of a
- * shard (in bytes or in number of documents) goes above or below the configured bounds,
- * requesting the corresponding split or merge operations.
- */
-public class IndexSizeTrigger extends TriggerBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String ABOVE_BYTES_PROP = "aboveBytes";
-  public static final String ABOVE_DOCS_PROP = "aboveDocs";
-  public static final String ABOVE_OP_PROP = "aboveOp";
-  public static final String BELOW_BYTES_PROP = "belowBytes";
-  public static final String BELOW_DOCS_PROP = "belowDocs";
-  public static final String BELOW_OP_PROP = "belowOp";
-  public static final String COLLECTIONS_PROP = "collections";
-  public static final String MAX_OPS_PROP = "maxOps";
-
-  public static final String BYTES_SIZE_PROP = "__bytes__";
-  public static final String DOCS_SIZE_PROP = "__docs__";
-  public static final String ABOVE_SIZE_PROP = "aboveSize";
-  public static final String BELOW_SIZE_PROP = "belowSize";
-  public static final String VIOLATION_PROP = "violationType";
-
-  public static final int DEFAULT_MAX_OPS = 10;
-
-  public enum Unit { bytes, docs }
-
-  private long aboveBytes, aboveDocs, belowBytes, belowDocs;
-  private int maxOps;
-  private CollectionParams.CollectionAction aboveOp, belowOp;
-  private final Set<String> collections = new HashSet<>();
-  private final Map<String, Long> lastAboveEventMap = new ConcurrentHashMap<>();
-  private final Map<String, Long> lastBelowEventMap = new ConcurrentHashMap<>();
-
-  public IndexSizeTrigger(String name) {
-    super(TriggerEventType.INDEXSIZE, name);
-    TriggerUtils.validProperties(validProperties,
-        ABOVE_BYTES_PROP, ABOVE_DOCS_PROP, BELOW_BYTES_PROP, BELOW_DOCS_PROP,
-        COLLECTIONS_PROP, MAX_OPS_PROP);
-  }
-
-  @Override
-  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException {
-    super.configure(loader, cloudManager, properties);
-    String aboveStr = String.valueOf(properties.getOrDefault(ABOVE_BYTES_PROP, Long.MAX_VALUE));
-    String belowStr = String.valueOf(properties.getOrDefault(BELOW_BYTES_PROP, -1));
-    try {
-      aboveBytes = Long.parseLong(aboveStr);
-      if (aboveBytes <= 0) {
-        throw new Exception("value must be > 0");
-      }
-    } catch (Exception e) {
-      throw new TriggerValidationException(getName(), ABOVE_BYTES_PROP, "invalid value '" + aboveStr + "': " + e.toString());
-    }
-    try {
-      belowBytes = Long.parseLong(belowStr);
-      if (belowBytes < 0) {
-        belowBytes = -1;
-      }
-    } catch (Exception e) {
-      throw new TriggerValidationException(getName(), BELOW_BYTES_PROP, "invalid value '" + belowStr + "': " + e.toString());
-    }
-    // below must be at least 2x smaller than above, otherwise splitting a shard
-    // would immediately put the shard below the threshold and cause the mergeshards action
-    if (belowBytes > 0 && (belowBytes * 2 > aboveBytes)) {
-      throw new TriggerValidationException(getName(), BELOW_BYTES_PROP,
-          "invalid value " + belowBytes + ", should be less than half of '" + ABOVE_BYTES_PROP + "' value, which is " + aboveBytes);
-    }
-    // do the same for docs bounds
-    aboveStr = String.valueOf(properties.getOrDefault(ABOVE_DOCS_PROP, Long.MAX_VALUE));
-    belowStr = String.valueOf(properties.getOrDefault(BELOW_DOCS_PROP, -1));
-    try {
-      aboveDocs = Long.parseLong(aboveStr);
-      if (aboveDocs <= 0) {
-        throw new Exception("value must be > 0");
-      }
-    } catch (Exception e) {
-      throw new TriggerValidationException(getName(), ABOVE_DOCS_PROP, "invalid value '" + aboveStr + "': " + e.toString());
-    }
-    try {
-      belowDocs = Long.parseLong(belowStr);
-      if (belowDocs < 0) {
-        belowDocs = -1;
-      }
-    } catch (Exception e) {
-      throw new TriggerValidationException(getName(), BELOW_DOCS_PROP, "invalid value '" + belowStr + "': " + e.toString());
-    }
-    // below must be at least 2x smaller than above, otherwise splitting a shard
-    // would immediately put the shard below the threshold and cause the mergeshards action
-    if (belowDocs > 0 && (belowDocs * 2 > aboveDocs)) {
-      throw new TriggerValidationException(getName(), BELOW_DOCS_PROP,
-          "invalid value " + belowDocs + ", should be less than half of '" + ABOVE_DOCS_PROP + "' value, which is " + aboveDocs);
-    }
-
-    String collectionsString = (String) properties.get(COLLECTIONS_PROP);
-    if (collectionsString != null && !collectionsString.isEmpty()) {
-      collections.addAll(StrUtils.splitSmart(collectionsString, ','));
-    }
-    String aboveOpStr = String.valueOf(properties.getOrDefault(ABOVE_OP_PROP, CollectionParams.CollectionAction.SPLITSHARD.toLower()));
-    // TODO: this is a placeholder until SOLR-9407 is implemented
-    String belowOpStr = String.valueOf(properties.getOrDefault(BELOW_OP_PROP, CollectionParams.CollectionAction.MERGESHARDS.toLower()));
-    aboveOp = CollectionParams.CollectionAction.get(aboveOpStr);
-    if (aboveOp == null) {
-      throw new TriggerValidationException(getName(), ABOVE_OP_PROP, "unrecognized value: '" + aboveOpStr + "'");
-    }
-    belowOp = CollectionParams.CollectionAction.get(belowOpStr);
-    if (belowOp == null) {
-      throw new TriggerValidationException(getName(), BELOW_OP_PROP, "unrecognized value: '" + belowOpStr + "'");
-    }
-    String maxOpsStr = String.valueOf(properties.getOrDefault(MAX_OPS_PROP, DEFAULT_MAX_OPS));
-    try {
-      maxOps = Integer.parseInt(maxOpsStr);
-      if (maxOps < 1) {
-        throw new Exception("must be > 1");
-      }
-    } catch (Exception e) {
-      throw new TriggerValidationException(getName(), MAX_OPS_PROP, "invalid value: '" + maxOpsStr + "': " + e.getMessage());
-    }
-  }
-
-  @Override
-  protected Map<String, Object> getState() {
-    Map<String, Object> state = new HashMap<>();
-    state.put("lastAboveEventMap", lastAboveEventMap);
-    state.put("lastBelowEventMap", lastBelowEventMap);
-    return state;
-  }
-
-  @Override
-  protected void setState(Map<String, Object> state) {
-    this.lastAboveEventMap.clear();
-    this.lastBelowEventMap.clear();
-    Map<String, Long> replicaVsTime = (Map<String, Long>)state.get("lastAboveEventMap");
-    if (replicaVsTime != null) {
-      this.lastAboveEventMap.putAll(replicaVsTime);
-    }
-    replicaVsTime = (Map<String, Long>)state.get("lastBelowEventMap");
-    if (replicaVsTime != null) {
-      this.lastBelowEventMap.putAll(replicaVsTime);
-    }
-  }
-
-  @Override
-  public void restoreState(AutoScaling.Trigger old) {
-    assert old.isClosed();
-    if (old instanceof IndexSizeTrigger) {
-      IndexSizeTrigger that = (IndexSizeTrigger)old;
-      assert this.name.equals(that.name);
-      this.lastAboveEventMap.clear();
-      this.lastBelowEventMap.clear();
-      this.lastAboveEventMap.putAll(that.lastAboveEventMap);
-      this.lastBelowEventMap.putAll(that.lastBelowEventMap);
-    } else {
-      throw new SolrException(SolrException.ErrorCode.INVALID_STATE,
-          "Unable to restore state from an unknown type of trigger");
-    }
-  }
-
-  @Override
-  public void run() {
-    synchronized(this) {
-      if (isClosed) {
-        log.warn(getName() + " ran but was already closed");
-        return;
-      }
-    }
-    AutoScaling.TriggerEventProcessor processor = processorRef.get();
-    if (processor == null) {
-      return;
-    }
-
-    // replica name / info + size, retrieved from leaders only
-    Map<String, ReplicaInfo> currentSizes = new HashMap<>();
-
-    try {
-      ClusterState clusterState = cloudManager.getClusterStateProvider().getClusterState();
-      for (String node : clusterState.getLiveNodes()) {
-        Map<String, ReplicaInfo> metricTags = new HashMap<>();
-        // coll, shard, replica
-        Map<String, Map<String, List<ReplicaInfo>>> infos = cloudManager.getNodeStateProvider().getReplicaInfo(node, Collections.emptyList());
-        infos.forEach((coll, shards) -> {
-          if (!collections.isEmpty() && !collections.contains(coll)) {
-            return;
-          }
-          DocCollection docCollection = clusterState.getCollection(coll);
-
-          shards.forEach((sh, replicas) -> {
-            // check only the leader of a replica in active shard
-            Slice s = docCollection.getSlice(sh);
-            if (s.getState() != Slice.State.ACTIVE) {
-              return;
-            }
-            Replica r = s.getLeader();
-            // no leader - don't do anything
-            if (r == null) {
-              return;
-            }
-            // find ReplicaInfo
-            ReplicaInfo info = null;
-            for (ReplicaInfo ri : replicas) {
-              if (r.getCoreName().equals(ri.getCore())) {
-                info = ri;
-                break;
-              }
-            }
-            if (info == null) {
-              // probably replica is not on this node?
-              return;
-            }
-            // we have to translate to the metrics registry name, which uses "_replica_nN" as suffix
-            String replicaName = Utils.parseMetricsReplicaName(coll, info.getCore());
-            if (replicaName == null) { // should never happen???
-              replicaName = info.getName(); // which is actually coreNode name...
-            }
-            String registry = SolrCoreMetricManager.createRegistryName(true, coll, sh, replicaName, null);
-            String tag = "metrics:" + registry + ":INDEX.sizeInBytes";
-            metricTags.put(tag, info);
-            tag = "metrics:" + registry + ":SEARCHER.searcher.numDocs";
-            metricTags.put(tag, info);
-          });
-        });
-        if (metricTags.isEmpty()) {
-          continue;
-        }
-        Map<String, Object> sizes = cloudManager.getNodeStateProvider().getNodeValues(node, metricTags.keySet());
-        sizes.forEach((tag, size) -> {
-          final ReplicaInfo info = metricTags.get(tag);
-          if (info == null) {
-            log.warn("Missing replica info for response tag " + tag);
-          } else {
-            // verify that it's a Number
-            if (!(size instanceof Number)) {
-              log.warn("invalid size value - not a number: '" + size + "' is " + size.getClass().getName());
-              return;
-            }
-
-            ReplicaInfo currentInfo = currentSizes.computeIfAbsent(info.getCore(), k -> (ReplicaInfo)info.clone());
-            if (tag.contains("INDEX")) {
-              currentInfo.getVariables().put(BYTES_SIZE_PROP, ((Number) size).longValue());
-            } else {
-              currentInfo.getVariables().put(DOCS_SIZE_PROP, ((Number) size).longValue());
-            }
-          }
-        });
-      }
-    } catch (IOException e) {
-      log.warn("Error running trigger " + getName(), e);
-      return;
-    }
-
-    long now = cloudManager.getTimeSource().getTimeNs();
-
-    // now check thresholds
-
-    // collection / list(info)
-    Map<String, List<ReplicaInfo>> aboveSize = new HashMap<>();
-
-    Set<String> splittable = new HashSet<>();
-
-    currentSizes.forEach((coreName, info) -> {
-      if ((Long)info.getVariable(BYTES_SIZE_PROP) > aboveBytes ||
-          (Long)info.getVariable(DOCS_SIZE_PROP) > aboveDocs) {
-        if (waitForElapsed(coreName, now, lastAboveEventMap)) {
-          List<ReplicaInfo> infos = aboveSize.computeIfAbsent(info.getCollection(), c -> new ArrayList<>());
-          if (!infos.contains(info)) {
-            if ((Long)info.getVariable(BYTES_SIZE_PROP) > aboveBytes) {
-              info.getVariables().put(VIOLATION_PROP, ABOVE_BYTES_PROP);
-            } else {
-              info.getVariables().put(VIOLATION_PROP, ABOVE_DOCS_PROP);
-            }
-            infos.add(info);
-            splittable.add(info.getName());
-          }
-        }
-      } else {
-        // no violation - clear waitForElapsed
-        lastAboveEventMap.remove(coreName);
-      }
-    });
-
-    // collection / list(info)
-    Map<String, List<ReplicaInfo>> belowSize = new HashMap<>();
-
-    currentSizes.forEach((coreName, info) -> {
-      if (((Long)info.getVariable(BYTES_SIZE_PROP) < belowBytes ||
-          (Long)info.getVariable(DOCS_SIZE_PROP) < belowDocs) &&
-          // make sure we don't produce conflicting ops
-          !splittable.contains(info.getName())) {
-        if (waitForElapsed(coreName, now, lastBelowEventMap)) {
-          List<ReplicaInfo> infos = belowSize.computeIfAbsent(info.getCollection(), c -> new ArrayList<>());
-          if (!infos.contains(info)) {
-            if ((Long)info.getVariable(BYTES_SIZE_PROP) < belowBytes) {
-              info.getVariables().put(VIOLATION_PROP, BELOW_BYTES_PROP);
-            } else {
-              info.getVariables().put(VIOLATION_PROP, BELOW_DOCS_PROP);
-            }
-            infos.add(info);
-          }
-        }
-      } else {
-        // no violation - clear waitForElapsed
-        lastBelowEventMap.remove(coreName);
-      }
-    });
-
-    if (aboveSize.isEmpty() && belowSize.isEmpty()) {
-      log.trace("NO VIOLATIONS: Now={}", now);
-      log.trace("lastAbove={}", lastAboveEventMap);
-      log.trace("lastBelow={}", lastBelowEventMap);
-      return;
-    }
-
-    // find the earliest time when a condition was exceeded
-    final AtomicLong eventTime = new AtomicLong(now);
-
-    // calculate ops
-    final List<TriggerEvent.Op> ops = new ArrayList<>();
-    aboveSize.forEach((coll, replicas) -> {
-      // sort by decreasing size to first split the largest ones
-      // XXX see the comment below about using DOCS_SIZE_PROP in lieu of BYTES_SIZE_PROP
-      replicas.sort((r1, r2) ->
-          Long.compare((Long) r2.getVariable(DOCS_SIZE_PROP), (Long) r1.getVariable(DOCS_SIZE_PROP)));
-      replicas.forEach(r -> {
-        if (ops.size() >= maxOps) {
-          return;
-        }
-        TriggerEvent.Op op = new TriggerEvent.Op(aboveOp);
-        op.addHint(Suggester.Hint.COLL_SHARD, new Pair<>(coll, r.getShard()));
-        ops.add(op);
-        Long time = lastAboveEventMap.get(r.getCore());
-        if (time != null && eventTime.get() > time) {
-          eventTime.set(time);
-        }
-      });
-    });
-    belowSize.forEach((coll, replicas) -> {
-      if (replicas.size() < 2) {
-        return;
-      }
-      if (ops.size() >= maxOps) {
-        return;
-      }
-      // sort by increasing size
-      // XXX this is not quite correct - if BYTES_SIZE_PROP decided that a replica got here
-      // then we should be sorting by BYTES_SIZE_PROP. However, since DOCS and BYTES are
-      // loosely correlated it's simpler to sort just by docs (which better reflects the "too small"
-      // condition than index size, due to possibly existing deleted docs that still occupy space)
-      replicas.sort((r1, r2) ->
-          Long.compare((Long) r1.getVariable(DOCS_SIZE_PROP), (Long) r2.getVariable(DOCS_SIZE_PROP)));
-
-      // TODO: MERGESHARDS is not implemented yet. For now take the top two smallest shards
-      // TODO: but in the future we probably need to get ones with adjacent ranges.
-
-      // TODO: generate as many MERGESHARDS as needed to consume all belowSize shards
-      TriggerEvent.Op op = new TriggerEvent.Op(belowOp);
-      op.addHint(Suggester.Hint.COLL_SHARD, new Pair<>(coll, replicas.get(0).getShard()));
-      op.addHint(Suggester.Hint.COLL_SHARD, new Pair<>(coll, replicas.get(1).getShard()));
-      ops.add(op);
-      Long time = lastBelowEventMap.get(replicas.get(0).getCore());
-      if (time != null && eventTime.get() > time) {
-        eventTime.set(time);
-      }
-      time = lastBelowEventMap.get(replicas.get(1).getCore());
-      if (time != null && eventTime.get() > time) {
-        eventTime.set(time);
-      }
-    });
-
-    if (ops.isEmpty()) {
-      return;
-    }
-    if (processor.process(new IndexSizeEvent(getName(), eventTime.get(), ops, aboveSize, belowSize))) {
-      // update last event times
-      aboveSize.forEach((coll, replicas) -> {
-        replicas.forEach(r -> lastAboveEventMap.put(r.getCore(), now));
-      });
-      belowSize.forEach((coll, replicas) -> {
-        if (replicas.size() < 2) {
-          return;
-        }
-        lastBelowEventMap.put(replicas.get(0).getCore(), now);
-        lastBelowEventMap.put(replicas.get(1).getCore(), now);
-      });
-    }
-  }
-
-  private boolean waitForElapsed(String name, long now, Map<String, Long> lastEventMap) {
-    Long lastTime = lastEventMap.computeIfAbsent(name, s -> now);
-    long elapsed = TimeUnit.SECONDS.convert(now - lastTime, TimeUnit.NANOSECONDS);
-    log.trace("name={}, lastTime={}, elapsed={}", name, lastTime, elapsed);
-    if (TimeUnit.SECONDS.convert(now - lastTime, TimeUnit.NANOSECONDS) < getWaitForSecond()) {
-      return false;
-    }
-    return true;
-  }
-
-  public static class IndexSizeEvent extends TriggerEvent {
-    public IndexSizeEvent(String source, long eventTime, List<Op> ops, Map<String, List<ReplicaInfo>> aboveSize,
-                          Map<String, List<ReplicaInfo>> belowSize) {
-      super(TriggerEventType.INDEXSIZE, source, eventTime, null);
-      properties.put(TriggerEvent.REQUESTED_OPS, ops);
-      // avoid passing very large amounts of data here - just use replica names
-      TreeMap<String, String> above = new TreeMap<>();
-      aboveSize.forEach((coll, replicas) ->
-          replicas.forEach(r -> above.put(r.getCore(), "docs=" + r.getVariable(DOCS_SIZE_PROP) + ", bytes=" + r.getVariable(BYTES_SIZE_PROP))));
-      properties.put(ABOVE_SIZE_PROP, above);
-      TreeMap<String, String> below = new TreeMap<>();
-      belowSize.forEach((coll, replicas) ->
-          replicas.forEach(r -> below.put(r.getCore(), "docs=" + r.getVariable(DOCS_SIZE_PROP) + ", bytes=" + r.getVariable(BYTES_SIZE_PROP))));
-      properties.put(BELOW_SIZE_PROP, below);
-    }
-  }
-
-}

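The configure() method above enforces a hysteresis band: an enabled lower bound must be at most half of the upper bound, otherwise splitting a shard that crossed the upper bound would immediately leave both halves below the lower bound and request a merge. A compact sketch of just that validation rule (illustrative names, JDK only):

    public class SizeBoundsSketch {
      // Mirrors the trigger's rule: 'below' may be disabled (-1); an enabled
      // 'below' must satisfy 2 * below <= above.
      static void validateBounds(long above, long below) {
        if (above <= 0) {
          throw new IllegalArgumentException("above bound must be > 0");
        }
        if (below > 0 && below * 2 > above) {
          throw new IllegalArgumentException(
              "below=" + below + " must be at most half of above=" + above);
        }
      }

      public static void main(String[] args) {
        validateBounds(1000, 400);   // ok: halves of a ~1000-unit shard stay above 400
        validateBounds(1000, -1);    // ok: lower bound disabled
        try {
          validateBounds(1000, 600); // rejected: halves (~500) would be "too small"
        } catch (IllegalArgumentException e) {
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }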
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/LoggingListener.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/LoggingListener.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/LoggingListener.java
deleted file mode 100644
index a7dcf63..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/LoggingListener.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.lang.invoke.MethodHandles;
-
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventProcessorStage;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Implementation of {@link TriggerListener} that reports
- * events to a log.
- */
-public class LoggingListener extends TriggerListenerBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  @Override
-  public void onEvent(TriggerEvent event, TriggerEventProcessorStage stage, String actionName, ActionContext context,
-                      Throwable error, String message) {
-    log.info("{}: stage={}, actionName={}, event={}, error={}, messsage={}", config.name, stage, actionName, event, error, message);
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/MetricTrigger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/MetricTrigger.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/MetricTrigger.java
deleted file mode 100644
index 9058a9a..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/MetricTrigger.java
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.stream.Collectors;
-
-import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.params.AutoScalingParams;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.util.Pair;
-import org.apache.solr.core.SolrResourceLoader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.AutoScalingParams.ABOVE;
-import static org.apache.solr.common.params.AutoScalingParams.BELOW;
-import static org.apache.solr.common.params.AutoScalingParams.METRIC;
-import static org.apache.solr.common.params.AutoScalingParams.PREFERRED_OP;
-
-public class MetricTrigger extends TriggerBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private String metric;
-  private Number above, below;
-  private String collection, shard, node, preferredOp;
-
-  private final Map<String, Long> lastNodeEvent = new ConcurrentHashMap<>();
-
-  public MetricTrigger(String name) {
-    super(TriggerEventType.METRIC, name);
-    TriggerUtils.requiredProperties(requiredProperties, validProperties, METRIC);
-    TriggerUtils.validProperties(validProperties, ABOVE, BELOW, PREFERRED_OP,
-        AutoScalingParams.COLLECTION,
-        AutoScalingParams.SHARD,
-        AutoScalingParams.NODE);
-  }
-
-  @Override
-  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException {
-    super.configure(loader, cloudManager, properties);
-    this.metric = (String) properties.get(METRIC);
-    this.above = (Number) properties.get(ABOVE);
-    this.below = (Number) properties.get(BELOW);
-    this.collection = (String) properties.getOrDefault(AutoScalingParams.COLLECTION, Policy.ANY);
-    shard = (String) properties.getOrDefault(AutoScalingParams.SHARD, Policy.ANY);
-    if (collection.equals(Policy.ANY) && !shard.equals(Policy.ANY)) {
-      throw new TriggerValidationException("shard", "When 'shard' is other than #ANY then collection name must be also other than #ANY");
-    }
-    node = (String) properties.getOrDefault(AutoScalingParams.NODE, Policy.ANY);
-    preferredOp = (String) properties.getOrDefault(PREFERRED_OP, CollectionParams.CollectionAction.MOVEREPLICA.toLower());
-  }
-
-  @Override
-  protected Map<String, Object> getState() {
-    return null;
-  }
-
-  @Override
-  protected void setState(Map<String, Object> state) {
-    lastNodeEvent.clear();
-    Map<String, Long> nodeTimes = (Map<String, Long>) state.get("lastNodeEvent");
-    if (nodeTimes != null) {
-      lastNodeEvent.putAll(nodeTimes);
-    }
-  }
-
-  @Override
-  public void restoreState(AutoScaling.Trigger old) {
-    assert old.isClosed();
-    if (old instanceof MetricTrigger) {
-      MetricTrigger that = (MetricTrigger) old;
-      assert this.name.equals(that.name);
-      this.lastNodeEvent.clear();
-      this.lastNodeEvent.putAll(that.lastNodeEvent);
-    } else {
-      throw new SolrException(SolrException.ErrorCode.INVALID_STATE,
-          "Unable to restore state from an unknown type of trigger");
-    }
-  }
-
-  @Override
-  public void run() {
-    AutoScaling.TriggerEventProcessor processor = processorRef.get();
-    if (processor == null) {
-      return;
-    }
-
-    Set<String> liveNodes = null;
-    if (node.equals(Policy.ANY)) {
-      if (collection.equals(Policy.ANY)) {
-        liveNodes = cloudManager.getClusterStateProvider().getLiveNodes();
-      } else {
-        final Set<String> nodes = new HashSet<>();
-        ClusterState.CollectionRef ref = cloudManager.getClusterStateProvider().getState(collection);
-        DocCollection docCollection;
-        if (ref == null || (docCollection = ref.get()) == null) {
-          log.warn("MetricTrigger could not find collection: {}", collection);
-          return;
-        }
-        if (shard.equals(Policy.ANY)) {
-          docCollection.getReplicas().forEach(replica -> {
-            nodes.add(replica.getNodeName());
-          });
-        } else {
-          Slice slice = docCollection.getSlice(shard);
-          if (slice == null) {
-            log.warn("MetricTrigger could not find collection: {} shard: {}", collection, shard);
-            return;
-          }
-          slice.getReplicas().forEach(replica -> nodes.add(replica.getNodeName()));
-        }
-        liveNodes = nodes;
-      }
-    } else {
-      liveNodes = Collections.singleton(node);
-    }
-
-    Map<String, Number> rates = new HashMap<>(liveNodes.size());
-    for (String node : liveNodes) {
-      Map<String, Object> values = cloudManager.getNodeStateProvider().getNodeValues(node, Collections.singletonList(metric));
-      values.forEach((tag, rate) -> rates.computeIfAbsent(node, s -> (Number) rate));
-    }
-
-    long now = cloudManager.getTimeSource().getTimeNs();
-    // check for exceeded rates and filter out those with less than waitFor from previous events
-    Map<String, Number> hotNodes = rates.entrySet().stream()
-        .filter(entry -> waitForElapsed(entry.getKey(), now, lastNodeEvent))
-        .filter(entry -> (below != null && Double.compare(entry.getValue().doubleValue(), below.doubleValue()) < 0) || (above != null && Double.compare(entry.getValue().doubleValue(), above.doubleValue()) > 0))
-        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
-
-    if (hotNodes.isEmpty()) return;
-
-    final AtomicLong eventTime = new AtomicLong(now);
-    hotNodes.forEach((n, r) -> {
-      long time = lastNodeEvent.get(n);
-      if (eventTime.get() > time) {
-        eventTime.set(time);
-      }
-    });
-
-    if (processor.process(new MetricBreachedEvent(getName(), collection, shard, preferredOp, eventTime.get(), metric, hotNodes))) {
-      hotNodes.keySet().forEach(node -> lastNodeEvent.put(node, now));
-    }
-  }
-
-  private boolean waitForElapsed(String name, long now, Map<String, Long> lastEventMap) {
-    Long lastTime = lastEventMap.computeIfAbsent(name, s -> now);
-    long elapsed = TimeUnit.SECONDS.convert(now - lastTime, TimeUnit.NANOSECONDS);
-    log.trace("name={}, lastTime={}, elapsed={}", name, lastTime, elapsed);
-    return elapsed >= getWaitForSecond();
-  }
-
-  public static class MetricBreachedEvent extends TriggerEvent {
-    public MetricBreachedEvent(String source, String collection, String shard, String preferredOp, long eventTime, String metric, Map<String, Number> hotNodes) {
-      super(TriggerEventType.METRIC, source, eventTime, null);
-      properties.put(METRIC, metric);
-      properties.put(AutoScalingParams.NODE, hotNodes);
-      if (!collection.equals(Policy.ANY)) {
-        properties.put(AutoScalingParams.COLLECTION, collection);
-      }
-      if (!shard.equals(Policy.ANY))  {
-        properties.put(AutoScalingParams.SHARD, shard);
-      }
-      properties.put(PREFERRED_OP, preferredOp);
-
-      // specify requested ops
-      List<Op> ops = new ArrayList<>(hotNodes.size());
-      for (String n : hotNodes.keySet()) {
-        Op op = new Op(CollectionParams.CollectionAction.get(preferredOp));
-        op.addHint(Suggester.Hint.SRC_NODE, n);
-        if (!collection.equals(Policy.ANY)) {
-          if (!shard.equals(Policy.ANY)) {
-            op.addHint(Suggester.Hint.COLL_SHARD, new Pair<>(collection, shard));
-          } else {
-            op.addHint(Suggester.Hint.COLL, collection);
-          }
-        }
-        ops.add(op);
-      }
-      properties.put(TriggerEvent.REQUESTED_OPS, ops);
-    }
-  }
-}

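The run() method above boils down to a stream pipeline: gather one metric value per node, then keep the nodes whose value breaches the 'above' or 'below' bound (the waitFor filter is covered separately below). A self-contained sketch of the bound filter with made-up data (JDK only):

    import java.util.Map;
    import java.util.stream.Collectors;

    public class HotNodeFilterSketch {
      // Keep nodes whose metric is below 'below' or above 'above'; null disables a bound.
      static Map<String, Number> hotNodes(Map<String, Number> rates, Number above, Number below) {
        return rates.entrySet().stream()
            .filter(e -> (below != null && e.getValue().doubleValue() < below.doubleValue())
                      || (above != null && e.getValue().doubleValue() > above.doubleValue()))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
      }

      public static void main(String[] args) {
        Map<String, Number> rates = Map.<String, Number>of("node1", 0.2, "node2", 5.0, "node3", 1.0);
        // above=2.0, below=0.5: node2 is too hot, node1 too cold, node3 is fine
        System.out.println(hotNodes(rates, 2.0, 0.5)); // {node1=0.2, node2=5.0} (order may vary)
      }
    }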
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/NodeAddedTrigger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/NodeAddedTrigger.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/NodeAddedTrigger.java
deleted file mode 100644
index 6202944..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/NodeAddedTrigger.java
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.core.SolrResourceLoader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.AutoScalingParams.PREFERRED_OP;
-
-/**
- * Trigger for the {@link TriggerEventType#NODEADDED} event
- */
-public class NodeAddedTrigger extends TriggerBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private Set<String> lastLiveNodes = new HashSet<>();
-
-  private Map<String, Long> nodeNameVsTimeAdded = new HashMap<>();
-
-  private String preferredOp;
-
-  public NodeAddedTrigger(String name) {
-    super(TriggerEventType.NODEADDED, name);
-    TriggerUtils.validProperties(validProperties, PREFERRED_OP);
-  }
-
-  @Override
-  public void init() throws Exception {
-    super.init();
-    lastLiveNodes = new HashSet<>(cloudManager.getClusterStateProvider().getLiveNodes());
-    log.debug("NodeAddedTrigger {} - Initial livenodes: {}", name, lastLiveNodes);
-    log.debug("NodeAddedTrigger {} instantiated with properties: {}", name, properties);
-    // pick up added nodes for which marker paths were created
-    try {
-      List<String> added = stateManager.listData(ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH);
-      added.forEach(n -> {
-        // don't add nodes that have since gone away
-        if (lastLiveNodes.contains(n)) {
-          log.debug("Adding node from marker path: {}", n);
-          nodeNameVsTimeAdded.put(n, cloudManager.getTimeSource().getTimeNs());
-        }
-        removeMarker(n);
-      });
-    } catch (NoSuchElementException e) {
-      // ignore
-    } catch (Exception e) {
-      log.warn("Exception retrieving nodeLost markers", e);
-    }
-  }
-
-  @Override
-  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException {
-    super.configure(loader, cloudManager, properties);
-    preferredOp = (String) properties.getOrDefault(PREFERRED_OP, CollectionParams.CollectionAction.MOVEREPLICA.toLower());
-    preferredOp = preferredOp.toLowerCase(Locale.ROOT);
-    // verify
-    CollectionParams.CollectionAction action = CollectionParams.CollectionAction.get(preferredOp);
-    switch (action) {
-      case ADDREPLICA:
-      case MOVEREPLICA:
-      case NONE:
-        break;
-      default:
-        throw new TriggerValidationException("Unsupported preferredOperation=" + preferredOp + " specified for node added trigger");
-    }
-  }
-
-  @Override
-  public void restoreState(AutoScaling.Trigger old) {
-    assert old.isClosed();
-    if (old instanceof NodeAddedTrigger) {
-      NodeAddedTrigger that = (NodeAddedTrigger) old;
-      assert this.name.equals(that.name);
-      this.lastLiveNodes.clear();
-      this.lastLiveNodes.addAll(that.lastLiveNodes);
-      this.nodeNameVsTimeAdded.clear();
-      this.nodeNameVsTimeAdded.putAll(that.nodeNameVsTimeAdded);
-    } else  {
-      throw new SolrException(SolrException.ErrorCode.INVALID_STATE,
-          "Unable to restore state from an unknown type of trigger");
-    }
-  }
-
-  @Override
-  protected Map<String, Object> getState() {
-    Map<String,Object> state = new HashMap<>();
-    state.put("lastLiveNodes", lastLiveNodes);
-    state.put("nodeNameVsTimeAdded", nodeNameVsTimeAdded);
-    return state;
-  }
-
-  @Override
-  protected void setState(Map<String, Object> state) {
-    this.lastLiveNodes.clear();
-    this.nodeNameVsTimeAdded.clear();
-    Collection<String> lastLiveNodes = (Collection<String>)state.get("lastLiveNodes");
-    if (lastLiveNodes != null) {
-      this.lastLiveNodes.addAll(lastLiveNodes);
-    }
-    Map<String,Long> nodeNameVsTimeAdded = (Map<String,Long>)state.get("nodeNameVsTimeAdded");
-    if (nodeNameVsTimeAdded != null) {
-      this.nodeNameVsTimeAdded.putAll(nodeNameVsTimeAdded);
-    }
-  }
-
-  @Override
-  public void run() {
-    try {
-      synchronized (this) {
-        if (isClosed) {
-          log.warn("NodeAddedTrigger ran but was already closed");
-          throw new RuntimeException("Trigger has been closed");
-        }
-      }
-      log.debug("Running NodeAddedTrigger {}", name);
-
-      Set<String> newLiveNodes = new HashSet<>(cloudManager.getClusterStateProvider().getLiveNodes());
-      log.debug("Found livenodes: {}", newLiveNodes.size());
-
-      // have any nodes that we were tracking been removed from the cluster?
-      // if so, remove them from the tracking map
-      Set<String> trackingKeySet = nodeNameVsTimeAdded.keySet();
-      trackingKeySet.retainAll(newLiveNodes);
-
-      // have any new nodes been added?
-      Set<String> copyOfNew = new HashSet<>(newLiveNodes);
-      copyOfNew.removeAll(lastLiveNodes);
-      copyOfNew.forEach(n -> {
-        long eventTime = cloudManager.getTimeSource().getTimeNs();
-        log.debug("Tracking new node: {} at time {}", n, eventTime);
-        nodeNameVsTimeAdded.put(n, eventTime);
-      });
-
-      // has enough time expired to trigger events for a node?
-      List<String> nodeNames = new ArrayList<>();
-      List<Long> times = new ArrayList<>();
-      for (Iterator<Map.Entry<String, Long>> it = nodeNameVsTimeAdded.entrySet().iterator(); it.hasNext(); ) {
-        Map.Entry<String, Long> entry = it.next();
-        String nodeName = entry.getKey();
-        Long timeAdded = entry.getValue();
-        long now = cloudManager.getTimeSource().getTimeNs();
-        if (TimeUnit.SECONDS.convert(now - timeAdded, TimeUnit.NANOSECONDS) >= getWaitForSecond()) {
-          nodeNames.add(nodeName);
-          times.add(timeAdded);
-        }
-      }
-      AutoScaling.TriggerEventProcessor processor = processorRef.get();
-      if (!nodeNames.isEmpty()) {
-        if (processor != null) {
-          log.debug("NodeAddedTrigger {} firing registered processor for nodes: {} added at times {}, now={}", name,
-              nodeNames, times, cloudManager.getTimeSource().getTimeNs());
-          if (processor.process(new NodeAddedEvent(getEventType(), getName(), times, nodeNames, preferredOp))) {
-            // remove from tracking set only if the fire was accepted
-            nodeNames.forEach(n -> {
-              nodeNameVsTimeAdded.remove(n);
-              removeMarker(n);
-            });
-          }
-        } else  {
-          nodeNames.forEach(n -> {
-            nodeNameVsTimeAdded.remove(n);
-            removeMarker(n);
-          });
-        }
-      }
-      lastLiveNodes = new HashSet<>(newLiveNodes);
-    } catch (RuntimeException e) {
-      log.error("Unexpected exception in NodeAddedTrigger", e);
-    }
-  }
-
-  private void removeMarker(String nodeName) {
-    String path = ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH + "/" + nodeName;
-    try {
-      log.debug("NodeAddedTrigger {} - removing marker path: {}", name, path);
-      if (stateManager.hasData(path)) {
-        stateManager.removeData(path, -1);
-      }
-    } catch (NoSuchElementException e) {
-      // ignore
-    } catch (Exception e) {
-      log.debug("Exception removing nodeAdded marker " + nodeName, e);
-    }
-
-  }
-
-  public static class NodeAddedEvent extends TriggerEvent {
-
-    public NodeAddedEvent(TriggerEventType eventType, String source, List<Long> times, List<String> nodeNames, String preferredOp) {
-      // use the oldest time as the time of the event
-      super(eventType, source, times.get(0), null);
-      properties.put(NODE_NAMES, nodeNames);
-      properties.put(EVENT_TIMES, times);
-      properties.put(PREFERRED_OP, preferredOp);
-    }
-  }
-}

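Both node triggers share a debounce pattern around waitForSecond: record a timestamp when a node first appears (or disappears), and only fire once the configured number of seconds has elapsed, so short-lived flaps never produce events. A minimal sketch of that mechanism (illustrative class and method names; JDK only):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.TimeUnit;

    public class WaitForDebounceSketch {
      private final Map<String, Long> firstSeenNs = new HashMap<>();
      private final long waitForSeconds;

      WaitForDebounceSketch(long waitForSeconds) {
        this.waitForSeconds = waitForSeconds;
      }

      // Record a sighting of 'node' at 'nowNs' and return every tracked node
      // that has been pending for at least waitForSeconds.
      List<String> track(String node, long nowNs) {
        firstSeenNs.putIfAbsent(node, nowNs);
        List<String> ready = new ArrayList<>();
        firstSeenNs.forEach((n, t) -> {
          if (TimeUnit.NANOSECONDS.toSeconds(nowNs - t) >= waitForSeconds) {
            ready.add(n);
          }
        });
        return ready;
      }

      public static void main(String[] args) {
        WaitForDebounceSketch debounce = new WaitForDebounceSketch(30);
        System.out.println(debounce.track("node1", 0L));                           // []
        System.out.println(debounce.track("node1", TimeUnit.SECONDS.toNanos(31))); // [node1]
      }
    }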
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/NodeLostTrigger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/NodeLostTrigger.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/NodeLostTrigger.java
deleted file mode 100644
index ddb4913..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/NodeLostTrigger.java
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.core.SolrResourceLoader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.AutoScalingParams.PREFERRED_OP;
-
-/**
- * Trigger for the {@link TriggerEventType#NODELOST} event
- */
-public class NodeLostTrigger extends TriggerBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private Set<String> lastLiveNodes = new HashSet<>();
-
-  private Map<String, Long> nodeNameVsTimeRemoved = new HashMap<>();
-
-  private String preferredOp;
-
-  public NodeLostTrigger(String name) {
-    super(TriggerEventType.NODELOST, name);
-    TriggerUtils.validProperties(validProperties, PREFERRED_OP);
-  }
-
-  @Override
-  public void init() throws Exception {
-    super.init();
-    lastLiveNodes = new HashSet<>(cloudManager.getClusterStateProvider().getLiveNodes());
-    log.debug("NodeLostTrigger {} - Initial livenodes: {}", name, lastLiveNodes);
-    // pick up lost nodes for which marker paths were created
-    try {
-      List<String> lost = stateManager.listData(ZkStateReader.SOLR_AUTOSCALING_NODE_LOST_PATH);
-      lost.forEach(n -> {
-        // don't add nodes that have since come back
-        if (!lastLiveNodes.contains(n)) {
-          log.debug("Adding lost node from marker path: {}", n);
-          nodeNameVsTimeRemoved.put(n, cloudManager.getTimeSource().getTimeNs());
-        }
-        removeMarker(n);
-      });
-    } catch (NoSuchElementException e) {
-      // ignore
-    } catch (Exception e) {
-      log.warn("Exception retrieving nodeLost markers", e);
-    }
-  }
-
-  @Override
-  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException {
-    super.configure(loader, cloudManager, properties);
-    preferredOp = (String) properties.getOrDefault(PREFERRED_OP, CollectionParams.CollectionAction.MOVEREPLICA.toLower());
-    preferredOp = preferredOp.toLowerCase(Locale.ROOT);
-    // verify
-    CollectionParams.CollectionAction action = CollectionParams.CollectionAction.get(preferredOp);
-    switch (action) {
-      case MOVEREPLICA:
-      case DELETENODE:
-      case NONE:
-        break;
-      default:
-        throw new TriggerValidationException("Unsupported preferredOperation=" + preferredOp + " specified for node lost trigger");
-    }
-  }
-
-  @Override
-  public void restoreState(AutoScaling.Trigger old) {
-    assert old.isClosed();
-    if (old instanceof NodeLostTrigger) {
-      NodeLostTrigger that = (NodeLostTrigger) old;
-      assert this.name.equals(that.name);
-      this.lastLiveNodes.clear();
-      this.lastLiveNodes.addAll(that.lastLiveNodes);
-      this.nodeNameVsTimeRemoved.clear();
-      this.nodeNameVsTimeRemoved.putAll(that.nodeNameVsTimeRemoved);
-    } else  {
-      throw new SolrException(SolrException.ErrorCode.INVALID_STATE,
-          "Unable to restore state from an unknown type of trigger");
-    }
-  }
-
-  @Override
-  protected Map<String, Object> getState() {
-    Map<String,Object> state = new HashMap<>();
-    state.put("lastLiveNodes", lastLiveNodes);
-    state.put("nodeNameVsTimeRemoved", nodeNameVsTimeRemoved);
-    return state;
-  }
-
-  @Override
-  protected void setState(Map<String, Object> state) {
-    this.lastLiveNodes.clear();
-    this.nodeNameVsTimeRemoved.clear();
-    Collection<String> lastLiveNodes = (Collection<String>)state.get("lastLiveNodes");
-    if (lastLiveNodes != null) {
-      this.lastLiveNodes.addAll(lastLiveNodes);
-    }
-    Map<String,Long> nodeNameVsTimeRemoved = (Map<String,Long>)state.get("nodeNameVsTimeRemoved");
-    if (nodeNameVsTimeRemoved != null) {
-      this.nodeNameVsTimeRemoved.putAll(nodeNameVsTimeRemoved);
-    }
-  }
-
-  @Override
-  public void run() {
-    try {
-      synchronized (this) {
-        if (isClosed) {
-          log.warn("NodeLostTrigger ran but was already closed");
-          return;
-        }
-      }
-
-      Set<String> newLiveNodes = new HashSet<>(cloudManager.getClusterStateProvider().getLiveNodes());
-      log.debug("Running NodeLostTrigger: {} with currently live nodes: {}", name, newLiveNodes.size());
-
-      // have any nodes that we were tracking been added to the cluster?
-      // if so, remove them from the tracking map
-      Set<String> trackingKeySet = nodeNameVsTimeRemoved.keySet();
-      trackingKeySet.removeAll(newLiveNodes);
-
-      // have any nodes been removed?
-      Set<String> copyOfLastLiveNodes = new HashSet<>(lastLiveNodes);
-      copyOfLastLiveNodes.removeAll(newLiveNodes);
-      copyOfLastLiveNodes.forEach(n -> {
-        log.debug("Tracking lost node: {}", n);
-        nodeNameVsTimeRemoved.put(n, cloudManager.getTimeSource().getTimeNs());
-      });
-
-      // has enough time expired to trigger events for a node?
-      List<String> nodeNames = new ArrayList<>();
-      List<Long> times = new ArrayList<>();
-      for (Iterator<Map.Entry<String, Long>> it = nodeNameVsTimeRemoved.entrySet().iterator(); it.hasNext(); ) {
-        Map.Entry<String, Long> entry = it.next();
-        String nodeName = entry.getKey();
-        Long timeRemoved = entry.getValue();
-        long now = cloudManager.getTimeSource().getTimeNs();
-        if (TimeUnit.SECONDS.convert(now - timeRemoved, TimeUnit.NANOSECONDS) >= getWaitForSecond()) {
-          nodeNames.add(nodeName);
-          times.add(timeRemoved);
-        }
-      }
-      // fire!
-      AutoScaling.TriggerEventProcessor processor = processorRef.get();
-      if (!nodeNames.isEmpty()) {
-        if (processor != null) {
-          log.debug("NodeLostTrigger firing registered processor for lost nodes: {}", nodeNames);
-          if (processor.process(new NodeLostEvent(getEventType(), getName(), times, nodeNames, preferredOp)))  {
-            // remove from tracking set only if the fire was accepted
-            nodeNames.forEach(n -> {
-              nodeNameVsTimeRemoved.remove(n);
-              removeMarker(n);
-            });
-          } else  {
-            log.debug("NodeLostTrigger processor for lost nodes: {} is not ready, will try later", nodeNames);
-          }
-        } else  {
-          nodeNames.forEach(n -> {
-            nodeNameVsTimeRemoved.remove(n);
-            removeMarker(n);
-          });
-        }
-      }
-      lastLiveNodes = new HashSet<>(newLiveNodes);
-    } catch (RuntimeException e) {
-      log.error("Unexpected exception in NodeLostTrigger", e);
-    }
-  }
-
-  private void removeMarker(String nodeName) {
-    String path = ZkStateReader.SOLR_AUTOSCALING_NODE_LOST_PATH + "/" + nodeName;
-    try {
-      if (stateManager.hasData(path)) {
-        stateManager.removeData(path, -1);
-      }
-    } catch (NoSuchElementException e) {
-      // ignore
-    } catch (Exception e) {
-      log.warn("Exception removing nodeLost marker " + nodeName, e);
-    }
-  }
-
-  public static class NodeLostEvent extends TriggerEvent {
-
-    public NodeLostEvent(TriggerEventType eventType, String source, List<Long> times, List<String> nodeNames, String preferredOp) {
-      // use the oldest time as the time of the event
-      super(eventType, source, times.get(0), null);
-      properties.put(NODE_NAMES, nodeNames);
-      properties.put(EVENT_TIMES, times);
-      properties.put(PREFERRED_OP, preferredOp);
-    }
-  }
-}
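
Aside: the waitFor check in run() above boils down to plain nanosecond arithmetic. A minimal, self-contained sketch, with System.nanoTime() standing in for the cluster TimeSource and an assumed 120s waitFor (both illustrative, not from this commit):

    import java.util.concurrent.TimeUnit;

    public class WaitForSketch {
      public static void main(String[] args) throws InterruptedException {
        long waitForSeconds = 120;              // assumed trigger waitFor setting
        long timeRemovedNs = System.nanoTime(); // recorded when the node disappeared
        Thread.sleep(10);                       // time passing between trigger runs
        long nowNs = System.nanoTime();
        // fire only once the node has been gone for at least waitForSeconds
        boolean fire = TimeUnit.SECONDS.convert(nowNs - timeRemovedNs,
            TimeUnit.NANOSECONDS) >= waitForSeconds;
        System.out.println("fire=" + fire);     // false here: only ~10ms have elapsed
      }
    }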


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java
deleted file mode 100644
index 81cf374..0000000
--- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.client.solrj.embedded;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.file.Path;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-
-import com.google.common.base.Strings;
-import org.apache.commons.io.output.ByteArrayOutputStream;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.StreamingResponseCallback;
-import org.apache.solr.client.solrj.impl.BinaryRequestWriter;
-import org.apache.solr.client.solrj.impl.BinaryRequestWriter.BAOS;
-import org.apache.solr.client.solrj.request.ContentStreamUpdateRequest;
-import org.apache.solr.client.solrj.request.RequestWriter;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.ContentStream;
-import org.apache.solr.common.util.ContentStreamBase;
-import org.apache.solr.common.util.JavaBinCodec;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.NodeConfig;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.SolrXmlConfig;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.request.SolrRequestInfo;
-import org.apache.solr.response.BinaryResponseWriter;
-import org.apache.solr.response.ResultContext;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.servlet.SolrRequestParsers;
-
-import static org.apache.solr.common.params.CommonParams.PATH;
-
-/**
- * SolrClient that connects directly to a CoreContainer.
- *
- * @since solr 1.3
- */
-public class EmbeddedSolrServer extends SolrClient {
-
-  protected final CoreContainer coreContainer;
-  protected final String coreName;
-  private final SolrRequestParsers _parser;
-
-  /**
-   * Create an EmbeddedSolrServer using a given solr home directory
-   *
-   * @param solrHome        the solr home directory
-   * @param defaultCoreName the core to route requests to by default
-   */
-  public EmbeddedSolrServer(Path solrHome, String defaultCoreName) {
-    this(load(new CoreContainer(SolrXmlConfig.fromSolrHome(solrHome))), defaultCoreName);
-  }
-
-  /**
-   * Create an EmbeddedSolrServer using a NodeConfig
-   *
-   * @param nodeConfig      the configuration
-   * @param defaultCoreName the core to route requests to by default
-   */
-  public EmbeddedSolrServer(NodeConfig nodeConfig, String defaultCoreName) {
-    this(load(new CoreContainer(nodeConfig)), defaultCoreName);
-  }
-
-  private static CoreContainer load(CoreContainer cc) {
-    cc.load();
-    return cc;
-  }
-
-  /**
-   * Create an EmbeddedSolrServer wrapping a particular SolrCore
-   */
-  public EmbeddedSolrServer(SolrCore core) {
-    this(core.getCoreContainer(), core.getName());
-  }
-
-  /**
-   * Create an EmbeddedSolrServer wrapping a CoreContainer.
-   * <p>
-   * Note that EmbeddedSolrServer will shutdown the wrapped CoreContainer when
-   * {@link #close()} is called.
-   *
-   * @param coreContainer the core container
-   * @param coreName      the core to route requests to by default
-   */
-  public EmbeddedSolrServer(CoreContainer coreContainer, String coreName) {
-    if (coreContainer == null) {
-      throw new NullPointerException("CoreContainer instance required");
-    }
-    if (Strings.isNullOrEmpty(coreName))
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Core name cannot be empty");
-    this.coreContainer = coreContainer;
-    this.coreName = coreName;
-    _parser = new SolrRequestParsers(null);
-  }
-
-  // TODO-- this implementation serializes the response (javabin) and then re-parses it.
-  // It *should* be able to convert the response directly into a named list.
-
-  @Override
-  public NamedList<Object> request(SolrRequest request, String coreName) throws SolrServerException, IOException {
-
-    String path = request.getPath();
-    if (path == null || !path.startsWith("/")) {
-      path = "/select";
-    }
-
-    SolrRequestHandler handler = coreContainer.getRequestHandler(path);
-    if (handler != null) {
-      try {
-        SolrQueryRequest req = _parser.buildRequestFrom(null, request.getParams(), getContentStreams(request));
-        req.getContext().put("httpMethod", request.getMethod().name());
-        req.getContext().put(PATH, path);
-        SolrQueryResponse resp = new SolrQueryResponse();
-        handler.handleRequest(req, resp);
-        checkForExceptions(resp);
-        return BinaryResponseWriter.getParsedResponse(req, resp);
-      } catch (IOException | SolrException iox) {
-        throw iox;
-      } catch (Exception ex) {
-        throw new SolrServerException(ex);
-      }
-    }
-
-    if (coreName == null)
-      coreName = this.coreName;
-
-    // Check for cores action
-    SolrQueryRequest req = null;
-    try (SolrCore core = coreContainer.getCore(coreName)) {
-
-      if (core == null) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "No such core: " + coreName);
-      }
-
-      SolrParams params = request.getParams();
-      if (params == null) {
-        params = new ModifiableSolrParams();
-      }
-
-      // Extract the handler from the path or params
-      handler = core.getRequestHandler(path);
-      if (handler == null) {
-        if ("/select".equals(path) || "/select/".equalsIgnoreCase(path)) {
-          String qt = params.get(CommonParams.QT);
-          handler = core.getRequestHandler(qt);
-          if (handler == null) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "unknown handler: " + qt);
-          }
-        }
-      }
-
-      if (handler == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "unknown handler: " + path);
-      }
-
-      req = _parser.buildRequestFrom(core, params, getContentStreams(request));
-      req.getContext().put(PATH, path);
-      req.getContext().put("httpMethod", request.getMethod().name());
-      SolrQueryResponse rsp = new SolrQueryResponse();
-      SolrRequestInfo.setRequestInfo(new SolrRequestInfo(req, rsp));
-
-      core.execute(handler, req, rsp);
-      checkForExceptions(rsp);
-
-      // Check if this should stream results
-      if (request.getStreamingResponseCallback() != null) {
-        try {
-          final StreamingResponseCallback callback = request.getStreamingResponseCallback();
-          BinaryResponseWriter.Resolver resolver =
-              new BinaryResponseWriter.Resolver(req, rsp.getReturnFields()) {
-                @Override
-                public void writeResults(ResultContext ctx, JavaBinCodec codec) throws IOException {
-                  // write an empty list...
-                  SolrDocumentList docs = new SolrDocumentList();
-                  docs.setNumFound(ctx.getDocList().matches());
-                  docs.setStart(ctx.getDocList().offset());
-                  docs.setMaxScore(ctx.getDocList().maxScore());
-                  codec.writeSolrDocumentList(docs);
-
-                  // This will transform
-                  writeResultsBody(ctx, codec);
-                }
-              };
-
-
-          try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
-            createJavaBinCodec(callback, resolver).setWritableDocFields(resolver).marshal(rsp.getValues(), out);
-
-            try (InputStream in = out.toInputStream()) {
-              return (NamedList<Object>) new JavaBinCodec(resolver).unmarshal(in);
-            }
-          }
-        } catch (Exception ex) {
-          throw new RuntimeException(ex);
-        }
-      }
-
-      // Now write it out
-      NamedList<Object> normalized = BinaryResponseWriter.getParsedResponse(req, rsp);
-      return normalized;
-    } catch (IOException | SolrException iox) {
-      throw iox;
-    } catch (Exception ex) {
-      throw new SolrServerException(ex);
-    } finally {
-      if (req != null) req.close();
-      SolrRequestInfo.clearRequestInfo();
-    }
-  }
-
-  private Set<ContentStream> getContentStreams(SolrRequest request) throws IOException {
-    if (request.getMethod() == SolrRequest.METHOD.GET) return null;
-    if (request instanceof ContentStreamUpdateRequest) {
-      ContentStreamUpdateRequest csur = (ContentStreamUpdateRequest) request;
-      Collection<ContentStream> cs = csur.getContentStreams();
-      if (cs != null) return new HashSet<>(cs);
-    }
-    RequestWriter.ContentWriter contentWriter = request.getContentWriter(CommonParams.JAVABIN_MIME);
-    final String cType = contentWriter == null ? CommonParams.JAVABIN_MIME : contentWriter.getContentType();
-
-    return Collections.singleton(new ContentStreamBase() {
-
-      @Override
-      public InputStream getStream() throws IOException {
-        BAOS baos = new BAOS();
-        if (contentWriter != null) {
-          contentWriter.write(baos);
-        } else {
-          new BinaryRequestWriter().write(request, baos);
-        }
-        return new ByteArrayInputStream(baos.toByteArray());
-      }
-
-      @Override
-      public String getContentType() {
-        return cType;
-
-      }
-    });
-  }
-
-  private JavaBinCodec createJavaBinCodec(final StreamingResponseCallback callback, final BinaryResponseWriter.Resolver resolver) {
-    return new JavaBinCodec(resolver) {
-
-      @Override
-      public void writeSolrDocument(SolrDocument doc) {
-        callback.streamSolrDocument(doc);
-        //super.writeSolrDocument( doc, fields );
-      }
-
-      @Override
-      public void writeSolrDocumentList(SolrDocumentList docs) throws IOException {
-        if (docs.size() > 0) {
-          SolrDocumentList tmp = new SolrDocumentList();
-          tmp.setMaxScore(docs.getMaxScore());
-          tmp.setNumFound(docs.getNumFound());
-          tmp.setStart(docs.getStart());
-          docs = tmp;
-        }
-        callback.streamDocListInfo(docs.getNumFound(), docs.getStart(), docs.getMaxScore());
-        super.writeSolrDocumentList(docs);
-      }
-
-    };
-  }
-
-  private static void checkForExceptions(SolrQueryResponse rsp) throws Exception {
-    if (rsp.getException() != null) {
-      if (rsp.getException() instanceof SolrException) {
-        throw rsp.getException();
-      }
-      throw new SolrServerException(rsp.getException());
-    }
-
-  }
-
-  /**
-   * Shut down all cores within the EmbeddedSolrServer instance
-   */
-  @Override
-  public void close() throws IOException {
-    coreContainer.shutdown();
-  }
-
-  /**
-   * Getter method for the CoreContainer
-   *
-   * @return the core container
-   */
-  public CoreContainer getCoreContainer() {
-    return coreContainer;
-  }
-}
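
Aside: a usage sketch for the class above, assuming a solr home at ./solr-home containing a core named "core1" (both names are illustrative, not part of this commit):

    import java.nio.file.Paths;

    import org.apache.solr.client.solrj.SolrQuery;
    import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
    import org.apache.solr.common.SolrInputDocument;

    public class EmbeddedSketch {
      public static void main(String[] args) throws Exception {
        // close() shuts down the wrapped CoreContainer, per the javadoc above,
        // so try-with-resources is the natural lifecycle.
        try (EmbeddedSolrServer server = new EmbeddedSolrServer(Paths.get("solr-home"), "core1")) {
          SolrInputDocument doc = new SolrInputDocument();
          doc.addField("id", "1");
          server.add(doc);
          server.commit();
          System.out.println(server.query(new SolrQuery("*:*")).getResults().getNumFound());
        }
      }
    }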

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettyConfig.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettyConfig.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettyConfig.java
deleted file mode 100644
index 28c3cdf..0000000
--- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettyConfig.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.client.solrj.embedded;
-
-import org.eclipse.jetty.servlet.ServletHolder;
-
-import javax.servlet.Filter;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.TreeMap;
-
-public class JettyConfig {
-
-  public final int port;
-
-  public final String context;
-
-  public final boolean stopAtShutdown;
-  
-  public final Long waitForLoadingCoresToFinishMs;
-
-  public final Map<ServletHolder, String> extraServlets;
-
-  public final Map<Class<? extends Filter>, String> extraFilters;
-
-  public final SSLConfig sslConfig;
-
-  private JettyConfig(int port, String context, boolean stopAtShutdown, Long waitForLoadingCoresToFinishMs, Map<ServletHolder, String> extraServlets,
-                      Map<Class<? extends Filter>, String> extraFilters, SSLConfig sslConfig) {
-    this.port = port;
-    this.context = context;
-    this.stopAtShutdown = stopAtShutdown;
-    this.waitForLoadingCoresToFinishMs = waitForLoadingCoresToFinishMs;
-    this.extraServlets = extraServlets;
-    this.extraFilters = extraFilters;
-    this.sslConfig = sslConfig;
-  }
-
-  public static Builder builder() {
-    return new Builder();
-  }
-
-  public static Builder builder(JettyConfig other) {
-    Builder builder = new Builder();
-    builder.port = other.port;
-    builder.context = other.context;
-    builder.stopAtShutdown = other.stopAtShutdown;
-    builder.extraServlets = other.extraServlets;
-    builder.extraFilters = other.extraFilters;
-    builder.sslConfig = other.sslConfig;
-    return builder;
-  }
-
-  public static class Builder {
-
-    int port = 0;
-    String context = "/solr";
-    boolean stopAtShutdown = true;
-    Long waitForLoadingCoresToFinishMs = 300000L;
-    Map<ServletHolder, String> extraServlets = new TreeMap<>();
-    Map<Class<? extends Filter>, String> extraFilters = new LinkedHashMap<>();
-    SSLConfig sslConfig = null;
-
-    public Builder setPort(int port) {
-      this.port = port;
-      return this;
-    }
-
-    public Builder setContext(String context) {
-      this.context = context;
-      return this;
-    }
-
-    public Builder stopAtShutdown(boolean stopAtShutdown) {
-      this.stopAtShutdown = stopAtShutdown;
-      return this;
-    }
-    
-    public Builder waitForLoadingCoresToFinish(Long waitForLoadingCoresToFinishMs) {
-      this.waitForLoadingCoresToFinishMs = waitForLoadingCoresToFinishMs;
-      return this;
-    }
-
-    public Builder withServlet(ServletHolder servlet, String pathSpec) {
-      extraServlets.put(servlet, pathSpec);
-      return this;
-    }
-
-    public Builder withServlets(Map<ServletHolder, String> servlets) {
-      if (servlets != null)
-        extraServlets.putAll(servlets);
-      return this;
-    }
-
-    public Builder withFilter(Class<? extends Filter> filterClass, String pathSpec) {
-      extraFilters.put(filterClass, pathSpec);
-      return this;
-    }
-
-    public Builder withFilters(Map<Class<? extends Filter>, String> filters) {
-      if (filters != null)
-        extraFilters.putAll(filters);
-      return this;
-    }
-
-    public Builder withSSLConfig(SSLConfig sslConfig) {
-      this.sslConfig = sslConfig;
-      return this;
-    }
-
-    public JettyConfig build() {
-      return new JettyConfig(port, context, stopAtShutdown, waitForLoadingCoresToFinishMs, extraServlets, extraFilters, sslConfig);
-    }
-
-  }
-
-}
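
Aside: the builder above in use, a short sketch (the values shown are the builder defaults or illustrative choices, not requirements):

    import org.apache.solr.client.solrj.embedded.JettyConfig;

    public class JettyConfigSketch {
      public static void main(String[] args) {
        JettyConfig config = JettyConfig.builder()
            .setContext("/solr")                  // the builder default
            .setPort(0)                           // 0 lets Jetty pick a free port
            .stopAtShutdown(true)
            .waitForLoadingCoresToFinish(300000L) // five minutes, the builder default
            .build();
        System.out.println(config.context + " on port " + config.port);
      }
    }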

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
deleted file mode 100644
index 5fdec0f..0000000
--- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
+++ /dev/null
@@ -1,586 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.client.solrj.embedded;
-
-import javax.servlet.DispatcherType;
-import javax.servlet.Filter;
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.EnumSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Random;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.servlet.SolrDispatchFilter;
-import org.eclipse.jetty.server.Connector;
-import org.eclipse.jetty.server.HttpConfiguration;
-import org.eclipse.jetty.server.HttpConnectionFactory;
-import org.eclipse.jetty.server.SecureRequestCustomizer;
-import org.eclipse.jetty.server.Server;
-import org.eclipse.jetty.server.ServerConnector;
-import org.eclipse.jetty.server.SslConnectionFactory;
-import org.eclipse.jetty.server.handler.gzip.GzipHandler;
-import org.eclipse.jetty.server.session.DefaultSessionIdManager;
-import org.eclipse.jetty.servlet.FilterHolder;
-import org.eclipse.jetty.servlet.ServletContextHandler;
-import org.eclipse.jetty.servlet.ServletHolder;
-import org.eclipse.jetty.servlet.Source;
-import org.eclipse.jetty.util.component.LifeCycle;
-import org.eclipse.jetty.util.ssl.SslContextFactory;
-import org.eclipse.jetty.util.thread.QueuedThreadPool;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
-
-/**
- * Run solr using jetty
- * 
- * @since solr 1.3
- */
-public class JettySolrRunner {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private static final int THREAD_POOL_MAX_THREADS = 10000;
-  // NOTE: needs to be larger than SolrHttpClient.threadPoolSweeperMaxIdleTime
-  private static final int THREAD_POOL_MAX_IDLE_TIME_MS = 120000;
-  
-  Server server;
-
-  FilterHolder dispatchFilter;
-  FilterHolder debugFilter;
-
-  private boolean waitOnSolr = false;
-  private int jettyPort = -1;
-
-  private final JettyConfig config;
-  private final String solrHome;
-  private final Properties nodeProperties;
-
-  private volatile boolean startedBefore = false;
-
-  private LinkedList<FilterHolder> extraFilters;
-
-  private static final String excludePatterns = "/css/.+,/js/.+,/img/.+,/tpl/.+";
-
-  private int proxyPort = -1;
-
-  public static class DebugFilter implements Filter {
-    private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-    private AtomicLong nRequests = new AtomicLong();
-    
-    List<Delay> delays = new ArrayList<>();
-
-    public long getTotalRequests() {
-      return nRequests.get();
-
-    }
-    
-    /**
-     * Introduce a delay of specified milliseconds for the specified request.
-     *
-     * @param reason Info message logged when delay occurs
-     * @param count The count-th request will experience a delay
-     * @param delay There will be a delay of this many milliseconds
-     */
-    public void addDelay(String reason, int count, int delay) {
-      delays.add(new Delay(reason, count, delay));
-    }
-    
-    /**
-     * Remove any delay introduced before.
-     */
-    public void unsetDelay() {
-      delays.clear();
-    }
-
-
-    @Override
-    public void init(FilterConfig filterConfig) throws ServletException { }
-
-    @Override
-    public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain) throws IOException, ServletException {
-      nRequests.incrementAndGet();
-      executeDelay();
-      filterChain.doFilter(servletRequest, servletResponse);
-    }
-
-    @Override
-    public void destroy() { }
-    
-    private void executeDelay() {
-      int delayMs = 0;
-      for (Delay delay: delays) {
-        if (delay.counter.decrementAndGet() == 0) {
-          // only log when this request actually trips the configured delay
-          log.info("Delaying " + delay.delayValue + "ms, for reason: " + delay.reason);
-          delayMs += delay.delayValue;
-        }
-      }
-
-      if (delayMs > 0) {
-        this.log.info("Pausing this socket connection for " + delayMs + "ms...");
-        try {
-          Thread.sleep(delayMs);
-        } catch (InterruptedException e) {
-          throw new RuntimeException(e);
-        }
-        this.log.info("Waking up after the delay of " + delayMs + "ms...");
-      }
-    }
-
-  }
-
-  /**
-   * Create a new JettySolrRunner.
-   *
-   * After construction, you must start the jetty with {@link #start()}
-   *
-   * @param solrHome the solr home directory to use
-   * @param context the context to run in
-   * @param port the port to run on
-   */
-  public JettySolrRunner(String solrHome, String context, int port) {
-    this(solrHome, JettyConfig.builder().setContext(context).setPort(port).build());
-  }
-
-
-  /**
-   * Construct a JettySolrRunner
-   *
-   * After construction, you must start the jetty with {@link #start()}
-   *
-   * @param solrHome    the base path to run from
-   * @param config the configuration
-   */
-  public JettySolrRunner(String solrHome, JettyConfig config) {
-    this(solrHome, new Properties(), config);
-  }
-
-  /**
-   * Construct a JettySolrRunner
-   *
-   * After construction, you must start the jetty with {@link #start()}
-   *
-   * @param solrHome            the solrHome to use
-   * @param nodeProperties      the container properties
-   * @param config         the configuration
-   */
-  public JettySolrRunner(String solrHome, Properties nodeProperties, JettyConfig config) {
-
-    this.solrHome = solrHome;
-    this.config = config;
-    this.nodeProperties = nodeProperties;
-
-    this.init(this.config.port);
-  }
-  
-  private void init(int port) {
-
-    QueuedThreadPool qtp = new QueuedThreadPool();
-    qtp.setMaxThreads(THREAD_POOL_MAX_THREADS);
-    qtp.setIdleTimeout(THREAD_POOL_MAX_IDLE_TIME_MS);
-    qtp.setStopTimeout((int) TimeUnit.MINUTES.toMillis(1));
-    server = new Server(qtp);
-    server.manage(qtp);
-    server.setStopAtShutdown(config.stopAtShutdown);
-
-    if (System.getProperty("jetty.testMode") != null) {
-      // if this property is true, then jetty will be configured to use SSL
-      // leveraging the same system properties as java to specify
-      // the keystore/truststore if they are set unless specific config
-      // is passed via the constructor.
-      //
-      // This means we will use the same truststore, keystore (and keys) for
-      // the server as well as any client actions taken by this JVM in
-      // talking to that server, but for the purposes of testing that should 
-      // be good enough
-      final SslContextFactory sslcontext = SSLConfig.createContextFactory(config.sslConfig);
-      
-      ServerConnector connector;
-      if (sslcontext != null) {
-        HttpConfiguration configuration = new HttpConfiguration();
-        configuration.setSecureScheme("https");
-        configuration.addCustomizer(new SecureRequestCustomizer());
-        connector = new ServerConnector(server, new SslConnectionFactory(sslcontext, "http/1.1"),
-            new HttpConnectionFactory(configuration));
-      } else {
-        connector = new ServerConnector(server, new HttpConnectionFactory());
-      }
-
-      connector.setReuseAddress(true);
-      connector.setSoLingerTime(-1);
-      connector.setPort(port);
-      connector.setHost("127.0.0.1");
-      connector.setIdleTimeout(THREAD_POOL_MAX_IDLE_TIME_MS);
-      
-      server.setConnectors(new Connector[] {connector});
-      server.setSessionIdManager(new DefaultSessionIdManager(server, new Random()));
-    } else {
-      ServerConnector connector = new ServerConnector(server, new HttpConnectionFactory());
-      connector.setPort(port);
-      connector.setSoLingerTime(-1);
-      connector.setIdleTimeout(THREAD_POOL_MAX_IDLE_TIME_MS);
-      server.setConnectors(new Connector[] {connector});
-    }
-
-    // Initialize the servlets
-    final ServletContextHandler root = new ServletContextHandler(server, config.context, ServletContextHandler.SESSIONS);
-
-    server.addLifeCycleListener(new LifeCycle.Listener() {
-
-      @Override
-      public void lifeCycleStopping(LifeCycle arg0) {
-      }
-
-      @Override
-      public void lifeCycleStopped(LifeCycle arg0) {}
-
-      @Override
-      public void lifeCycleStarting(LifeCycle arg0) {
-        synchronized (JettySolrRunner.this) {
-          waitOnSolr = true;
-          JettySolrRunner.this.notify();
-        }
-      }
-
-      @Override
-      public void lifeCycleStarted(LifeCycle arg0) {
-
-        jettyPort = getFirstConnectorPort();
-        int port = jettyPort;
-        if (proxyPort != -1) port = proxyPort;
-        nodeProperties.setProperty("hostPort", Integer.toString(port));
-        nodeProperties.setProperty("hostContext", config.context);
-
-        root.getServletContext().setAttribute(SolrDispatchFilter.PROPERTIES_ATTRIBUTE, nodeProperties);
-        root.getServletContext().setAttribute(SolrDispatchFilter.SOLRHOME_ATTRIBUTE, solrHome);
-
-        log.info("Jetty properties: {}", nodeProperties);
-
-        debugFilter = root.addFilter(DebugFilter.class, "*", EnumSet.of(DispatcherType.REQUEST) );
-        extraFilters = new LinkedList<>();
-        for (Class<? extends Filter> filterClass : config.extraFilters.keySet()) {
-          extraFilters.add(root.addFilter(filterClass, config.extraFilters.get(filterClass),
-              EnumSet.of(DispatcherType.REQUEST)));
-        }
-
-        for (ServletHolder servletHolder : config.extraServlets.keySet()) {
-          String pathSpec = config.extraServlets.get(servletHolder);
-          root.addServlet(servletHolder, pathSpec);
-        }
-        dispatchFilter = root.getServletHandler().newFilterHolder(Source.EMBEDDED);
-        dispatchFilter.setHeldClass(SolrDispatchFilter.class);
-        dispatchFilter.setInitParameter("excludePatterns", excludePatterns);
-        root.addFilter(dispatchFilter, "*", EnumSet.of(DispatcherType.REQUEST));
-      }
-
-      @Override
-      public void lifeCycleFailure(LifeCycle arg0, Throwable arg1) {
-        System.clearProperty("hostPort");
-      }
-    });
-
-    // for some reason, there must be a servlet for this to get applied
-    root.addServlet(Servlet404.class, "/*");
-    GzipHandler gzipHandler = new GzipHandler();
-    gzipHandler.setHandler(root);
-
-    gzipHandler.setMinGzipSize(0);
-    gzipHandler.setCheckGzExists(false);
-    gzipHandler.setCompressionLevel(-1);
-    gzipHandler.setExcludedAgentPatterns(".*MSIE.6\\.0.*");
-    gzipHandler.setIncludedMethods("GET");
-
-    server.setHandler(gzipHandler);
-  }
-
-  /**
-   * @return the {@link SolrDispatchFilter} for this node
-   */
-  public SolrDispatchFilter getSolrDispatchFilter() { return (SolrDispatchFilter) dispatchFilter.getFilter(); }
-
-  /**
-   * @return the {@link CoreContainer} for this node
-   */
-  public CoreContainer getCoreContainer() {
-    if (getSolrDispatchFilter() == null || getSolrDispatchFilter().getCores() == null) {
-      return null;
-    }
-    return getSolrDispatchFilter().getCores();
-  }
-
-  public String getNodeName() {
-    return getCoreContainer().getZkController().getNodeName();
-  }
-
-  public boolean isRunning() {
-    return server.isRunning();
-  }
-  
-  public boolean isStopped() {
-    return server.isStopped();
-  }
-
-  // ------------------------------------------------------------------------------------------------
-  // ------------------------------------------------------------------------------------------------
-
-  /**
-   * Start the Jetty server
-   *
-   * If the server has been started before, it will restart using the same port
-   *
-   * @throws Exception if an error occurs on startup
-   */
-  public void start() throws Exception {
-    start(true);
-  }
-
-  /**
-   * Start the Jetty server
-   *
-   * @param reusePort when true, will start up on the same port as used by any
-   *                  previous runs of this JettySolrRunner.  If false, will use
-   *                  the port specified by the server's JettyConfig.
-   *
-   * @throws Exception if an error occurs on startup
-   */
-  public void start(boolean reusePort) throws Exception {
-    // Do not let Jetty/Solr pollute the MDC for this thread
-    Map<String, String> prevContext = MDC.getCopyOfContextMap();
-    MDC.clear();
-    try {
-      // if started before, make a new server
-      if (startedBefore) {
-        waitOnSolr = false;
-        int port = reusePort ? jettyPort : this.config.port;
-        init(port);
-      } else {
-        startedBefore = true;
-      }
-
-      if (!server.isRunning()) {
-        server.start();
-      }
-      synchronized (JettySolrRunner.this) {
-        int cnt = 0;
-        while (!waitOnSolr) {
-          this.wait(100);
-          if (cnt++ == 5) {
-            throw new RuntimeException("Jetty/Solr unresponsive");
-          }
-        }
-      }
-      
-      if (config.waitForLoadingCoresToFinishMs != null && config.waitForLoadingCoresToFinishMs > 0L) waitForLoadingCoresToFinish(config.waitForLoadingCoresToFinishMs);
-    } finally {
-      if (prevContext != null)  {
-        MDC.setContextMap(prevContext);
-      } else {
-        MDC.clear();
-      }
-    }
-  }
-
-  /**
-   * Stop the Jetty server
-   *
-   * @throws Exception if an error occurs on shutdown
-   */
-  public void stop() throws Exception {
-    // Do not let Jetty/Solr pollute the MDC for this thread
-    Map<String, String> prevContext = MDC.getCopyOfContextMap();
-    MDC.clear();
-    try {
-      Filter filter = dispatchFilter.getFilter();
-
-      server.stop();
-
-      if (server.getState().equals(Server.FAILED)) {
-        filter.destroy();
-        if (extraFilters != null) {
-          for (FilterHolder f : extraFilters) {
-            f.getFilter().destroy();
-          }
-        }
-      }
-
-      server.join();
-    } finally {
-      if (prevContext != null)  {
-        MDC.setContextMap(prevContext);
-      } else {
-        MDC.clear();
-      }
-    }
-  }
-
-  /**
-   * Returns the Local Port of the jetty Server.
-   * 
-   * @exception RuntimeException if there is no Connector
-   */
-  private int getFirstConnectorPort() {
-    Connector[] conns = server.getConnectors();
-    if (0 == conns.length) {
-      throw new RuntimeException("Jetty Server has no Connectors");
-    }
-    return ((ServerConnector) conns[0]).getLocalPort();
-  }
-  
-  /**
-   * Returns the Local Port of the jetty Server.
-   * 
-   * @exception RuntimeException if there is no Connector
-   */
-  public int getLocalPort() {
-    if (jettyPort == -1) {
-      throw new IllegalStateException("You cannot get the port until this instance has started");
-    }
-    return (proxyPort != -1) ? proxyPort : jettyPort;
-  }
-  
-  /**
-   * Sets the port of a local socket proxy that sits in front of this server; if set,
-   * all client traffic will flow through the proxy, making it easy to simulate
-   * network partitions.
-   */
-  public void setProxyPort(int proxyPort) {
-    this.proxyPort = proxyPort;
-  }
-
-  /**
-   * Returns a base URL consisting of the protocol, host, and port for a
-   * Connector in use by the Jetty Server contained in this runner.
-   */
-  public URL getBaseUrl() {
-    String protocol = null;
-    try {
-      Connector[] conns = server.getConnectors();
-      if (0 == conns.length) {
-        throw new IllegalStateException("Jetty Server has no Connectors");
-      }
-      ServerConnector c = (ServerConnector) conns[0];
-      if (c.getLocalPort() < 0) {
-        throw new IllegalStateException("Jetty Connector is not open: " + 
-                                        c.getLocalPort());
-      }
-      protocol = c.getDefaultProtocol().startsWith("SSL")  ? "https" : "http";
-      return new URL(protocol, c.getHost(), c.getLocalPort(), config.context);
-
-    } catch (MalformedURLException e) {
-      throw new  IllegalStateException
-        ("Java could not make sense of protocol: " + protocol, e);
-    }
-  }
-
-  public SolrClient newClient() {
-    return new HttpSolrClient.Builder(getBaseUrl().toString()).build();
-  }
-  
-  public SolrClient newClient(int connectionTimeoutMillis, int socketTimeoutMillis) {
-    return new HttpSolrClient.Builder(getBaseUrl().toString())
-        .withConnectionTimeout(connectionTimeoutMillis)
-        .withSocketTimeout(socketTimeoutMillis)
-        .build();
-  }
-
-  public DebugFilter getDebugFilter() {
-    return (DebugFilter)debugFilter.getFilter();
-  }
-
-  // --------------------------------------------------------------
-  // --------------------------------------------------------------
-
-  /**
-   * This is a stupid hack to give jetty something to attach to
-   */
-  public static class Servlet404 extends HttpServlet {
-    @Override
-    public void service(HttpServletRequest req, HttpServletResponse res)
-        throws IOException {
-      res.sendError(404, "Can not find: " + req.getRequestURI());
-    }
-  }
-
-  /**
-   * A main class that starts jetty+solr This is useful for debugging
-   */
-  public static void main(String[] args) {
-    try {
-      JettySolrRunner jetty = new JettySolrRunner(".", "/solr", 8983);
-      jetty.start();
-    } catch (Exception ex) {
-      ex.printStackTrace();
-    }
-  }
-
-  /**
-   * @return the Solr home directory of this JettySolrRunner
-   */
-  public String getSolrHome() {
-    return solrHome;
-  }
-
-  /**
-   * @return this node's properties
-   */
-  public Properties getNodeProperties() {
-    return nodeProperties;
-  }
-
-  private void waitForLoadingCoresToFinish(long timeoutMs) {
-    if (dispatchFilter != null) {
-      SolrDispatchFilter solrFilter = (SolrDispatchFilter) dispatchFilter.getFilter();
-      CoreContainer cores = solrFilter.getCores();
-      if (cores != null) {
-        cores.waitForLoadingCoresToFinish(timeoutMs);
-      }
-    }
-  }
-  
-  static class Delay {
-    final AtomicInteger counter;
-    final int delayValue;
-    final String reason;
-    
-    public Delay(String reason, int counter, int delay) {
-      this.reason = reason;
-      this.counter = new AtomicInteger(counter);
-      this.delayValue = delay;
-    }
-  }
-}
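
Aside: a hedged lifecycle sketch tying JettyConfig and JettySolrRunner together (the solr home path is a placeholder):

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.embedded.JettyConfig;
    import org.apache.solr.client.solrj.embedded.JettySolrRunner;

    public class RunnerSketch {
      public static void main(String[] args) throws Exception {
        JettySolrRunner jetty = new JettySolrRunner("/path/to/solr/home",
            JettyConfig.builder().setPort(0).build()); // 0 = pick a free port
        jetty.start(); // waits for Jetty, then for loading cores to finish
        System.out.println("base URL: " + jetty.getBaseUrl());
        try (SolrClient client = jetty.newClient()) {
          // issue requests against the embedded node here
        }
        jetty.stop();
      }
    }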

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/client/solrj/embedded/SSLConfig.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/SSLConfig.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/SSLConfig.java
deleted file mode 100644
index 62c9024..0000000
--- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/SSLConfig.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.client.solrj.embedded;
-
-import org.eclipse.jetty.util.ssl.SslContextFactory;
-
-/** 
- * Encapsulates settings related to SSL Configuration for an embedded Jetty Server.
- * NOTE: all other settings are ignored if {@link #isSSLMode} is false.
- * @see #setUseSSL
- */
-public class SSLConfig {
-  
-  private boolean useSsl;
-  private boolean clientAuth;
-  private String keyStore;
-  private String keyStorePassword;
-  private String trustStore;
-  private String trustStorePassword;
-
-  /** NOTE: all other settings are ignored if useSSL is false; trustStore settings are ignored if clientAuth is false */
-  public SSLConfig(boolean useSSL, boolean clientAuth, String keyStore, String keyStorePassword, String trustStore, String trustStorePassword) {
-    this.useSsl = useSSL;
-    this.clientAuth = clientAuth;
-    this.keyStore = keyStore;
-    this.keyStorePassword = keyStorePassword;
-    this.trustStore = trustStore;
-    this.trustStorePassword = trustStorePassword;
-  }
-  
-  public void setUseSSL(boolean useSSL) {
-    this.useSsl = useSSL;
-  }
-  
-  public void setClientAuth(boolean clientAuth) {
-    this.clientAuth = clientAuth;
-  }
-  
-  /** All other settings on this object are ignored unless this is true */
-  public boolean isSSLMode() {
-    return useSsl;
-  }
-  
-  public boolean isClientAuthMode() {
-    return clientAuth;
-  }
-
-  public String getKeyStore() {
-    return keyStore;
-  }
-
-  public String getKeyStorePassword() {
-    return keyStorePassword;
-  }
-
-  public String getTrustStore() {
-    return trustStore;
-  }
-
-  public String getTrustStorePassword() {
-    return trustStorePassword;
-  }
-
-  /**
-   * Returns an SslContextFactory that should be used by a jetty server based on the specified 
-   * SSLConfig param which may be null.
-   *
-   * if the SSLConfig param is non-null, then this method will return the results of 
-   * {@link #createContextFactory()}.
-   * 
-   * If the SSLConfig param is null, then this method will return null unless the 
-   * <code>tests.jettySsl</code> system property is true, in which case standard "javax.net.ssl.*" 
-   * system properties will be used instead, along with "tests.jettySsl.clientAuth".
-   * 
-   * @see #createContextFactory()
-   */
-  public static SslContextFactory createContextFactory(SSLConfig sslConfig) {
-
-    if (sslConfig != null) {
-      return sslConfig.createContextFactory();
-    }
-    // else...
-    if (Boolean.getBoolean("tests.jettySsl")) {
-      return configureSslFromSysProps();
-    }
-    // else...
-    return null;
-  }
-  
-  /**
-   * Returns an SslContextFactory that should be used by a jetty server based on this SSLConfig instance, 
-   * or null if SSL should not be used.
-   *
-   * The default implementation generates a simple factory according to the keystore, truststore, 
-   * and clientAuth properties of this object.
-   *
-   * @see #getKeyStore
-   * @see #getKeyStorePassword
-   * @see #isClientAuthMode
-   * @see #getTrustStore
-   * @see #getTrustStorePassword
-   */
-  public SslContextFactory createContextFactory() {
-
-    if (! isSSLMode()) {
-      return null;
-    }
-    // else...
-    
-    SslContextFactory factory = new SslContextFactory(false);
-    if (getKeyStore() != null)
-      factory.setKeyStorePath(getKeyStore());
-    if (getKeyStorePassword() != null)
-      factory.setKeyStorePassword(getKeyStorePassword());
-    
-    factory.setNeedClientAuth(isClientAuthMode());
-    
-    if (isClientAuthMode()) {
-      if (getTrustStore() != null)
-        factory.setTrustStorePath(getTrustStore());
-      if (getTrustStorePassword() != null)
-        factory.setTrustStorePassword(getTrustStorePassword());
-    }
-    return factory;
-
-  }
-
-  private static SslContextFactory configureSslFromSysProps() {
-
-    SslContextFactory sslcontext = new SslContextFactory(false);
-
-    if (null != System.getProperty("javax.net.ssl.keyStore")) {
-      sslcontext.setKeyStorePath
-          (System.getProperty("javax.net.ssl.keyStore"));
-    }
-    if (null != System.getProperty("javax.net.ssl.keyStorePassword")) {
-      sslcontext.setKeyStorePassword
-          (System.getProperty("javax.net.ssl.keyStorePassword"));
-    }
-    if (null != System.getProperty("javax.net.ssl.trustStore")) {
-      sslcontext.setTrustStorePath
-          (System.getProperty("javax.net.ssl.trustStore"));
-    }
-    if (null != System.getProperty("javax.net.ssl.trustStorePassword")) {
-      sslcontext.setTrustStorePassword
-          (System.getProperty("javax.net.ssl.trustStorePassword"));
-    }
-    sslcontext.setNeedClientAuth(Boolean.getBoolean("tests.jettySsl.clientAuth"));
-
-    return sslcontext;
-  }
-}
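
Aside: a small sketch of the two factory paths above (keystore path and password are placeholders):

    import org.apache.solr.client.solrj.embedded.SSLConfig;
    import org.eclipse.jetty.util.ssl.SslContextFactory;

    public class SslSketch {
      public static void main(String[] args) {
        // Explicit config: useSSL=true, clientAuth=false, so trustStore args are ignored.
        SSLConfig ssl = new SSLConfig(true, false, "/path/to/keystore.jks", "secret", null, null);
        SslContextFactory fromConfig = SSLConfig.createContextFactory(ssl); // non-null
        // Null config: falls back to the tests.jettySsl system property, else null.
        SslContextFactory fromProps = SSLConfig.createContextFactory(null);
        System.out.println(fromConfig + " / " + fromProps);
      }
    }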

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/client/solrj/embedded/package-info.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/package-info.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/package-info.java
deleted file mode 100644
index a74c745..0000000
--- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/package-info.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- 
-/** 
- * SolrJ client implementations for embedded solr access.
- * <p>
- * See {@link org.apache.solr.client.solrj} for additional details.
- */
-package org.apache.solr.client.solrj.embedded;
-
-

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/ActionThrottle.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ActionThrottle.java b/solr/core/src/java/org/apache/solr/cloud/ActionThrottle.java
deleted file mode 100644
index 1724b53..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/ActionThrottle.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.common.util.TimeSource;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-// this class may be accessed by multiple threads, but only one at a time
-public class ActionThrottle {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  private volatile Long lastActionStartedAt;
-  private volatile Long minMsBetweenActions;
-
-  private final String name;
-  private final TimeSource timeSource;
-
-  public ActionThrottle(String name, long minMsBetweenActions) {
-    this(name, minMsBetweenActions, TimeSource.NANO_TIME);
-  }
-  
-  public ActionThrottle(String name, long minMsBetweenActions, TimeSource timeSource) {
-    this.name = name;
-    this.minMsBetweenActions = minMsBetweenActions;
-    this.timeSource = timeSource;
-  }
-
-  public ActionThrottle(String name, long minMsBetweenActions, long lastActionStartedAt)  {
-    this(name, minMsBetweenActions, lastActionStartedAt, TimeSource.NANO_TIME);
-  }
-
-  public ActionThrottle(String name, long minMsBetweenActions, long lastActionStartedAt, TimeSource timeSource)  {
-    this.name = name;
-    this.minMsBetweenActions = minMsBetweenActions;
-    this.lastActionStartedAt = lastActionStartedAt;
-    this.timeSource = timeSource;
-  }
-
-  public void reset() {
-    lastActionStartedAt = null;
-  }
-
-  public void markAttemptingAction() {
-    lastActionStartedAt = timeSource.getTimeNs();
-  }
-  
-  public void minimumWaitBetweenActions() {
-    if (lastActionStartedAt == null) {
-      return;
-    }
-    long diff = timeSource.getTimeNs() - lastActionStartedAt;
-    int diffMs = (int) TimeUnit.MILLISECONDS.convert(diff, TimeUnit.NANOSECONDS);
-    long minNsBetweenActions = TimeUnit.NANOSECONDS.convert(minMsBetweenActions, TimeUnit.MILLISECONDS);
-    log.debug("The last {} attempt started {}ms ago.", name, diffMs);
-    int sleep = 0;
-    
-    if (diffMs > 0 && diff < minNsBetweenActions) {
-      sleep = (int) TimeUnit.MILLISECONDS.convert(minNsBetweenActions - diff, TimeUnit.NANOSECONDS);
-    } else if (diffMs == 0) {
-      sleep = minMsBetweenActions.intValue();
-    }
-    
-    if (sleep > 0) {
-      log.info("Throttling {} attempts - waiting for {}ms", name, sleep);
-      try {
-        timeSource.sleep(sleep);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-      }
-    }
-  }
-
-  public Long getLastActionStartedAt() {
-    return lastActionStartedAt;
-  }
-}
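
Aside: a usage sketch of the throttle above ("leader" and the 5s interval are illustrative):

    import org.apache.solr.cloud.ActionThrottle;

    public class ThrottleSketch {
      public static void main(String[] args) {
        ActionThrottle throttle = new ActionThrottle("leader", 5000); // >= 5s between actions
        for (int i = 0; i < 2; i++) {
          throttle.minimumWaitBetweenActions(); // no-op on pass 0; sleeps ~5s on pass 1
          throttle.markAttemptingAction();      // record the start of this attempt
          // ... perform the throttled action here ...
        }
      }
    }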

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/ActiveReplicaWatcher.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ActiveReplicaWatcher.java b/solr/core/src/java/org/apache/solr/cloud/ActiveReplicaWatcher.java
deleted file mode 100644
index c6bd807..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/ActiveReplicaWatcher.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.solr.common.SolrCloseableLatch;
-import org.apache.solr.common.cloud.CollectionStateWatcher;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Watch for replicas to become {@link org.apache.solr.common.cloud.Replica.State#ACTIVE}. Watcher is
- * terminated (its {@link #onStateChanged(Set, DocCollection)} method returns false) when all listed
- * replicas become active.
- * <p>Additionally, the provided {@link SolrCloseableLatch} instance can be used to await
- * for all listed replicas to become active.</p>
- */
-public class ActiveReplicaWatcher implements CollectionStateWatcher {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final String collection;
-  private final List<String> replicaIds = new ArrayList<>();
-  private final List<String> solrCoreNames = new ArrayList<>();
-  private final List<Replica> activeReplicas = new ArrayList<>();
-
-  private int lastZkVersion = -1;
-
-  private SolrCloseableLatch latch;
-
-  /**
-   * Construct the watcher. At least one replicaId or solrCoreName must be provided.
-   * @param collection collection name
-   * @param replicaIds list of replica id-s
-   * @param solrCoreNames list of SolrCore names
-   * @param latch optional latch to await for all provided replicas to become active. This latch will be
-   *                       counted down by at most the number of provided replica id-s / SolrCore names.
-   */
-  public ActiveReplicaWatcher(String collection, List<String> replicaIds, List<String> solrCoreNames, SolrCloseableLatch latch) {
-    if (replicaIds == null && solrCoreNames == null) {
-      throw new IllegalArgumentException("Either replicaId or solrCoreName must be provided.");
-    }
-    if (replicaIds != null) {
-      this.replicaIds.addAll(replicaIds);
-    }
-    if (solrCoreNames != null) {
-      this.solrCoreNames.addAll(solrCoreNames);
-    }
-    if (this.replicaIds.isEmpty() && this.solrCoreNames.isEmpty()) {
-      throw new IllegalArgumentException("At least one replicaId or solrCoreName must be provided");
-    }
-    this.collection = collection;
-    this.latch = latch;
-  }
-
-  /**
-   * Collection name.
-   */
-  public String getCollection() {
-    return collection;
-  }
-
-  /**
-   * Return the list of active replicas found so far.
-   */
-  public List<Replica> getActiveReplicas() {
-    return activeReplicas;
-  }
-
-  /**
-   * Return the list of replica ids that are not active yet (or unverified).
-   */
-  public List<String> getReplicaIds() {
-    return replicaIds;
-  }
-
-  /**
-   * Return a list of SolrCore names that are not active yet (or unverified).
-   */
-  public List<String> getSolrCoreNames() {
-    return solrCoreNames;
-  }
-
-  @Override
-  public String toString() {
-    return "ActiveReplicaWatcher@" + Long.toHexString(hashCode()) + "{" +
-        "collection='" + collection + '\'' +
-        ", replicaIds=" + replicaIds +
-        ", solrCoreNames=" + solrCoreNames +
-        ", latch=" + (latch != null ? latch.getCount() : "null") + "," +
-        ", activeReplicas=" + activeReplicas +
-        '}';
-  }
-
-  // synchronized due to SOLR-11535
-  @Override
-  public synchronized boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
-    log.debug("-- onStateChanged@" + Long.toHexString(hashCode()) + ": replicaIds=" + replicaIds + ", solrCoreNames=" + solrCoreNames +
-        (latch != null ? "\nlatch count=" + latch.getCount() : "") +
-        "\ncollectionState=" + collectionState);
-    if (collectionState == null) { // collection has been deleted - don't wait
-      log.debug("-- collection deleted, decrementing latch by " + replicaIds.size() + solrCoreNames.size());
-      if (latch != null) {
-        for (int i = 0; i < replicaIds.size() + solrCoreNames.size(); i++) {
-          latch.countDown();
-        }
-      }
-      replicaIds.clear();
-      solrCoreNames.clear();
-      return true;
-    }
-    if (replicaIds.isEmpty() && solrCoreNames.isEmpty()) {
-      log.debug("-- already done, exiting...");
-      return true;
-    }
-    if (collectionState.getZNodeVersion() == lastZkVersion) {
-      log.debug("-- spurious call with already seen zkVersion=" + lastZkVersion + ", ignoring...");
-      return false;
-    }
-    lastZkVersion = collectionState.getZNodeVersion();
-
-    for (Slice slice : collectionState.getSlices()) {
-      for (Replica replica : slice.getReplicas()) {
-        if (replicaIds.contains(replica.getName())) {
-          if (replica.isActive(liveNodes)) {
-            activeReplicas.add(replica);
-            replicaIds.remove(replica.getName());
-            if (latch != null) {
-              latch.countDown();
-            }
-          }
-        } else if (solrCoreNames.contains(replica.getStr(ZkStateReader.CORE_NAME_PROP))) {
-          if (replica.isActive(liveNodes)) {
-            activeReplicas.add(replica);
-            solrCoreNames.remove(replica.getStr(ZkStateReader.CORE_NAME_PROP));
-            if (latch != null) {
-              latch.countDown();
-            }
-          }
-        }
-      }
-    }
-    log.debug("-- " + Long.toHexString(hashCode()) + " now latch count=" + latch.getCount());
-    if (replicaIds.isEmpty() && solrCoreNames.isEmpty()) {
-      return true;
-    } else {
-      return false;
-    }
-  }
-}

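A minimal usage sketch for the watcher removed above (not part of the diff): it assumes a ZkStateReader and a SolrCloseable are in scope, that SolrCloseableLatch is constructed from a count and that closeable, and that the collection and replica names are hypothetical.

    // Block until two named replicas go ACTIVE, or time out after 30 seconds.
    void awaitActive(ZkStateReader zkStateReader, SolrCloseable closeable) throws Exception {
      SolrCloseableLatch latch = new SolrCloseableLatch(2, closeable);
      ActiveReplicaWatcher watcher = new ActiveReplicaWatcher("mycoll",
          Arrays.asList("core_node1", "core_node2"), null, latch);
      zkStateReader.registerCollectionStateWatcher("mycoll", watcher);
      try {
        if (!latch.await(30, TimeUnit.SECONDS)) {
          // timed out; watcher.getReplicaIds() still lists replicas that never went active
        }
      } finally {
        zkStateReader.removeCollectionStateWatcher("mycoll", watcher);
      }
    }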
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/CloudConfigSetService.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CloudConfigSetService.java b/solr/core/src/java/org/apache/solr/cloud/CloudConfigSetService.java
deleted file mode 100644
index 9b16d23..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/CloudConfigSetService.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-
-import org.apache.solr.cloud.api.collections.CreateCollectionCmd;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.core.ConfigSetService;
-import org.apache.solr.core.CoreDescriptor;
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class CloudConfigSetService extends ConfigSetService {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  private final ZkController zkController;
-
-  public CloudConfigSetService(SolrResourceLoader loader, ZkController zkController) {
-    super(loader);
-    this.zkController = zkController;
-  }
-
-  @Override
-  public SolrResourceLoader createCoreResourceLoader(CoreDescriptor cd) {
-    try {
-      // for back compat with cores that can create collections without the collections API
-      if (!zkController.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + cd.getCollectionName(), true)) {
-        CreateCollectionCmd.createCollectionZkNode(zkController.getSolrCloudManager().getDistribStateManager(), cd.getCollectionName(), cd.getCloudDescriptor().getParams());
-      }
-    } catch (KeeperException e) {
-      SolrException.log(log, null, e);
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      SolrException.log(log, null, e);
-    }
-
-    String configName = zkController.getZkStateReader().readConfigName(cd.getCollectionName());
-    return new ZkSolrResourceLoader(cd.getInstanceDir(), configName, parentLoader.getClassLoader(),
-        cd.getSubstitutableProperties(), zkController);
-  }
-
-  @Override
-  public String configName(CoreDescriptor cd) {
-    return "collection " + cd.getCloudDescriptor().getCollectionName();
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/CloudDescriptor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CloudDescriptor.java b/solr/core/src/java/org/apache/solr/cloud/CloudDescriptor.java
deleted file mode 100644
index 068191e..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/CloudDescriptor.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.solr.common.StringUtils;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.core.CoreDescriptor;
-import org.apache.solr.util.PropertiesUtil;
-
-import com.google.common.base.Strings;
-
-public class CloudDescriptor {
-
-  private final CoreDescriptor cd;
-  private String shardId;
-  private String collectionName;
-  private String roles = null;
-  private Integer numShards;
-  private String nodeName = null;
-  private Map<String,String> collectionParams = new HashMap<>();
-
-  private volatile boolean isLeader = false;
-  
-  // set to true once a core has registered in zk
-  // set to false on detecting a session expiration
-  private volatile boolean hasRegistered = false;
-  private volatile Replica.State lastPublished = Replica.State.ACTIVE;
-
-  public static final String NUM_SHARDS = "numShards";
-  
-  public static final String REPLICA_TYPE = "replicaType";
-  
-  /**
-   * The type of replica this core hosts
-   */
-  private final Replica.Type replicaType;
-
-  public CloudDescriptor(String coreName, Properties props, CoreDescriptor cd) {
-    this.cd = cd;
-    this.shardId = props.getProperty(CoreDescriptor.CORE_SHARD, null);
-    if (Strings.isNullOrEmpty(shardId))
-      this.shardId = null;
-    // If no collection name is specified, we default to the core name
-    this.collectionName = props.getProperty(CoreDescriptor.CORE_COLLECTION, coreName);
-    this.roles = props.getProperty(CoreDescriptor.CORE_ROLES, null);
-    this.nodeName = props.getProperty(CoreDescriptor.CORE_NODE_NAME);
-    if (Strings.isNullOrEmpty(nodeName))
-      this.nodeName = null;
-    this.numShards = PropertiesUtil.toInteger(props.getProperty(CloudDescriptor.NUM_SHARDS), null);
-    String replicaTypeStr = props.getProperty(CloudDescriptor.REPLICA_TYPE);
-    if (Strings.isNullOrEmpty(replicaTypeStr)) {
-      this.replicaType = Replica.Type.NRT;
-    } else {
-      this.replicaType = Replica.Type.valueOf(replicaTypeStr);
-    }
-    for (String propName : props.stringPropertyNames()) {
-      if (propName.startsWith(ZkController.COLLECTION_PARAM_PREFIX)) {
-        collectionParams.put(propName.substring(ZkController.COLLECTION_PARAM_PREFIX.length()), props.getProperty(propName));
-      }
-    }
-  }
-  
-  public boolean requiresTransactionLog() {
-    return this.replicaType != Replica.Type.PULL;
-  }
-  
-  public Replica.State getLastPublished() {
-    return lastPublished;
-  }
-
-  public void setLastPublished(Replica.State state) {
-    lastPublished = state;
-  }
-
-  public boolean isLeader() {
-    return isLeader;
-  }
-  
-  public void setLeader(boolean isLeader) {
-    this.isLeader = isLeader;
-  }
-  
-  public boolean hasRegistered() {
-    return hasRegistered;
-  }
-  
-  public void setHasRegistered(boolean hasRegistered) {
-    this.hasRegistered = hasRegistered;
-  }
-
-  public void setShardId(String shardId) {
-    this.shardId = shardId;
-  }
-  
-  public String getShardId() {
-    return shardId;
-  }
-  
-  public String getCollectionName() {
-    return collectionName;
-  }
-
-  public void setCollectionName(String collectionName) {
-    this.collectionName = collectionName;
-  }
-
-  public String getRoles() {
-    return roles;
-  }
-  
-  public void setRoles(String roles) {
-    this.roles = roles;
-  }
-  
-  /** Optional parameters that can change how a core is created. */
-  public Map<String, String> getParams() {
-    return collectionParams;
-  }
-
-  // setting only matters on core creation
-  public Integer getNumShards() {
-    return numShards;
-  }
-  
-  public void setNumShards(int numShards) {
-    this.numShards = numShards;
-  }
-  
-  public String getCoreNodeName() {
-    return nodeName;
-  }
-
-  public void setCoreNodeName(String nodeName) {
-    this.nodeName = nodeName;
-    if (nodeName == null) cd.getPersistableStandardProperties().remove(CoreDescriptor.CORE_NODE_NAME);
-    else cd.getPersistableStandardProperties().setProperty(CoreDescriptor.CORE_NODE_NAME, nodeName);
-  }
-
-  public void reload(CloudDescriptor reloadFrom) {
-    if (reloadFrom == null) return;
-
-    setShardId(StringUtils.isEmpty(reloadFrom.getShardId()) ? getShardId() : reloadFrom.getShardId());
-    setCollectionName(StringUtils.isEmpty(reloadFrom.getCollectionName()) ? getCollectionName() : reloadFrom.getCollectionName());
-    setRoles(StringUtils.isEmpty(reloadFrom.getRoles()) ? getRoles() : reloadFrom.getRoles());
-    if (reloadFrom.getNumShards() != null) {
-      setNumShards(reloadFrom.getNumShards());
-    }
-    setCoreNodeName(StringUtils.isEmpty(reloadFrom.getCoreNodeName()) ? getCoreNodeName() : reloadFrom.getCoreNodeName());
-    setLeader(reloadFrom.isLeader);
-    setHasRegistered(reloadFrom.hasRegistered);
-    setLastPublished(reloadFrom.getLastPublished());
-
-    for (Map.Entry<String, String> ent : reloadFrom.getParams().entrySet()) {
-      collectionParams.put(ent.getKey(), ent.getValue());
-    }
-  }
-
-  public Replica.Type getReplicaType() {
-    return replicaType;
-  }
-}

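A minimal sketch of how the constructor above consumes core.properties entries (not part of the diff); the property values and the coreDescriptor object are assumed for illustration, and imports are elided.

    Properties props = new Properties();
    props.setProperty(CoreDescriptor.CORE_COLLECTION, "mycoll"); // defaults to the core name if absent
    props.setProperty(CoreDescriptor.CORE_SHARD, "shard1");
    props.setProperty(CloudDescriptor.NUM_SHARDS, "2");
    props.setProperty(CloudDescriptor.REPLICA_TYPE, "TLOG");     // defaults to NRT if absent
    CloudDescriptor cloudDesc = new CloudDescriptor("mycore", props, coreDescriptor);
    assert cloudDesc.requiresTransactionLog(); // only PULL replicas skip the transaction log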
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java b/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
deleted file mode 100644
index 302703b..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.File;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.CoreDescriptor;
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-public class CloudUtil {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-
-  /**
-   * See if the coreNodeName has been taken over by another baseUrl; if it has, unload the core
-   * and throw an exception.
-   */
-  public static void checkSharedFSFailoverReplaced(CoreContainer cc, CoreDescriptor desc) {
-    if (!cc.isSharedFs(desc)) return;
-
-    ZkController zkController = cc.getZkController();
-    String thisCnn = zkController.getCoreNodeName(desc);
-    String thisBaseUrl = zkController.getBaseUrl();
-
-    log.debug("checkSharedFSFailoverReplaced running for coreNodeName={} baseUrl={}", thisCnn, thisBaseUrl);
-
-    // if we see our core node name on a different base url, unload
-    final DocCollection docCollection = zkController.getClusterState().getCollectionOrNull(desc.getCloudDescriptor().getCollectionName());
-    if (docCollection != null && docCollection.getSlicesMap() != null) {
-      Map<String,Slice> slicesMap = docCollection.getSlicesMap();
-      for (Slice slice : slicesMap.values()) {
-        for (Replica replica : slice.getReplicas()) {
-
-          String cnn = replica.getName();
-          String baseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
-          log.debug("compare against coreNodeName={} baseUrl={}", cnn, baseUrl);
-
-          if (thisCnn != null && thisCnn.equals(cnn)
-              && !thisBaseUrl.equals(baseUrl)) {
-            if (cc.getLoadedCoreNames().contains(desc.getName())) {
-              cc.unload(desc.getName());
-            }
-
-            try {
-              FileUtils.deleteDirectory(desc.getInstanceDir().toFile());
-            } catch (IOException e) {
-              SolrException.log(log, "Failed to delete instance dir for core:"
-                  + desc.getName() + " dir:" + desc.getInstanceDir());
-            }
-            log.error("", new SolrException(ErrorCode.SERVER_ERROR,
-                "Will not load SolrCore " + desc.getName()
-                    + " because it has been replaced due to failover."));
-            throw new SolrException(ErrorCode.SERVER_ERROR,
-                "Will not load SolrCore " + desc.getName()
-                    + " because it has been replaced due to failover.");
-          }
-        }
-      }
-    }
-  }
-
-  public static boolean replicaExists(ClusterState clusterState, String collection, String shard, String coreNodeName) {
-    DocCollection docCollection = clusterState.getCollectionOrNull(collection);
-    if (docCollection != null) {
-      Slice slice = docCollection.getSlice(shard);
-      if (slice != null) {
-        return slice.getReplica(coreNodeName) != null;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * Returns a displayable unified path to the given resource. For non-SolrCloud deployments this is
-   * the same as getConfigDir; for Cloud it is getConfigSetZkPath, ending in a "/".
-   * <p>
-   * <b>Note:</b> Do not use this to generate a valid file path; use it only for debug printing etc.
-   * @param loader Resource loader instance
-   * @return a String of path to resource
-   */
-  public static String unifiedResourcePath(SolrResourceLoader loader) {
-    return (loader instanceof ZkSolrResourceLoader) ?
-            ((ZkSolrResourceLoader) loader).getConfigSetZkPath() + "/" :
-            loader.getConfigDir() + File.separator;
-  }
-
-  /** Read the list of public keys from ZK. */
-  public static Map<String, byte[]> getTrustedKeys(SolrZkClient zk, String dir) {
-    Map<String, byte[]> result = new HashMap<>();
-    try {
-      List<String> children = zk.getChildren("/keys/" + dir, null, true);
-      for (String key : children) {
-        if (key.endsWith(".der")) result.put(key, zk.getData("/keys/" + dir +
-            "/" + key, null, null, true));
-      }
-    } catch (KeeperException.NoNodeException e) {
-      log.info("No public keys found at /keys/" + dir);
-      return Collections.emptyMap();
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to read crypto keys", e);
-    } catch (KeeperException e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to read crypto keys", e);
-    }
-    return result;
-  }
-
-}

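A minimal sketch of the replicaExists helper above in use (not part of the diff); the collection, shard, and replica names are hypothetical, and zkStateReader is assumed to be in scope.

    ClusterState clusterState = zkStateReader.getClusterState();
    if (!CloudUtil.replicaExists(clusterState, "mycoll", "shard1", "core_node3")) {
      // safe to (re)create the replica, or report it as missing
    }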
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/CurrentCoreDescriptorProvider.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CurrentCoreDescriptorProvider.java b/solr/core/src/java/org/apache/solr/cloud/CurrentCoreDescriptorProvider.java
deleted file mode 100644
index 29d0751..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/CurrentCoreDescriptorProvider.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.util.List;
-
-import org.apache.solr.core.CoreDescriptor;
-
-/**
- * Provide the current list of registered {@link CoreDescriptor}s.
- */
-public abstract class CurrentCoreDescriptorProvider {
-  public abstract List<CoreDescriptor> getCurrentDescriptors();
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/DistributedMap.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DistributedMap.java b/solr/core/src/java/org/apache/solr/cloud/DistributedMap.java
deleted file mode 100644
index c9f12e9..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/DistributedMap.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkCmdExecutor;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.KeeperException.NodeExistsException;
-import org.apache.zookeeper.data.Stat;
-
-/**
- * A distributed map.
- * This supports basic map functions, e.g. get, put, contains, for interaction with ZK, where
- * operations don't have to be ordered (unlike DistributedQueue).
- */
-public class DistributedMap {
-  protected final String dir;
-
-  protected SolrZkClient zookeeper;
-
-  protected static final String PREFIX = "mn-";
-
-  public DistributedMap(SolrZkClient zookeeper, String dir) {
-    this.dir = dir;
-
-    ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zookeeper.getZkClientTimeout());
-    try {
-      cmdExecutor.ensureExists(dir, zookeeper);
-    } catch (KeeperException e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, e);
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      throw new SolrException(ErrorCode.SERVER_ERROR, e);
-    }
-
-    this.zookeeper = zookeeper;
-  }
-
-
-  public void put(String trackingId, byte[] data) throws KeeperException, InterruptedException {
-    zookeeper.makePath(dir + "/" + PREFIX + trackingId, data, CreateMode.PERSISTENT, null, false, true);
-  }
-  
-  /**
-   * Puts an element in the map only if there isn't one with the same trackingId already.
-   * @return true if the element was added, false if it wasn't (because the key already exists)
-   */
-  public boolean putIfAbsent(String trackingId, byte[] data) throws KeeperException, InterruptedException {
-    try {
-      zookeeper.makePath(dir + "/" + PREFIX + trackingId, data, CreateMode.PERSISTENT, null, true, true);
-      return true;
-    } catch (NodeExistsException e) {
-      return false;
-    }
-  }
-
-  public byte[] get(String trackingId) throws KeeperException, InterruptedException {
-    return zookeeper.getData(dir + "/" + PREFIX + trackingId, null, null, true);
-  }
-
-  public boolean contains(String trackingId) throws KeeperException, InterruptedException {
-    return zookeeper.exists(dir + "/" + PREFIX + trackingId, true);
-  }
-
-  public int size() throws KeeperException, InterruptedException {
-    Stat stat = new Stat();
-    zookeeper.getData(dir, null, stat, true);
-    return stat.getNumChildren();
-  }
-
-  /**
-   * Removes the znode for the given trackingId.
-   * @return true if the znode was successfully deleted, or false if the node didn't exist
-   *         (and therefore was not deleted); an exception is thrown if deletion fails for
-   *         any other reason
-   */
-  public boolean remove(String trackingId) throws KeeperException, InterruptedException {
-    try {
-      zookeeper.delete(dir + "/" + PREFIX + trackingId, -1, true);
-    } catch (KeeperException.NoNodeException e) {
-      return false;
-    }
-    return true;
-  }
-
-  /**
-   * Helper method to clear all child nodes for a parent node.
-   */
-  public void clear() throws KeeperException, InterruptedException {
-    List<String> childNames = zookeeper.getChildren(dir, null, true);
-    for (String childName : childNames) {
-      zookeeper.delete(dir + "/" + childName, -1, true);
-    }
-  }
-  
-  /**
-   * Returns the keys of all the elements in the map
-   */
-  public Collection<String> keys() throws KeeperException, InterruptedException {
-    List<String> children = zookeeper.getChildren(dir, null, true);
-    final List<String> ids = new ArrayList<>(children.size());
-    children.forEach((child) -> ids.add(child.substring(PREFIX.length())));
-    return ids;
-  }
-
-}

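A minimal usage sketch for the map above (not part of the diff); the ZK path and tracking ids are hypothetical, the SolrZkClient is assumed to be connected, and imports are elided.

    void trackRequest(SolrZkClient zkClient) throws Exception {
      DistributedMap runningMap = new DistributedMap(zkClient, "/overseer/async-running");
      byte[] data = "started".getBytes(StandardCharsets.UTF_8);
      if (runningMap.putIfAbsent("req-42", data)) {
        // first writer wins; a duplicate submission returns false instead of overwriting
      }
      if (runningMap.contains("req-42")) {
        byte[] stored = runningMap.get("req-42"); // reads znode dir + "/mn-req-42"
      }
      runningMap.remove("req-42"); // returns false if the node was already gone
    }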

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/CoreContainer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
deleted file mode 100644
index 83384fb..0000000
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ /dev/null
@@ -1,1874 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Properties;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Maps;
-import org.apache.http.auth.AuthSchemeProvider;
-import org.apache.http.client.CredentialsProvider;
-import org.apache.http.config.Lookup;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.store.Directory;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.HttpClientUtil;
-import org.apache.solr.client.solrj.impl.SolrHttpClientBuilder;
-import org.apache.solr.client.solrj.impl.SolrHttpClientContextBuilder;
-import org.apache.solr.client.solrj.impl.SolrHttpClientContextBuilder.AuthSchemeRegistryProvider;
-import org.apache.solr.client.solrj.impl.SolrHttpClientContextBuilder.CredentialsProviderProvider;
-import org.apache.solr.client.solrj.util.SolrIdentifierValidator;
-import org.apache.solr.cloud.CloudDescriptor;
-import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.cloud.autoscaling.AutoScalingHandler;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Replica.State;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.IOUtils;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.DirectoryFactory.DirContext;
-import org.apache.solr.core.backup.repository.BackupRepository;
-import org.apache.solr.core.backup.repository.BackupRepositoryFactory;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.handler.SnapShooter;
-import org.apache.solr.handler.admin.AutoscalingHistoryHandler;
-import org.apache.solr.handler.admin.CollectionsHandler;
-import org.apache.solr.handler.admin.ConfigSetsHandler;
-import org.apache.solr.handler.admin.CoreAdminHandler;
-import org.apache.solr.handler.admin.HealthCheckHandler;
-import org.apache.solr.handler.admin.InfoHandler;
-import org.apache.solr.handler.admin.MetricsCollectorHandler;
-import org.apache.solr.handler.admin.MetricsHandler;
-import org.apache.solr.handler.admin.MetricsHistoryHandler;
-import org.apache.solr.handler.admin.SecurityConfHandler;
-import org.apache.solr.handler.admin.SecurityConfHandlerLocal;
-import org.apache.solr.handler.admin.SecurityConfHandlerZk;
-import org.apache.solr.handler.admin.ZookeeperInfoHandler;
-import org.apache.solr.handler.admin.ZookeeperStatusHandler;
-import org.apache.solr.handler.component.ShardHandlerFactory;
-import org.apache.solr.logging.LogWatcher;
-import org.apache.solr.logging.MDCLoggingContext;
-import org.apache.solr.metrics.SolrCoreMetricManager;
-import org.apache.solr.metrics.SolrMetricManager;
-import org.apache.solr.metrics.SolrMetricProducer;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.search.SolrFieldCacheBean;
-import org.apache.solr.security.AuthenticationPlugin;
-import org.apache.solr.security.AuthorizationPlugin;
-import org.apache.solr.security.HttpClientBuilderPlugin;
-import org.apache.solr.security.PKIAuthenticationPlugin;
-import org.apache.solr.security.PublicKeyHandler;
-import org.apache.solr.security.SecurityPluginHolder;
-import org.apache.solr.update.SolrCoreState;
-import org.apache.solr.update.UpdateShardHandler;
-import org.apache.solr.util.DefaultSolrThreadFactory;
-import org.apache.solr.util.OrderedExecutor;
-import org.apache.solr.util.stats.MetricUtils;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static java.util.Objects.requireNonNull;
-import static org.apache.solr.common.params.CommonParams.AUTHC_PATH;
-import static org.apache.solr.common.params.CommonParams.AUTHZ_PATH;
-import static org.apache.solr.common.params.CommonParams.AUTOSCALING_HISTORY_PATH;
-import static org.apache.solr.common.params.CommonParams.COLLECTIONS_HANDLER_PATH;
-import static org.apache.solr.common.params.CommonParams.CONFIGSETS_HANDLER_PATH;
-import static org.apache.solr.common.params.CommonParams.CORES_HANDLER_PATH;
-import static org.apache.solr.common.params.CommonParams.HEALTH_CHECK_HANDLER_PATH;
-import static org.apache.solr.common.params.CommonParams.INFO_HANDLER_PATH;
-import static org.apache.solr.common.params.CommonParams.METRICS_HISTORY_PATH;
-import static org.apache.solr.common.params.CommonParams.METRICS_PATH;
-import static org.apache.solr.common.params.CommonParams.ZK_PATH;
-import static org.apache.solr.common.params.CommonParams.ZK_STATUS_PATH;
-import static org.apache.solr.core.CorePropertiesLocator.PROPERTIES_FILENAME;
-import static org.apache.solr.security.AuthenticationPlugin.AUTHENTICATION_PLUGIN_PROP;
-
-/**
- *
- * @since solr 1.3
- */
-public class CoreContainer {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  final SolrCores solrCores = new SolrCores(this);
-
-  public static class CoreLoadFailure {
-
-    public final CoreDescriptor cd;
-    public final Exception exception;
-
-    public CoreLoadFailure(CoreDescriptor cd, Exception loadFailure) {
-      this.cd = new CoreDescriptor(cd.getName(), cd);
-      this.exception = loadFailure;
-    }
-  }
-
-  protected final Map<String, CoreLoadFailure> coreInitFailures = new ConcurrentHashMap<>();
-
-  protected CoreAdminHandler coreAdminHandler = null;
-  protected CollectionsHandler collectionsHandler = null;
-  protected HealthCheckHandler healthCheckHandler = null;
-
-  private InfoHandler infoHandler;
-  protected ConfigSetsHandler configSetsHandler = null;
-
-  private PKIAuthenticationPlugin pkiAuthenticationPlugin;
-
-  protected Properties containerProperties;
-
-  private ConfigSetService coreConfigService;
-
-  protected ZkContainer zkSys = new ZkContainer();
-  protected ShardHandlerFactory shardHandlerFactory;
-
-  private UpdateShardHandler updateShardHandler;
-
-  private ExecutorService coreContainerWorkExecutor = ExecutorUtil.newMDCAwareCachedThreadPool(
-      new DefaultSolrThreadFactory("coreContainerWorkExecutor") );
-
-  private final OrderedExecutor replayUpdatesExecutor;
-
-  protected LogWatcher logging = null;
-
-  private CloserThread backgroundCloser = null;
-  protected final NodeConfig cfg;
-  protected final SolrResourceLoader loader;
-
-  protected final String solrHome;
-
-  protected final CoresLocator coresLocator;
-
-  private String hostName;
-
-  private final BlobRepository blobRepository = new BlobRepository(this);
-
-  private PluginBag<SolrRequestHandler> containerHandlers = new PluginBag<>(SolrRequestHandler.class, null);
-
-  private boolean asyncSolrCoreLoad;
-
-  protected SecurityConfHandler securityConfHandler;
-
-  private SecurityPluginHolder<AuthorizationPlugin> authorizationPlugin;
-
-  private SecurityPluginHolder<AuthenticationPlugin> authenticationPlugin;
-
-  private BackupRepositoryFactory backupRepoFactory;
-
-  protected SolrMetricManager metricManager;
-
-  protected String metricTag = Integer.toHexString(hashCode());
-
-  protected MetricsHandler metricsHandler;
-
-  protected MetricsHistoryHandler metricsHistoryHandler;
-
-  protected MetricsCollectorHandler metricsCollectorHandler;
-
-  protected AutoscalingHistoryHandler autoscalingHistoryHandler;
-
-
-  // Bits for the state variable.
-  public final static long LOAD_COMPLETE = 0x1L;
-  public final static long CORE_DISCOVERY_COMPLETE = 0x2L;
-  public final static long INITIAL_CORE_LOAD_COMPLETE = 0x4L;
-  private volatile long status = 0L;
-
-  protected AutoScalingHandler autoScalingHandler;
-
-  private enum CoreInitFailedAction { fromleader, none }
-
-  /**
-   * This method instantiates a new instance of {@linkplain BackupRepository}.
-   *
-   * @param repositoryName The name of the backup repository (Optional).
-   *                       If not specified, a default implementation is used.
-   * @return a new instance of {@linkplain BackupRepository}.
-   */
-  public BackupRepository newBackupRepository(Optional<String> repositoryName) {
-    BackupRepository repository;
-    if (repositoryName.isPresent()) {
-      repository = backupRepoFactory.newInstance(getResourceLoader(), repositoryName.get());
-    } else {
-      repository = backupRepoFactory.newInstance(getResourceLoader());
-    }
-    return repository;
-  }
-
-  public ExecutorService getCoreZkRegisterExecutorService() {
-    return zkSys.getCoreZkRegisterExecutorService();
-  }
-
-  public SolrRequestHandler getRequestHandler(String path) {
-    return RequestHandlerBase.getRequestHandler(path, containerHandlers);
-  }
-
-  public PluginBag<SolrRequestHandler> getRequestHandlers() {
-    return this.containerHandlers;
-  }
-
-  {
-    log.debug("New CoreContainer " + System.identityHashCode(this));
-  }
-
-  /**
-   * Create a new CoreContainer using system properties to detect the solr home
-   * directory.  The container's cores are not loaded.
-   * @see #load()
-   */
-  public CoreContainer() {
-    this(new SolrResourceLoader(SolrResourceLoader.locateSolrHome()));
-  }
-
-  /**
-   * Create a new CoreContainer using the given SolrResourceLoader.  The container's
-   * cores are not loaded.
-   * @param loader the SolrResourceLoader
-   * @see #load()
-   */
-  public CoreContainer(SolrResourceLoader loader) {
-    this(SolrXmlConfig.fromSolrHome(loader, loader.getInstancePath()));
-  }
-
-  /**
-   * Create a new CoreContainer using the given solr home directory.  The container's
-   * cores are not loaded.
-   * @param solrHome a String containing the path to the solr home directory
-   * @see #load()
-   */
-  public CoreContainer(String solrHome) {
-    this(new SolrResourceLoader(Paths.get(solrHome)));
-  }
-
-  /**
-   * Create a new CoreContainer using the given configuration.  The container's
-   * cores are not loaded.
-   * @param config a NodeConfig representation of this container's configuration
-   * @see #load()
-   */
-  public CoreContainer(NodeConfig config) {
-    this(config, new Properties());
-  }
-
-  public CoreContainer(NodeConfig config, Properties properties) {
-    this(config, properties, new CorePropertiesLocator(config.getCoreRootDirectory()));
-  }
-
-  public CoreContainer(NodeConfig config, Properties properties, boolean asyncSolrCoreLoad) {
-    this(config, properties, new CorePropertiesLocator(config.getCoreRootDirectory()), asyncSolrCoreLoad);
-  }
-
-  public CoreContainer(NodeConfig config, Properties properties, CoresLocator locator) {
-    this(config, properties, locator, false);
-  }
-
-  public CoreContainer(NodeConfig config, Properties properties, CoresLocator locator, boolean asyncSolrCoreLoad) {
-    this.loader = config.getSolrResourceLoader();
-    this.solrHome = loader.getInstancePath().toString();
-    containerHandlers.put(PublicKeyHandler.PATH, new PublicKeyHandler());
-    this.cfg = requireNonNull(config);
-    this.coresLocator = locator;
-    this.containerProperties = new Properties(properties);
-    this.asyncSolrCoreLoad = asyncSolrCoreLoad;
-    this.replayUpdatesExecutor = new OrderedExecutor(
-        cfg.getReplayUpdatesThreads(),
-        ExecutorUtil.newMDCAwareCachedThreadPool(
-            cfg.getReplayUpdatesThreads(),
-            new DefaultSolrThreadFactory("replayUpdatesExecutor")));
-  }
-
-  private synchronized void initializeAuthorizationPlugin(Map<String, Object> authorizationConf) {
-    authorizationConf = Utils.getDeepCopy(authorizationConf, 4);
-    //Initialize the Authorization module
-    SecurityPluginHolder<AuthorizationPlugin> old = authorizationPlugin;
-    SecurityPluginHolder<AuthorizationPlugin> authorizationPlugin = null;
-    if (authorizationConf != null) {
-      String klas = (String) authorizationConf.get("class");
-      if (klas == null) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "class is required for authorization plugin");
-      }
-      if (old != null && old.getZnodeVersion() == readVersion(authorizationConf)) {
-        return;
-      }
-      log.info("Initializing authorization plugin: " + klas);
-      authorizationPlugin = new SecurityPluginHolder<>(readVersion(authorizationConf),
-          getResourceLoader().newInstance(klas, AuthorizationPlugin.class));
-
-      // Read and pass the authorization context to the plugin
-      authorizationPlugin.plugin.init(authorizationConf);
-    } else {
-      log.debug("Security conf doesn't exist. Skipping setup for authorization module.");
-    }
-    this.authorizationPlugin = authorizationPlugin;
-    if (old != null) {
-      try {
-        old.plugin.close();
-      } catch (Exception e) {
-        log.warn("Exception while closing old authorization plugin", e);
-      }
-    }
-  }
-
-  private synchronized void initializeAuthenticationPlugin(Map<String, Object> authenticationConfig) {
-    authenticationConfig = Utils.getDeepCopy(authenticationConfig, 4);
-    String pluginClassName = null;
-    if (authenticationConfig != null) {
-      if (authenticationConfig.containsKey("class")) {
-        pluginClassName = String.valueOf(authenticationConfig.get("class"));
-      } else {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "No 'class' specified for authentication in ZK.");
-      }
-    }
-
-    if (pluginClassName != null) {
-      log.debug("Authentication plugin class obtained from security.json: "+pluginClassName);
-    } else if (System.getProperty(AUTHENTICATION_PLUGIN_PROP) != null) {
-      pluginClassName = System.getProperty(AUTHENTICATION_PLUGIN_PROP);
-      log.debug("Authentication plugin class obtained from system property '" +
-          AUTHENTICATION_PLUGIN_PROP + "': " + pluginClassName);
-    } else {
-      log.debug("No authentication plugin used.");
-    }
-    SecurityPluginHolder<AuthenticationPlugin> old = authenticationPlugin;
-    SecurityPluginHolder<AuthenticationPlugin> authenticationPlugin = null;
-
-    // Initialize the plugin
-    if (pluginClassName != null) {
-      log.info("Initializing authentication plugin: " + pluginClassName);
-      authenticationPlugin = new SecurityPluginHolder<>(readVersion(authenticationConfig),
-          getResourceLoader().newInstance(pluginClassName,
-              AuthenticationPlugin.class,
-              null,
-              new Class[]{CoreContainer.class},
-              new Object[]{this}));
-    }
-    if (authenticationPlugin != null) {
-      authenticationPlugin.plugin.init(authenticationConfig);
-      setupHttpClientForAuthPlugin(authenticationPlugin.plugin);
-    }
-    this.authenticationPlugin = authenticationPlugin;
-    try {
-      if (old != null) old.plugin.close();
-    } catch (Exception e) {
-      log.warn("Exception while closing old authentication plugin", e);
-    }
-
-  }
-
-  private void setupHttpClientForAuthPlugin(Object authcPlugin) {
-    if (authcPlugin instanceof HttpClientBuilderPlugin) {
-      // Setup HttpClient for internode communication
-      SolrHttpClientBuilder builder = ((HttpClientBuilderPlugin) authcPlugin).getHttpClientBuilder(HttpClientUtil.getHttpClientBuilder());
-      
-      // The default http client of the core container's shardHandlerFactory has already been created and
-      // configured using the default httpclient configurer. We need to reconfigure it using the plugin's
-      // http client configurer to set it up for internode communication.
-      log.debug("Reconfiguring HttpClient settings.");
-
-      SolrHttpClientContextBuilder httpClientBuilder = new SolrHttpClientContextBuilder();
-      if (builder.getCredentialsProviderProvider() != null) {
-        httpClientBuilder.setDefaultCredentialsProvider(new CredentialsProviderProvider() {
-          
-          @Override
-          public CredentialsProvider getCredentialsProvider() {
-            return builder.getCredentialsProviderProvider().getCredentialsProvider();
-          }
-        });
-      }
-      if (builder.getAuthSchemeRegistryProvider() != null) {
-        httpClientBuilder.setAuthSchemeRegistryProvider(new AuthSchemeRegistryProvider() {
-
-          @Override
-          public Lookup<AuthSchemeProvider> getAuthSchemeRegistry() {
-            return builder.getAuthSchemeRegistryProvider().getAuthSchemeRegistry();
-          }
-        });
-      }
-
-      HttpClientUtil.setHttpClientRequestContextBuilder(httpClientBuilder);
-
-    } else {
-      if (pkiAuthenticationPlugin != null) {
-        // this happened due to an authc plugin reload; no need to register the pkiAuthc plugin again
-        if (pkiAuthenticationPlugin.isInterceptorRegistered()) return;
-        log.info("PKIAuthenticationPlugin is managing internode requests");
-        setupHttpClientForAuthPlugin(pkiAuthenticationPlugin);
-        pkiAuthenticationPlugin.setInterceptorRegistered();
-      }
-    }
-  }
-
-  private static int readVersion(Map<String, Object> conf) {
-    if (conf == null) return -1;
-    Map meta = (Map) conf.get("");
-    if (meta == null) return -1;
-    Number v = (Number) meta.get("v");
-    return v == null ? -1 : v.intValue();
-  }
-
-  /**
-   * This method allows subclasses to construct a CoreContainer
-   * without any default init behavior.
-   *
-   * @param testConstructor pass (Object)null.
-   * @lucene.experimental
-   */
-  protected CoreContainer(Object testConstructor) {
-    solrHome = null;
-    loader = null;
-    coresLocator = null;
-    cfg = null;
-    containerProperties = null;
-    replayUpdatesExecutor = null;
-  }
-
-  public static CoreContainer createAndLoad(Path solrHome) {
-    return createAndLoad(solrHome, solrHome.resolve(SolrXmlConfig.SOLR_XML_FILE));
-  }
-
-  /**
-   * Create a new CoreContainer and load its cores
-   * @param solrHome the solr home directory
-   * @param configFile the file containing this container's configuration
-   * @return a loaded CoreContainer
-   */
-  public static CoreContainer createAndLoad(Path solrHome, Path configFile) {
-    SolrResourceLoader loader = new SolrResourceLoader(solrHome);
-    CoreContainer cc = new CoreContainer(SolrXmlConfig.fromFile(loader, configFile));
-    try {
-      cc.load();
-    } catch (Exception e) {
-      cc.shutdown();
-      throw e;
-    }
-    return cc;
-  }
-
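-    // [Sketch, not from the removed file: the typical createAndLoad lifecycle described
-    //  in the javadoc above. The solr home path and core name are hypothetical.]
-    // CoreContainer cc = CoreContainer.createAndLoad(Paths.get("/var/solr"));
-    // try {
-    //   SolrCore core = cc.getCore("mycore"); // caller must close() the returned core
-    //   // ... use the core, then core.close() ...
-    // } finally {
-    //   cc.shutdown();
-    // }
-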
-  public Properties getContainerProperties() {
-    return containerProperties;
-  }
-
-  public PKIAuthenticationPlugin getPkiAuthenticationPlugin() {
-    return pkiAuthenticationPlugin;
-  }
-
-  public SolrMetricManager getMetricManager() {
-    return metricManager;
-  }
-
-  public MetricsHandler getMetricsHandler() {
-    return metricsHandler;
-  }
-
-  public MetricsHistoryHandler getMetricsHistoryHandler() {
-    return metricsHistoryHandler;
-  }
-
-  public OrderedExecutor getReplayUpdatesExecutor() {
-    return replayUpdatesExecutor;
-  }
-
-  //-------------------------------------------------------------------
-  // Initialization / Cleanup
-  //-------------------------------------------------------------------
-
-  /**
-   * Load the cores defined for this CoreContainer
-   */
-  public void load()  {
-    log.debug("Loading cores into CoreContainer [instanceDir={}]", loader.getInstancePath());
-
-    // add the sharedLib to the shared resource loader before initializing cfg based plugins
-    String libDir = cfg.getSharedLibDirectory();
-    if (libDir != null) {
-      Path libPath = loader.getInstancePath().resolve(libDir);
-      try {
-        loader.addToClassLoader(SolrResourceLoader.getURLs(libPath));
-        loader.reloadLuceneSPI();
-      } catch (IOException e) {
-        if (!libDir.equals("lib")) { // Don't complain if default "lib" dir does not exist
-          log.warn("Couldn't add files from {} to classpath: {}", libPath, e.getMessage());
-        }
-      }
-    }
-
-    metricManager = new SolrMetricManager(loader, cfg.getMetricsConfig());
-
-    coreContainerWorkExecutor = MetricUtils.instrumentedExecutorService(
-        coreContainerWorkExecutor, null,
-        metricManager.registry(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node)),
-        SolrMetricManager.mkName("coreContainerWorkExecutor", SolrInfoBean.Category.CONTAINER.toString(), "threadPool"));
-
-    shardHandlerFactory = ShardHandlerFactory.newInstance(cfg.getShardHandlerFactoryPluginInfo(), loader);
-    if (shardHandlerFactory instanceof SolrMetricProducer) {
-      SolrMetricProducer metricProducer = (SolrMetricProducer) shardHandlerFactory;
-      metricProducer.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), metricTag, "httpShardHandler");
-    }
-
-    updateShardHandler = new UpdateShardHandler(cfg.getUpdateShardHandlerConfig());
-    updateShardHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), metricTag, "updateShardHandler");
-
-    solrCores.load(loader);
-
-
-    logging = LogWatcher.newRegisteredLogWatcher(cfg.getLogWatcherConfig(), loader);
-
-    hostName = cfg.getNodeName();
-
-    zkSys.initZooKeeper(this, solrHome, cfg.getCloudConfig());
-    if (isZooKeeperAware()) {
-      pkiAuthenticationPlugin = new PKIAuthenticationPlugin(this, zkSys.getZkController().getNodeName(),
-          (PublicKeyHandler) containerHandlers.get(PublicKeyHandler.PATH));
-    }
-
-    MDCLoggingContext.setNode(this);
-
-    securityConfHandler = isZooKeeperAware() ? new SecurityConfHandlerZk(this) : new SecurityConfHandlerLocal(this);
-    reloadSecurityProperties();
-    this.backupRepoFactory = new BackupRepositoryFactory(cfg.getBackupRepositoryPlugins());
-
-    createHandler(ZK_PATH, ZookeeperInfoHandler.class.getName(), ZookeeperInfoHandler.class);
-    createHandler(ZK_STATUS_PATH, ZookeeperStatusHandler.class.getName(), ZookeeperStatusHandler.class);
-    collectionsHandler = createHandler(COLLECTIONS_HANDLER_PATH, cfg.getCollectionsHandlerClass(), CollectionsHandler.class);
-    healthCheckHandler = createHandler(HEALTH_CHECK_HANDLER_PATH, cfg.getHealthCheckHandlerClass(), HealthCheckHandler.class);
-    infoHandler        = createHandler(INFO_HANDLER_PATH, cfg.getInfoHandlerClass(), InfoHandler.class);
-    coreAdminHandler   = createHandler(CORES_HANDLER_PATH, cfg.getCoreAdminHandlerClass(), CoreAdminHandler.class);
-    configSetsHandler = createHandler(CONFIGSETS_HANDLER_PATH, cfg.getConfigSetsHandlerClass(), ConfigSetsHandler.class);
-
-    // metricsHistoryHandler uses metricsHandler, so create it first
-    metricsHandler = new MetricsHandler(this);
-    containerHandlers.put(METRICS_PATH, metricsHandler);
-    metricsHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), metricTag, METRICS_PATH);
-
-    createMetricsHistoryHandler();
-
-    autoscalingHistoryHandler = createHandler(AUTOSCALING_HISTORY_PATH, AutoscalingHistoryHandler.class.getName(), AutoscalingHistoryHandler.class);
-    metricsCollectorHandler = createHandler(MetricsCollectorHandler.HANDLER_PATH, MetricsCollectorHandler.class.getName(), MetricsCollectorHandler.class);
-    // may want to add some configuration here in the future
-    metricsCollectorHandler.init(null);
-
-    containerHandlers.put(AUTHZ_PATH, securityConfHandler);
-    securityConfHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), metricTag, AUTHZ_PATH);
-    containerHandlers.put(AUTHC_PATH, securityConfHandler);
-
-
-    PluginInfo[] metricReporters = cfg.getMetricsConfig().getMetricReporters();
-    metricManager.loadReporters(metricReporters, loader, this, null, null, SolrInfoBean.Group.node);
-    metricManager.loadReporters(metricReporters, loader, this, null, null, SolrInfoBean.Group.jvm);
-    metricManager.loadReporters(metricReporters, loader, this, null, null, SolrInfoBean.Group.jetty);
-
-    coreConfigService = ConfigSetService.createConfigSetService(cfg, loader, zkSys.zkController);
-
-    containerProperties.putAll(cfg.getSolrProperties());
-
-    // initialize gauges for reporting the number of cores and disk total/free
-
-    String registryName = SolrMetricManager.getRegistryName(SolrInfoBean.Group.node);
-    String metricTag = Integer.toHexString(hashCode());
-    metricManager.registerGauge(null, registryName, () -> solrCores.getCores().size(),
-        metricTag,true, "loaded", SolrInfoBean.Category.CONTAINER.toString(), "cores");
-    metricManager.registerGauge(null, registryName, () -> solrCores.getLoadedCoreNames().size() - solrCores.getCores().size(),
-        metricTag,true, "lazy", SolrInfoBean.Category.CONTAINER.toString(), "cores");
-    metricManager.registerGauge(null, registryName, () -> solrCores.getAllCoreNames().size() - solrCores.getLoadedCoreNames().size(),
-        metricTag,true, "unloaded", SolrInfoBean.Category.CONTAINER.toString(), "cores");
-    Path dataHome = cfg.getSolrDataHome() != null ? cfg.getSolrDataHome() : cfg.getCoreRootDirectory();
-    metricManager.registerGauge(null, registryName, () -> dataHome.toFile().getTotalSpace(),
-        metricTag,true, "totalSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs");
-    metricManager.registerGauge(null, registryName, () -> dataHome.toFile().getUsableSpace(),
-        metricTag,true, "usableSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs");
-    metricManager.registerGauge(null, registryName, () -> dataHome.toAbsolutePath().toString(),
-        metricTag,true, "path", SolrInfoBean.Category.CONTAINER.toString(), "fs");
-    metricManager.registerGauge(null, registryName, () -> {
-          try {
-            return org.apache.lucene.util.IOUtils.spins(dataHome.toAbsolutePath());
-          } catch (IOException e) {
-            // default to spinning
-            return true;
-          }
-        },
-        metricTag,true, "spins", SolrInfoBean.Category.CONTAINER.toString(), "fs");
-    metricManager.registerGauge(null, registryName, () -> cfg.getCoreRootDirectory().toFile().getTotalSpace(),
-        metricTag,true, "totalSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs", "coreRoot");
-    metricManager.registerGauge(null, registryName, () -> cfg.getCoreRootDirectory().toFile().getUsableSpace(),
-        metricTag,true, "usableSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs", "coreRoot");
-    metricManager.registerGauge(null, registryName, () -> cfg.getCoreRootDirectory().toAbsolutePath().toString(),
-        metricTag,true, "path", SolrInfoBean.Category.CONTAINER.toString(), "fs", "coreRoot");
-    metricManager.registerGauge(null, registryName, () -> {
-          try {
-            return org.apache.lucene.util.IOUtils.spins(cfg.getCoreRootDirectory().toAbsolutePath());
-          } catch (IOException e) {
-            // default to spinning
-            return true;
-          }
-        },
-        metricTag,true, "spins", SolrInfoBean.Category.CONTAINER.toString(), "fs", "coreRoot");
-    // add version information
-    metricManager.registerGauge(null, registryName, () -> this.getClass().getPackage().getSpecificationVersion(),
-        metricTag,true, "specification", SolrInfoBean.Category.CONTAINER.toString(), "version");
-    metricManager.registerGauge(null, registryName, () -> this.getClass().getPackage().getImplementationVersion(),
-        metricTag,true, "implementation", SolrInfoBean.Category.CONTAINER.toString(), "version");
-
-    SolrFieldCacheBean fieldCacheBean = new SolrFieldCacheBean();
-    fieldCacheBean.initializeMetrics(metricManager, registryName, metricTag, null);
-
-    if (isZooKeeperAware()) {
-      metricManager.loadClusterReporters(metricReporters, this);
-    }
-
-    // setup executor to load cores in parallel
-    ExecutorService coreLoadExecutor = MetricUtils.instrumentedExecutorService(
-        ExecutorUtil.newMDCAwareFixedThreadPool(
-            cfg.getCoreLoadThreadCount(isZooKeeperAware()),
-            new DefaultSolrThreadFactory("coreLoadExecutor")), null,
-        metricManager.registry(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node)),
-        SolrMetricManager.mkName("coreLoadExecutor", SolrInfoBean.Category.CONTAINER.toString(), "threadPool"));
-    final List<Future<SolrCore>> futures = new ArrayList<>();
-    try {
-      List<CoreDescriptor> cds = coresLocator.discover(this);
-      if (isZooKeeperAware()) {
-        // sort the cores when running in SolrCloud; in standalone mode the order does not matter
-        CoreSorter coreComparator = new CoreSorter().init(this);
-        cds = new ArrayList<>(cds);//make a copy
-        Collections.sort(cds, coreComparator::compare);
-      }
-      checkForDuplicateCoreNames(cds);
-      status |= CORE_DISCOVERY_COMPLETE;
-
-      for (final CoreDescriptor cd : cds) {
-        if (cd.isTransient() || !cd.isLoadOnStartup()) {
-          solrCores.addCoreDescriptor(cd);
-        } else if (asyncSolrCoreLoad) {
-          solrCores.markCoreAsLoading(cd);
-        }
-        if (cd.isLoadOnStartup()) {
-          futures.add(coreLoadExecutor.submit(() -> {
-            SolrCore core;
-            try {
-              if (zkSys.getZkController() != null) {
-                zkSys.getZkController().throwErrorIfReplicaReplaced(cd);
-              }
-              solrCores.waitAddPendingCoreOps(cd.getName());
-              core = createFromDescriptor(cd, false, false);
-            } finally {
-              solrCores.removeFromPendingOps(cd.getName());
-              if (asyncSolrCoreLoad) {
-                solrCores.markCoreAsNotLoading(cd);
-              }
-            }
-            try {
-              zkSys.registerInZk(core, true, false);
-            } catch (RuntimeException e) {
-              SolrException.log(log, "Error registering SolrCore", e);
-            }
-            return core;
-          }));
-        }
-      }
-
-
-      // Start the background thread
-      backgroundCloser = new CloserThread(this, solrCores, cfg);
-      backgroundCloser.start();
-
-    } finally {
-      if (asyncSolrCoreLoad && futures != null) {
-
-        coreContainerWorkExecutor.submit(() -> {
-          try {
-            for (Future<SolrCore> future : futures) {
-              try {
-                future.get();
-              } catch (InterruptedException e) {
-                Thread.currentThread().interrupt();
-              } catch (ExecutionException e) {
-                log.error("Error waiting for SolrCore to be loaded on startup", e.getCause());
-              }
-            }
-          } finally {
-            ExecutorUtil.shutdownAndAwaitTermination(coreLoadExecutor);
-          }
-        });
-      } else {
-        ExecutorUtil.shutdownAndAwaitTermination(coreLoadExecutor);
-      }
-    }
-
-    if (isZooKeeperAware()) {
-      zkSys.getZkController().checkOverseerDesignate();
-      // initialize this handler here when SolrCloudManager is ready
-      autoScalingHandler = new AutoScalingHandler(getZkController().getSolrCloudManager(), loader);
-      containerHandlers.put(AutoScalingHandler.HANDLER_PATH, autoScalingHandler);
-      autoScalingHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), metricTag, AutoScalingHandler.HANDLER_PATH);
-    }
-    // This is a bit redundant, but these are two distinct concepts even though they are accomplished at the same time.
-    status |= LOAD_COMPLETE | INITIAL_CORE_LOAD_COMPLETE;
-  }
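
When asyncSolrCoreLoad is enabled, the load sequence above returns before all cores have
finished loading. A minimal sketch of waiting for completion, assuming an initialized
CoreContainer `cc` (the 30-second timeout is an arbitrary example):

    cc.load();
    cc.waitForLoadingCoresToFinish(30000); // block up to 30s for async core loads to finish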
-
-  // MetricsHistoryHandler supports both cloud and standalone configs
-  private void createMetricsHistoryHandler() {
-    PluginInfo plugin = cfg.getMetricsConfig().getHistoryHandler();
-    Map<String, Object> initArgs;
-    if (plugin != null && plugin.initArgs != null) {
-      initArgs = plugin.initArgs.asMap(5);
-      initArgs.put(MetricsHistoryHandler.ENABLE_PROP, plugin.isEnabled());
-    } else {
-      initArgs = new HashMap<>();
-    }
-    String name;
-    SolrCloudManager cloudManager;
-    SolrClient client;
-    if (isZooKeeperAware()) {
-      name = getZkController().getNodeName();
-      cloudManager = getZkController().getSolrCloudManager();
-      client = new CloudSolrClient.Builder(Collections.singletonList(getZkController().getZkServerAddress()), Optional.empty())
-          .withHttpClient(updateShardHandler.getDefaultHttpClient()).build();
-    } else {
-      name = getNodeConfig().getNodeName();
-      if (name == null || name.isEmpty()) {
-        name = "localhost";
-      }
-      cloudManager = null;
-      client = new EmbeddedSolrServer(this, CollectionAdminParams.SYSTEM_COLL) {
-        @Override
-        public void close() throws IOException {
-          // do nothing - we close the container ourselves
-        }
-      };
-      // enable local metrics unless specifically set otherwise
-      if (!initArgs.containsKey(MetricsHistoryHandler.ENABLE_NODES_PROP)) {
-        initArgs.put(MetricsHistoryHandler.ENABLE_NODES_PROP, true);
-      }
-      if (!initArgs.containsKey(MetricsHistoryHandler.ENABLE_REPLICAS_PROP)) {
-        initArgs.put(MetricsHistoryHandler.ENABLE_REPLICAS_PROP, true);
-      }
-    }
-    metricsHistoryHandler = new MetricsHistoryHandler(name, metricsHandler,
-        client, cloudManager, initArgs);
-    containerHandlers.put(METRICS_HISTORY_PATH, metricsHistoryHandler);
-    metricsHistoryHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), metricTag, METRICS_HISTORY_PATH);
-  }
-
-  public void securityNodeChanged() {
-    log.info("Security node changed, reloading security.json");
-    reloadSecurityProperties();
-  }
-
-  /**
-   * Reload the security configuration and re-initialize the authorization and authentication plugins.
-   */
-  private void reloadSecurityProperties() {
-    SecurityConfHandler.SecurityConfig securityConfig = securityConfHandler.getSecurityConfig(false);
-    initializeAuthorizationPlugin((Map<String, Object>) securityConfig.getData().get("authorization"));
-    initializeAuthenticationPlugin((Map<String, Object>) securityConfig.getData().get("authentication"));
-  }
-
-  private static void checkForDuplicateCoreNames(List<CoreDescriptor> cds) {
-    Map<String, Path> addedCores = Maps.newHashMap();
-    for (CoreDescriptor cd : cds) {
-      final String name = cd.getName();
-      if (addedCores.containsKey(name))
-        throw new SolrException(ErrorCode.SERVER_ERROR,
-            String.format(Locale.ROOT, "Found multiple cores with the name [%s], with instancedirs [%s] and [%s]",
-                name, addedCores.get(name), cd.getInstanceDir()));
-      addedCores.put(name, cd.getInstanceDir());
-    }
-  }
-
-  private volatile boolean isShutDown = false;
-
-  public boolean isShutDown() {
-    return isShutDown;
-  }
-
-  /**
-   * Stops all cores.
-   */
-  public void shutdown() {
-    log.info("Shutting down CoreContainer instance="
-        + System.identityHashCode(this));
-
-    isShutDown = true;
-
-    ExecutorUtil.shutdownAndAwaitTermination(coreContainerWorkExecutor);
-    replayUpdatesExecutor.shutdownAndAwaitTermination();
-
-    if (metricsHistoryHandler != null) {
-      IOUtils.closeQuietly(metricsHistoryHandler.getSolrClient());
-      metricsHistoryHandler.close();
-    }
-
-    if (metricManager != null) {
-      metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node));
-      metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.jvm));
-      metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.jetty));
-
-      metricManager.unregisterGauges(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node), metricTag);
-      metricManager.unregisterGauges(SolrMetricManager.getRegistryName(SolrInfoBean.Group.jvm), metricTag);
-      metricManager.unregisterGauges(SolrMetricManager.getRegistryName(SolrInfoBean.Group.jetty), metricTag);
-    }
-
-    if (isZooKeeperAware()) {
-      cancelCoreRecoveries();
-      zkSys.zkController.publishNodeAsDown(zkSys.zkController.getNodeName());
-      try {
-        zkSys.zkController.removeEphemeralLiveNode();
-      } catch (Exception e) {
-        log.warn("Error removing live node. Continuing to close CoreContainer", e);
-      }
-      if (metricManager != null) {
-        metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.cluster));
-      }
-    }
-
-    try {
-      if (coreAdminHandler != null) coreAdminHandler.shutdown();
-    } catch (Exception e) {
-      log.warn("Error shutting down CoreAdminHandler. Continuing to close CoreContainer.", e);
-    }
-
-    try {
-      // First wake up the closer thread, it'll terminate almost immediately since it checks isShutDown.
-      synchronized (solrCores.getModifyLock()) {
-        solrCores.getModifyLock().notifyAll(); // wake up anyone waiting
-      }
-      if (backgroundCloser != null) { // Doesn't seem right, but tests get in here without initializing the core.
-        try {
-          while (true) {
-            backgroundCloser.join(15000);
-            if (backgroundCloser.isAlive()) {
-              synchronized (solrCores.getModifyLock()) {
-                solrCores.getModifyLock().notifyAll(); // there is a race we have to protect against
-              }
-            } else {
-              break;
-            }
-          }
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-          if (log.isDebugEnabled()) {
-            log.debug("backgroundCloser thread was interrupted before finishing");
-          }
-        }
-      }
-      // Now clear all the cores that are being operated upon.
-      solrCores.close();
-
-      // It's still possible that one of the pending dynamic load operations is waiting, so wake it up if so.
-      // Since all the pending operations queues have been drained, there should be nothing to do.
-      synchronized (solrCores.getModifyLock()) {
-        solrCores.getModifyLock().notifyAll(); // wake up the thread
-      }
-
-    } finally {
-      try {
-        if (shardHandlerFactory != null) {
-          shardHandlerFactory.close();
-        }
-      } finally {
-        try {
-          if (updateShardHandler != null) {
-            updateShardHandler.close();
-          }
-        } finally {
-          // we want to close zk stuff last
-          zkSys.close();
-        }
-      }
-    }
-
-    // It should be safe to close the authorization plugin at this point.
-    try {
-      if (authorizationPlugin != null) {
-        authorizationPlugin.plugin.close();
-      }
-    } catch (IOException e) {
-      log.warn("Exception while closing authorization plugin.", e);
-    }
-
-    // It should be safe to close the authentication plugin at this point.
-    try {
-      if (authenticationPlugin != null) {
-        authenticationPlugin.plugin.close();
-        authenticationPlugin = null;
-      }
-    } catch (Exception e) {
-      log.warn("Exception while closing authentication plugin.", e);
-    }
-
-    org.apache.lucene.util.IOUtils.closeWhileHandlingException(loader); // best effort
-  }
-
-  public void cancelCoreRecoveries() {
-
-    List<SolrCore> cores = solrCores.getCores();
-
-    // we must cancel without holding the cores sync
-    // make sure we wait for any recoveries to stop
-    for (SolrCore core : cores) {
-      try {
-        core.getSolrCoreState().cancelRecovery();
-      } catch (Exception e) {
-        SolrException.log(log, "Error canceling recovery for core", e);
-      }
-    }
-  }
-
-  @Override
-  protected void finalize() throws Throwable {
-    try {
-      if (!isShutDown) {
-        log.error("CoreContainer was not closed prior to finalize(); this indicates a bug -- POSSIBLE RESOURCE LEAK!!!  instance=" + System.identityHashCode(this));
-      }
-    } finally {
-      super.finalize();
-    }
-  }
-
-  public CoresLocator getCoresLocator() {
-    return coresLocator;
-  }
-
-  protected SolrCore registerCore(CoreDescriptor cd, SolrCore core, boolean registerInZk, boolean skipRecovery) {
-    if (core == null) {
-      throw new RuntimeException("Cannot register a null core.");
-    }
-
-    if (isShutDown) {
-      core.close();
-      throw new IllegalStateException("This CoreContainer has been closed");
-    }
-    SolrCore old = solrCores.putCore(cd, core);
-    /*
-     * Set both the name of the descriptor and the name of the
-     * core, since the descriptor's name is used for persisting.
-     */
-
-    core.setName(cd.getName());
-
-    coreInitFailures.remove(cd.getName());
-
-    if (old == null || old == core) {
-      log.debug("registering core: " + cd.getName());
-      if (registerInZk) {
-        zkSys.registerInZk(core, false, skipRecovery);
-      }
-      return null;
-    } else {
-      log.debug("replacing core: " + cd.getName());
-      old.close();
-      if (registerInZk) {
-        zkSys.registerInZk(core, false, skipRecovery);
-      }
-      return old;
-    }
-  }
-
-  /**
-   * Creates a new core, publishing the core state to the cluster
-   * @param coreName the core name
-   * @param parameters the core parameters
-   * @return the newly created core
-   */
-  public SolrCore create(String coreName, Map<String, String> parameters) {
-    return create(coreName, cfg.getCoreRootDirectory().resolve(coreName), parameters, false);
-  }
-
-  /**
-   * Creates a new core in a specified instance directory, publishing the core state to the cluster
-   * @param coreName the core name
-   * @param instancePath the instance directory
-   * @param parameters the core parameters
-   * @return the newly created core
-   */
-  public SolrCore create(String coreName, Path instancePath, Map<String, String> parameters, boolean newCollection) {
-
-    CoreDescriptor cd = new CoreDescriptor(coreName, instancePath, parameters, getContainerProperties(), isZooKeeperAware());
-
-    // TODO: There's a race here, isn't there?
-    // Since the core descriptor is removed when a core is unloaded, it should never be anywhere when a core is created.
-    if (getAllCoreNames().contains(coreName)) {
-      log.warn("Creating a core with an existing name is not allowed");
-      // TODO: Shouldn't this be a BAD_REQUEST?
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Core with name '" + coreName + "' already exists.");
-    }
-
-    boolean preExistingZkEntry = false;
-    try {
-      if (getZkController() != null) {
-        if (!Overseer.isLegacy(getZkController().getZkStateReader())) {
-          if (cd.getCloudDescriptor().getCoreNodeName() == null) {
-            throw new SolrException(ErrorCode.SERVER_ERROR, "coreNodeName is missing in non-legacy mode: " + parameters.toString());
-          }
-        }
-        preExistingZkEntry = getZkController().checkIfCoreNodeNameAlreadyExists(cd);
-      }
-
-      // Much of the logic in core handling pre-supposes that the core.properties file already exists, so create it
-      // first and clean it up if there's an error.
-      coresLocator.create(this, cd);
-
-      SolrCore core = null;
-      try {
-        solrCores.waitAddPendingCoreOps(cd.getName());
-        core = createFromDescriptor(cd, true, newCollection);
-        coresLocator.persist(this, cd); // Write out the current core properties in case anything changed when the core was created
-      } finally {
-        solrCores.removeFromPendingOps(cd.getName());
-      }
-
-      return core;
-    } catch (Exception ex) {
-      // First clean up any core descriptor, there should never be an existing core.properties file for any core that 
-      // failed to be created on-the-fly. 
-      coresLocator.delete(this, cd);
-      if (isZooKeeperAware() && !preExistingZkEntry) {
-        try {
-          getZkController().unregister(coreName, cd);
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-          SolrException.log(log, null, e);
-        } catch (KeeperException e) {
-          SolrException.log(log, null, e);
-        } catch (Exception e) {
-          SolrException.log(log, null, e);
-        }
-      }
-
-      Throwable tc = ex;
-      Throwable c = null;
-      do {
-        tc = tc.getCause();
-        if (tc != null) {
-          c = tc;
-        }
-      } while (tc != null);
-
-      String rootMsg = "";
-      if (c != null) {
-        rootMsg = " Caused by: " + c.getMessage();
-      }
-
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "Error CREATEing SolrCore '" + coreName + "': " + ex.getMessage() + rootMsg, ex);
-    }
-  }
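
A minimal usage sketch of create(...), assuming an initialized CoreContainer `cc`; the core
name and the "configSet" property value below are hypothetical examples:

    Map<String, String> params = new HashMap<>();
    params.put("configSet", "_default"); // assumed core property selecting a config set
    SolrCore core = cc.create("mycore", params);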
-
-  /**
-   * Creates a new core based on a CoreDescriptor.
-   *
-   * @param dcore        a core descriptor
-   * @param publishState publish core state to the cluster if true
-   *
-   * WARNING: Any call to this method should be surrounded by a try/finally block
-   *          that calls solrCores.waitAddPendingCoreOps(...) and solrCores.removeFromPendingOps(...)
-   *
-   *  <pre>
-   *   <code>
-   *   try {
-   *      solrCores.waitAddPendingCoreOps(dcore.getName());
-   *      createFromDescriptor(...);
-   *   } finally {
-   *      solrCores.removeFromPendingOps(dcore.getName());
-   *   }
-   *   </code>
-   * </pre>
-   *
-   *  Trying to put the waitAddPending... in this method results in Bad Things Happening due to race conditions.
-   *  getCore() depends on getting the core returned _if_ it's in the pending list due to some other thread opening it.
-   *  If the core is not in the pending list and not loaded, then getCore() calls this method. Any code that first
-   *  checked whether the core was loaded _or_ in pending ops and, based on that result, called createFromDescriptor
-   *  would introduce a race condition; see getCore() for the place where it would be a problem.
-   *
-   * @return the newly created core
-   */
-  @SuppressWarnings("resource")
-  private SolrCore createFromDescriptor(CoreDescriptor dcore, boolean publishState, boolean newCollection) {
-
-    if (isShutDown) {
-      throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Solr has been shutdown.");
-    }
-
-    SolrCore core = null;
-    try {
-      MDCLoggingContext.setCoreDescriptor(this, dcore);
-      SolrIdentifierValidator.validateCoreName(dcore.getName());
-      if (zkSys.getZkController() != null) {
-        zkSys.getZkController().preRegister(dcore, publishState);
-      }
-
-      ConfigSet coreConfig = getConfigSet(dcore);
-      dcore.setConfigSetTrusted(coreConfig.isTrusted());
-      log.info("Creating SolrCore '{}' using configuration from {}, trusted={}", dcore.getName(), coreConfig.getName(), dcore.isConfigSetTrusted());
-      try {
-        core = new SolrCore(this, dcore, coreConfig);
-      } catch (SolrException e) {
-        core = processCoreCreateException(e, dcore, coreConfig);
-      }
-
-      // always kick off recovery if we are in non-Cloud mode
-      if (!isZooKeeperAware() && core.getUpdateHandler().getUpdateLog() != null) {
-        core.getUpdateHandler().getUpdateLog().recoverFromLog();
-      }
-
-      registerCore(dcore, core, publishState, newCollection);
-
-      return core;
-    } catch (Exception e) {
-      coreInitFailures.put(dcore.getName(), new CoreLoadFailure(dcore, e));
-      if (e instanceof ZkController.NotInClusterStateException && !newCollection) {
-        // this mostly happens when the core was deleted while this node was down
-        unload(dcore.getName(), true, true, true);
-        throw e;
-      }
-      solrCores.removeCoreDescriptor(dcore);
-      final SolrException solrException = new SolrException(ErrorCode.SERVER_ERROR, "Unable to create core [" + dcore.getName() + "]", e);
-      if (core != null && !core.isClosed())
-        IOUtils.closeQuietly(core);
-      throw solrException;
-    } catch (Throwable t) {
-      SolrException e = new SolrException(ErrorCode.SERVER_ERROR, "JVM Error creating core [" + dcore.getName() + "]: " + t.getMessage(), t);
-      coreInitFailures.put(dcore.getName(), new CoreLoadFailure(dcore, e));
-      solrCores.removeCoreDescriptor(dcore);
-      if (core != null && !core.isClosed())
-        IOUtils.closeQuietly(core);
-      throw t;
-    } finally {
-      MDCLoggingContext.clear();
-    }
-  }
-
-  public boolean isSharedFs(CoreDescriptor cd) {
-    try (SolrCore core = this.getCore(cd.getName())) {
-      if (core != null) {
-        return core.getDirectoryFactory().isSharedStorage();
-      } else {
-        ConfigSet configSet = getConfigSet(cd);
-        return DirectoryFactory.loadDirectoryFactory(configSet.getSolrConfig(), this, null).isSharedStorage();
-      }
-    }
-  }
-
-  private ConfigSet getConfigSet(CoreDescriptor cd) {
-    return coreConfigService.getConfig(cd);
-  }
-  
-  /**
-   * Take action when we fail to create a SolrCore. If the error is due to a corrupt index, try to recover. The recovery
-   * strategy can be specified via the system property "-DCoreInitFailedAction={fromleader, none}".
-   *
-   * @see CoreInitFailedAction
-   *
-   * @param original
-   *          the problem seen when loading the core the first time.
-   * @param dcore
-   *          core descriptor for the core to create
-   * @param coreConfig
-   *          core config for the core to create
-   * @return a newly created SolrCore if recovery was possible
-   * @throws SolrException
-   *           rethrows the original exception if we will not attempt to recover, throws a new SolrException with the
-   *           original exception as a suppressed exception if there is a second problem creating the solr core.
-   */
-  private SolrCore processCoreCreateException(SolrException original, CoreDescriptor dcore, ConfigSet coreConfig) {
-    // Traverse full chain since CIE may not be root exception
-    Throwable cause = original;
-    while ((cause = cause.getCause()) != null) {
-      if (cause instanceof CorruptIndexException) {
-        break;
-      }
-    }
-    
-    // If no CorruptIndexException, nothing we can try here
-    if (cause == null) throw original;
-    
-    CoreInitFailedAction action = CoreInitFailedAction.valueOf(System.getProperty(CoreInitFailedAction.class.getSimpleName(), "none"));
-    log.debug("CorruptIndexException while creating core, will attempt to repair via {}", action);
-    
-    switch (action) {
-      case fromleader: // Recovery from leader on a CorruptIndexException
-        if (isZooKeeperAware()) {
-          CloudDescriptor desc = dcore.getCloudDescriptor();
-          try {
-            Replica leader = getZkController().getClusterState()
-                .getCollection(desc.getCollectionName())
-                .getSlice(desc.getShardId())
-                .getLeader();
-            if (leader != null && leader.getState() == State.ACTIVE) {
-              log.info("Found active leader, will attempt to create fresh core and recover.");
-              resetIndexDirectory(dcore, coreConfig);
-              // the index of this core is emptied, its term should be set to 0
-              getZkController().getShardTerms(desc.getCollectionName(), desc.getShardId()).setTermToZero(desc.getCoreNodeName());
-              return new SolrCore(this, dcore, coreConfig);
-            }
-          } catch (SolrException se) {
-            se.addSuppressed(original);
-            throw se;
-          }
-        }
-        throw original;
-      case none:
-        throw original;
-      default:
-        log.warn("Failed to create core, and did not recognize specified 'CoreInitFailedAction': [{}]. Valid options are {}.",
-            action, Arrays.asList(CoreInitFailedAction.values()));
-        throw original;
-    }
-  }
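
As read above, the recovery strategy is keyed by CoreInitFailedAction.class.getSimpleName(),
so a hedged sketch of selecting the "fromleader" strategy is simply to set that system
property before cores are created:

    // Equivalent to passing -DCoreInitFailedAction=fromleader to the JVM at startup.
    System.setProperty("CoreInitFailedAction", "fromleader");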
-
-  /**
-   * Write a new index directory for a SolrCore, but do so without loading it.
-   */
-  private void resetIndexDirectory(CoreDescriptor dcore, ConfigSet coreConfig) {
-    SolrConfig config = coreConfig.getSolrConfig();
-
-    String registryName = SolrMetricManager.getRegistryName(SolrInfoBean.Group.core, dcore.getName());
-    DirectoryFactory df = DirectoryFactory.loadDirectoryFactory(config, this, registryName);
-    String dataDir = SolrCore.findDataDir(df, null, config, dcore);
-
-    String tmpIdxDirName = "index." + new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date());
-    SolrCore.modifyIndexProps(df, dataDir, config, tmpIdxDirName);
-
-    // Free the directory object that we had to create for this
-    Directory dir = null;
-    try {
-      dir = df.get(dataDir, DirContext.META_DATA, config.indexConfig.lockType);
-    } catch (IOException e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
-    } finally {
-      try {
-        df.release(dir);
-        df.doneWithDirectory(dir);
-      } catch (IOException e) {
-        SolrException.log(log, e);
-      }
-    }
-  }
-
-  /**
-   * @return a Collection of registered SolrCores
-   */
-  public Collection<SolrCore> getCores() {
-    return solrCores.getCores();
-  }
-
-  /**
-   * Gets the cores that are currently loaded, i.e. cores that have:
-   * <ol>
-   *  <li>loadOnStartup=true and are either not-transient or, if transient, have been loaded and have not been aged out</li>
-   *  <li>loadOnStartup=false and have been loaded but are either non-transient or have not been aged out</li>
-   * </ol>
-   *
-   * Put another way, this will not return any names of cores that are lazily loaded but have not been called for yet
-   * or are transient and either not loaded or have been swapped out.
-   *
-   */
-  public Collection<String> getLoadedCoreNames() {
-    return solrCores.getLoadedCoreNames();
-  }
-
-  /** This method is currently experimental.
-   *
-   * @return a Collection of the names that a specific core object is mapped to, if there are more than one.
-   */
-  public Collection<String> getNamesForCore(SolrCore core) {
-    return solrCores.getNamesForCore(core);
-  }
-
-  /**
-   * Get a list of all the cores that are currently known, whether currently loaded or not.
-   * @return a list of all the available core names in either permanent or transient cores
-   */
-  public Collection<String> getAllCoreNames() {
-    return solrCores.getAllCoreNames();
-  }
-
-  /**
-   * Returns an immutable Map of Exceptions that occurred when initializing
-   * SolrCores (either at startup, or due to runtime requests to create cores)
-   * keyed off of the name (String) of the SolrCore that had the Exception 
-   * during initialization.
-   * <p>
-   * While the Map returned by this method is immutable and will not change 
-   * once returned to the client, the source data used to generate this Map 
-   * can be changed as various SolrCore operations are performed:
-   * </p>
-   * <ul>
-   *  <li>Failed attempts to create new SolrCores will add new Exceptions.</li>
-   *  <li>Failed attempts to re-create a SolrCore using a name already contained in this Map will replace the Exception.</li>
-   *  <li>Failed attempts to reload a SolrCore will cause an Exception to be added to this list -- even though the existing SolrCore with that name will continue to be available.</li>
-   *  <li>Successful attempts to re-create a SolrCore using a name already contained in this Map will remove the Exception.</li>
-   *  <li>Registering an existing SolrCore with a name already contained in this Map (ie: ALIAS or SWAP) will remove the Exception.</li>
-   * </ul>
-   */
-  public Map<String, CoreLoadFailure> getCoreInitFailures() {
-    return ImmutableMap.copyOf(coreInitFailures);
-  }
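
A short sketch of inspecting these failures, assuming a CoreContainer `cc` and an slf4j
logger `log` (both names are assumptions):

    for (Map.Entry<String, CoreContainer.CoreLoadFailure> e : cc.getCoreInitFailures().entrySet()) {
      log.warn("Core [{}] failed to initialize", e.getKey(), e.getValue().exception);
    }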
-
-
-  // ---------------- Core name related methods ---------------
-
-  private CoreDescriptor reloadCoreDescriptor(CoreDescriptor oldDesc) {
-    if (oldDesc == null) {
-      return null;
-    }
-
-    CorePropertiesLocator cpl = new CorePropertiesLocator(null);
-    CoreDescriptor ret = cpl.buildCoreDescriptor(oldDesc.getInstanceDir().resolve(PROPERTIES_FILENAME), this);
-
-    // Ok, this little jewel is all because we still create core descriptors on the fly from lists of properties
-    // in tests particularly. Theoretically, there should be _no_ way to create a CoreDescriptor in the new world
-    // of core discovery without writing the core.properties file out first.
-    //
-    // TODO: remove core.properties from the conf directory in test files, it's in a bad place there anyway.
-    if (ret == null) {
-      oldDesc.loadExtraProperties(); // there may be changes to extra properties that we need to pick up.
-      return oldDesc;
-
-    }
-    // The CloudDescriptor bit here is created in a very convoluted way, requiring access to private methods
-    // in ZkController. When reloading, this behavior is identical to what used to happen where a copy of the old
-    // CoreDescriptor was just re-used.
-
-    if (ret.getCloudDescriptor() != null) {
-      ret.getCloudDescriptor().reload(oldDesc.getCloudDescriptor());
-    }
-
-    return ret;
-  }
-
-  /**
-   * Recreates a SolrCore.
-   * While the new core is loading, requests will continue to be dispatched to
-   * and processed by the old core
-   * 
-   * @param name the name of the SolrCore to reload
-   */
-  public void reload(String name) {
-    SolrCore core = solrCores.getCoreFromAnyList(name, false);
-    if (core != null) {
-
-      // The underlying core properties files may have changed, we don't really know. So we have a (perhaps) stale
-      // CoreDescriptor and we need to reload it from the disk files
-      CoreDescriptor cd = reloadCoreDescriptor(core.getCoreDescriptor());
-      solrCores.addCoreDescriptor(cd);
-      try {
-        solrCores.waitAddPendingCoreOps(cd.getName());
-        ConfigSet coreConfig = coreConfigService.getConfig(cd);
-        log.info("Reloading SolrCore '{}' using configuration from {}", cd.getName(), coreConfig.getName());
-        SolrCore newCore = core.reload(coreConfig);
-        registerCore(cd, newCore, false, false);
-        if (getZkController() != null) {
-          DocCollection docCollection = getZkController().getClusterState().getCollection(cd.getCollectionName());
-          Replica replica = docCollection.getReplica(cd.getCloudDescriptor().getCoreNodeName());
-          assert replica != null;
-          if (replica.getType() == Replica.Type.TLOG) { //TODO: needed here?
-            getZkController().stopReplicationFromLeader(core.getName());
-            if (!cd.getCloudDescriptor().isLeader()) {
-              getZkController().startReplicationFromLeader(newCore.getName(), true);
-            }
-
-          } else if (replica.getType() == Replica.Type.PULL) {
-            getZkController().stopReplicationFromLeader(core.getName());
-            getZkController().startReplicationFromLeader(newCore.getName(), false);
-          }
-        }
-      } catch (SolrCoreState.CoreIsClosedException e) {
-        throw e;
-      } catch (Exception e) {
-        coreInitFailures.put(cd.getName(), new CoreLoadFailure(cd, e));
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to reload core [" + cd.getName() + "]", e);
-      }
-      finally {
-        solrCores.removeFromPendingOps(cd.getName());
-      }
-    } else {
-      CoreLoadFailure clf = coreInitFailures.get(name);
-      if (clf != null) {
-        try {
-          solrCores.waitAddPendingCoreOps(clf.cd.getName());
-          createFromDescriptor(clf.cd, true, false);
-        } finally {
-          solrCores.removeFromPendingOps(clf.cd.getName());
-        }
-      } else {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No such core: " + name );
-      }
-    }
-  }
-
-  /**
-   * Swaps two SolrCore descriptors.
-   */
-  public void swap(String n0, String n1) {
-    if (n0 == null || n1 == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cannot swap unnamed cores.");
-    }
-    solrCores.swap(n0, n1);
-
-    coresLocator.swap(this, solrCores.getCoreDescriptor(n0), solrCores.getCoreDescriptor(n1));
-
-    log.info("swapped: " + n0 + " with " + n1);
-  }
-
-  /**
-   * Unload a core from this container, leaving all files on disk
-   * @param name the name of the core to unload
-   */
-  public void unload(String name) {
-    unload(name, false, false, false);
-  }
-
-  /**
-   * Unload a core from this container, optionally removing the core's data and configuration
-   *
-   * @param name the name of the core to unload
-   * @param deleteIndexDir if true, delete the core's index on close
-   * @param deleteDataDir if true, delete the core's data directory on close
-   * @param deleteInstanceDir if true, delete the core's instance directory on close
-   */
-  public void unload(String name, boolean deleteIndexDir, boolean deleteDataDir, boolean deleteInstanceDir) {
-
-    CoreDescriptor cd = solrCores.getCoreDescriptor(name);
-    
-    if (name != null) {
-      // check for core-init errors first
-      CoreLoadFailure loadFailure = coreInitFailures.remove(name);
-      if (loadFailure != null) {
-        // getting the index directory requires opening a DirectoryFactory with a SolrConfig, etc,
-        // which we may not be able to do because of the init error.  So we just go with what we
-        // can glean from the CoreDescriptor - datadir and instancedir
-        SolrCore.deleteUnloadedCore(loadFailure.cd, deleteDataDir, deleteInstanceDir);
-        // If last time around we didn't successfully load, make sure that all traces of the coreDescriptor are gone.
-        if (cd != null) {
-          solrCores.removeCoreDescriptor(cd);
-          coresLocator.delete(this, cd);
-        }
-        return;
-      }
-    }
-      
-    if (cd == null) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Cannot unload non-existent core [" + name + "]");
-    }
-
-    boolean close = solrCores.isLoadedNotPendingClose(name);
-    SolrCore core = solrCores.remove(name);
-
-    solrCores.removeCoreDescriptor(cd);
-    coresLocator.delete(this, cd);
-    if (core == null) {
-      // transient core
-      SolrCore.deleteUnloadedCore(cd, deleteDataDir, deleteInstanceDir);
-      return;
-    }
-
-    // delete metrics specific to this core
-    metricManager.removeRegistry(core.getCoreMetricManager().getRegistryName());
-
-    if (zkSys.getZkController() != null) {
-      // cancel recovery in cloud mode
-      core.getSolrCoreState().cancelRecovery();
-      if (cd.getCloudDescriptor().getReplicaType() == Replica.Type.PULL
-          || cd.getCloudDescriptor().getReplicaType() == Replica.Type.TLOG) {
-        // Stop replication if this is part of a pull/tlog replica before closing the core
-        zkSys.getZkController().stopReplicationFromLeader(name);
-      }
-    }
-    
-    core.unloadOnClose(cd, deleteIndexDir, deleteDataDir, deleteInstanceDir);
-    if (close)
-      core.closeAndWait();
-
-    if (zkSys.getZkController() != null) {
-      try {
-        zkSys.getZkController().unregister(name, cd);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Interrupted while unregistering core [" + name + "] from cloud state");
-      } catch (KeeperException e) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Error unregistering core [" + name + "] from cloud state", e);
-      } catch (Exception e) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Error unregistering core [" + name + "] from cloud state", e);
-      }
-    }
-  }
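
A hedged usage sketch of the flags above, assuming a CoreContainer `cc` and a hypothetical
core name, dropping the core's data directory while keeping its instance directory on disk:

    cc.unload("mycore", false, true, false); // deleteIndexDir=false, deleteDataDir=true, deleteInstanceDir=false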
-
-  public void rename(String name, String toName) {
-    SolrIdentifierValidator.validateCoreName(toName);
-    try (SolrCore core = getCore(name)) {
-      if (core != null) {
-        String oldRegistryName = core.getCoreMetricManager().getRegistryName();
-        String newRegistryName = SolrCoreMetricManager.createRegistryName(core, toName);
-        metricManager.swapRegistries(oldRegistryName, newRegistryName);
-        // The old coreDescriptor is obsolete, so remove it. registerCore will put it back.
-        CoreDescriptor cd = core.getCoreDescriptor();
-        solrCores.removeCoreDescriptor(cd);
-        cd.setProperty("name", toName);
-        solrCores.addCoreDescriptor(cd);
-        core.setName(toName);
-        registerCore(cd, core, true, false);
-        SolrCore old = solrCores.remove(name);
-
-        coresLocator.rename(this, old.getCoreDescriptor(), core.getCoreDescriptor());
-      }
-    }
-  }
-
-  /**
-   * Get the CoreDescriptors for all cores managed by this container
-   * @return a List of CoreDescriptors
-   */
-  public List<CoreDescriptor> getCoreDescriptors() {
-    return solrCores.getCoreDescriptors();
-  }
-
-  public CoreDescriptor getCoreDescriptor(String coreName) {
-    return solrCores.getCoreDescriptor(coreName);
-  }
-
-  public Path getCoreRootDirectory() {
-    return cfg.getCoreRootDirectory();
-  }
-
-  /**
-   * Gets a core by name and increase its refcount.
-   *
-   * @see SolrCore#close()
-   * @param name the core name
-   * @return the core if found, null if a SolrCore by this name does not exist
-   * @exception SolrCoreInitializationException if a SolrCore with this name failed to be initialized
-   */
-  public SolrCore getCore(String name) {
-
-    // Do this in two phases since we don't want to lock access to the cores over a load.
-    SolrCore core = solrCores.getCoreFromAnyList(name, true);
-
-    // If a core is loaded, we're done just return it.
-    if (core != null) {
-      return core;
-    }
-
-    // If it's not yet loaded, we can check if it's had a core init failure and "do the right thing"
-    CoreDescriptor desc = solrCores.getCoreDescriptor(name);
-
-    // if there was an error initializing this core, throw a 500
-    // error with the details for clients attempting to access it.
-    CoreLoadFailure loadFailure = getCoreInitFailures().get(name);
-    if (null != loadFailure) {
-      throw new SolrCoreInitializationException(name, loadFailure.exception);
-    }
-    // This is a bit of awkwardness where SolrCloud and transient cores don't play nice together. For transient cores,
-    // we have to allow them to be created at any time there hasn't been a core load failure (use reload to cure that).
-    // But for TestConfigSetsAPI.testUploadWithScriptUpdateProcessor, this needs to _not_ try to load the core if
-    // the core is null and there was an error. If you change this, be sure to run both TestConfigSetsAPI and
-    // TestLazyCores
-    if (desc == null || zkSys.getZkController() != null) return null;
-
-    // This will put an entry in pending core ops if the core isn't loaded. Here's where moving the
-    // waitAddPendingCoreOps to createFromDescriptor would introduce a race condition.
-    core = solrCores.waitAddPendingCoreOps(name);
-
-    if (isShutDown) return null; // We're quitting, so stop. This needs to be after the wait above since we may come off
-    // the wait as a consequence of shutting down.
-    try {
-      if (core == null) {
-        if (zkSys.getZkController() != null) {
-          zkSys.getZkController().throwErrorIfReplicaReplaced(desc);
-        }
-        core = createFromDescriptor(desc, true, false); // This should throw an error if it fails.
-      }
-      core.open();
-    }
-    finally {
-      solrCores.removeFromPendingOps(name);
-    }
-
-    return core;
-  }
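
Because getCore() increments the core's refcount, callers are expected to close the returned
core; a sketch using try-with-resources, as this class itself does elsewhere ("mycore" and
`cc` are assumptions):

    try (SolrCore core = cc.getCore("mycore")) {
      if (core != null) {
        // use the core; close() decrements the refcount when the block exits
      }
    }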
-
-  public BlobRepository getBlobRepository(){
-    return blobRepository;
-  }
-  
-  /**
-   * If using asyncSolrCoreLoad=true, calling this after {@link #load()} will
-   * not return until all cores have finished loading.
-   * 
-   * @param timeoutMs timeout in milliseconds, upon which the method simply returns
-   */
-  public void waitForLoadingCoresToFinish(long timeoutMs) {
-    solrCores.waitForLoadingCoresToFinish(timeoutMs);
-  }
-  
-  public void waitForLoadingCore(String name, long timeoutMs) {
-    solrCores.waitForLoadingCoreToFinish(name, timeoutMs);
-  }
-
-  // ---------------- CoreContainer request handlers --------------
-
-  protected <T> T createHandler(String path, String handlerClass, Class<T> clazz) {
-    T handler = loader.newInstance(handlerClass, clazz, null, new Class[] { CoreContainer.class }, new Object[] { this });
-    if (handler instanceof SolrRequestHandler) {
-      containerHandlers.put(path, (SolrRequestHandler)handler);
-    }
-    if (handler instanceof SolrMetricProducer) {
-      ((SolrMetricProducer)handler).initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), metricTag, path);
-    }
-    return handler;
-  }
-
-  public CoreAdminHandler getMultiCoreHandler() {
-    return coreAdminHandler;
-  }
-
-  public CollectionsHandler getCollectionsHandler() {
-    return collectionsHandler;
-  }
-
-  public HealthCheckHandler getHealthCheckHandler() { return healthCheckHandler; }
-
-  public InfoHandler getInfoHandler() {
-    return infoHandler;
-  }
-
-  public ConfigSetsHandler getConfigSetsHandler() {
-    return configSetsHandler;
-  }
-
-  public String getHostName() {
-    return this.hostName;
-  }
-
-  /**
-   * Gets the alternate path for multicore handling:
-   * This is used in case there is a registered unnamed core (aka name is "") to
-   * declare an alternate way of accessing named cores.
-   * This can also be used in a pseudo single-core environment so admins can prepare
-   * a new version before swapping.
-   */
-  public String getManagementPath() {
-    return cfg.getManagementPath();
-  }
-
-  public LogWatcher getLogging() {
-    return logging;
-  }
-
-  /**
-   * Determines whether the core is already loaded or not but does NOT load the core
-   *
-   */
-  public boolean isLoaded(String name) {
-    return solrCores.isLoaded(name);
-  }
-
-  public boolean isLoadedNotPendingClose(String name) {
-    return solrCores.isLoadedNotPendingClose(name);
-  }
-
-  /**
-   * Gets a solr core descriptor for a core that is not loaded. Note that if the caller calls this on a
-   * loaded core, the unloaded descriptor will be returned.
-   *
-   * @param cname - name of the unloaded core descriptor to load. NOTE:
-   * @return a coreDescriptor. May return null
-   */
-  public CoreDescriptor getUnloadedCoreDescriptor(String cname) {
-    return solrCores.getUnloadedCoreDescriptor(cname);
-  }
-
-  public String getSolrHome() {
-    return solrHome;
-  }
-
-  public boolean isZooKeeperAware() {
-    return zkSys.getZkController() != null;
-  }
-  
-  public ZkController getZkController() {
-    return zkSys.getZkController();
-  }
-  
-  public NodeConfig getConfig() {
-    return cfg;
-  }
-
-  /** The default ShardHandlerFactory used to communicate with other solr instances */
-  public ShardHandlerFactory getShardHandlerFactory() {
-    return shardHandlerFactory;
-  }
-  
-  public UpdateShardHandler getUpdateShardHandler() {
-    return updateShardHandler;
-  }
-
-  public SolrResourceLoader getResourceLoader() {
-    return loader;
-  }
-  
-  public boolean isCoreLoading(String name) {
-    return solrCores.isCoreLoading(name);
-  }
-
-  public AuthorizationPlugin getAuthorizationPlugin() {
-    return authorizationPlugin == null ? null : authorizationPlugin.plugin;
-  }
-
-  public AuthenticationPlugin getAuthenticationPlugin() {
-    return authenticationPlugin == null ? null : authenticationPlugin.plugin;
-  }
-
-  public NodeConfig getNodeConfig() {
-    return cfg;
-  }
-
-  public long getStatus() {
-    return status;
-  }
-  
-  // Occasionally we need to access the transient cache handler in places other than CoreContainer.
-  public TransientSolrCoreCache getTransientCache() {
-    return solrCores.getTransientCacheHandler();
-  }
-
-
-  /**
-   *
-   * @param cd CoreDescriptor, presumably a deficient one
-   * @param prop The property that needs to be repaired.
-   * @return true if we were able to successfully persist the repaired coreDescriptor, false otherwise.
-   *
-   * See SOLR-11503. This can be removed when there's no chance we'll need to upgrade a
-   * Solr installation created with legacyCloud=true from 6.6.1 through 7.1.
-   */
-  public boolean repairCoreProperty(CoreDescriptor cd, String prop) {
-    // So far, coreNodeName is the only property that we need to repair, this may get more complex as other properties
-    // are added.
-
-    if (CoreDescriptor.CORE_NODE_NAME.equals(prop) == false) {
-      throw new SolrException(ErrorCode.SERVER_ERROR,
-          String.format(Locale.ROOT,"The only supported property for repair is currently [%s]",
-              CoreDescriptor.CORE_NODE_NAME));
-    }
-
-    // Try to read the coreNodeName from the cluster state.
-
-    String coreName = cd.getName();
-    DocCollection coll = getZkController().getZkStateReader().getClusterState().getCollection(cd.getCollectionName());
-    for (Replica rep : coll.getReplicas()) {
-      if (coreName.equals(rep.getCoreName())) {
-        log.warn("Core properties file for node {} found with no coreNodeName, attempting to repair with value {}. See SOLR-11503. " +
-            "This message should only appear if upgrading from collections created with Solr 6.6.1 through 7.1.",
-            rep.getCoreName(), rep.getName());
-        cd.getCloudDescriptor().setCoreNodeName(rep.getName());
-        coresLocator.persist(this, cd);
-        return true;
-      }
-    }
-    log.error("Could not repair coreNodeName in core.properties file for core {}", coreName);
-    return false;
-  }
-
-  /**
-   * @param solrCore the core to check for a tragic exception
-   * @return whether this Solr core has a tragic exception
-   */
-  public boolean checkTragicException(SolrCore solrCore) {
-    Throwable tragicException;
-    try {
-      tragicException = solrCore.getSolrCoreState().getTragicException();
-    } catch (IOException e) {
-      // failed to open an indexWriter
-      tragicException = e;
-    }
-
-    if (tragicException != null) {
-      if (isZooKeeperAware()) {
-        getZkController().giveupLeadership(solrCore.getCoreDescriptor(), tragicException);
-      }
-    }
-    
-    return tragicException != null;
-  }
-
-}
-
-class CloserThread extends Thread {
-  CoreContainer container;
-  SolrCores solrCores;
-  NodeConfig cfg;
-
-
-  CloserThread(CoreContainer container, SolrCores solrCores, NodeConfig cfg) {
-    this.container = container;
-    this.solrCores = solrCores;
-    this.cfg = cfg;
-  }
-
-  // It's important that this be the _only_ thread removing things from pendingDynamicCloses!
-  // This is single-threaded, but I tried a multi-threaded approach and didn't see any performance gains, so
-  // there's no good justification for the complexity. I suspect that the locking on things like DefaultSolrCoreState
-  // essentially creates a single-threaded process anyway.
-  @Override
-  public void run() {
-    while (! container.isShutDown()) {
-      synchronized (solrCores.getModifyLock()) { // need this so we can wait and be awoken.
-        try {
-          solrCores.getModifyLock().wait();
-        } catch (InterruptedException e) {
-          // Well, if we've been told to stop, we will. Otherwise, continue on and check to see if there are
-          // any cores to close.
-        }
-      }
-      for (SolrCore removeMe = solrCores.getCoreToClose();
-           removeMe != null && !container.isShutDown();
-           removeMe = solrCores.getCoreToClose()) {
-        try {
-          removeMe.close();
-        } finally {
-          solrCores.removeFromPendingOps(removeMe.getName());
-        }
-      }
-    }
-  }
-}


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java b/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java
deleted file mode 100644
index 01f8875..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java
+++ /dev/null
@@ -1,1570 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.commons.lang.ArrayUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.FixedBitSet;
-import org.apache.solr.client.solrj.util.ClientUtils;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.FacetParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.request.SimpleFacets;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.schema.FieldType;
-import org.apache.solr.schema.PointField;
-import org.apache.solr.search.QueryParsing;
-import org.apache.solr.search.DocSet;
-import org.apache.solr.search.SyntaxError;
-import org.apache.solr.search.facet.FacetDebugInfo;
-import org.apache.solr.util.RTimer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Computes facets -- aggregations with counts of terms or ranges over the whole search results.
- *
- * @since solr 1.3
- */
-@SuppressWarnings("rawtypes")
-public class FacetComponent extends SearchComponent {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  public static final String COMPONENT_NAME = "facet";
-
-  public static final String FACET_QUERY_KEY = "facet_queries";
-  public static final String FACET_FIELD_KEY = "facet_fields";
-  public static final String FACET_RANGES_KEY = "facet_ranges";
-  public static final String FACET_INTERVALS_KEY = "facet_intervals";
-
-  private static final String PIVOT_KEY = "facet_pivot";
-  private static final String PIVOT_REFINE_PREFIX = "{!"+PivotFacet.REFINE_PARAM+"=";
-
-  @Override
-  public void prepare(ResponseBuilder rb) throws IOException {
-    if (rb.req.getParams().getBool(FacetParams.FACET, false)) {
-      rb.setNeedDocSet(true);
-      rb.doFacets = true;
-
-      // Deduplicate facet params
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      SolrParams origParams = rb.req.getParams();
-      Iterator<String> iter = origParams.getParameterNamesIterator();
-      while (iter.hasNext()) {
-        String paramName = iter.next();
-        // Deduplicate the list with LinkedHashSet, but _only_ for facet params.
-        if (!paramName.startsWith(FacetParams.FACET)) {
-          params.add(paramName, origParams.getParams(paramName));
-          continue;
-        }
-        HashSet<String> deDupe = new LinkedHashSet<>(Arrays.asList(origParams.getParams(paramName)));
-        params.add(paramName, deDupe.toArray(new String[deDupe.size()]));
-
-      }
-      rb.req.setParams(params);
-
-      // Initialize context
-      FacetContext.initContext(rb);
-    }
-  }
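
A small sketch of the de-duplication effect above (parameter values are hypothetical): only
facet parameters are de-duplicated, and values keep their first-seen order via LinkedHashSet:

    ModifiableSolrParams p = new ModifiableSolrParams();
    p.add("facet.field", "cat", "cat", "price"); // duplicate "cat" on purpose
    // after prepare(), the effective request params carry facet.field=[cat, price]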
-
-  /* Custom facet components can return a custom SimpleFacets object */
-  protected SimpleFacets newSimpleFacets(SolrQueryRequest req, DocSet docSet, SolrParams params, ResponseBuilder rb) {
-    return new SimpleFacets(req, docSet, params, rb);
-  }
-
-  /**
-   * Encapsulates facet ranges and facet queries such that their parameters
-   * are parsed and cached for efficient re-use.
-   * <p>
-   * An instance of this class is initialized and kept in the request context via the static
-   * method {@link org.apache.solr.handler.component.FacetComponent.FacetContext#initContext(ResponseBuilder)} and
-   * can be retrieved via {@link org.apache.solr.handler.component.FacetComponent.FacetContext#getFacetContext(SolrQueryRequest)}
-   * <p>
-   * This class is used exclusively in a single-node context (i.e. non-distributed requests or an individual shard
-   * request). Also see {@link org.apache.solr.handler.component.FacetComponent.FacetInfo} which is
-   * dedicated exclusively for merging responses from multiple shards and plays no role during computation of facet
-   * counts in a single node request.
-   *
-   * <b>This API is experimental and subject to change</b>
-   *
-   * @see org.apache.solr.handler.component.FacetComponent.FacetInfo
-   */
-  public static class FacetContext {
-    private static final String FACET_CONTEXT_KEY = "_facet.context";
-
-    private final List<RangeFacetRequest> allRangeFacets; // init in constructor
-    private final List<FacetBase> allQueryFacets; // init in constructor
-
-    private final Map<String, List<RangeFacetRequest>> taggedRangeFacets;
-    private final Map<String, List<FacetBase>> taggedQueryFacets;
-
-    /**
-     * Initializes FacetContext using request parameters and saves it in the request
-     * context which can be retrieved via {@link #getFacetContext(SolrQueryRequest)}
-     *
-     * @param rb the ResponseBuilder object from which the request parameters are read
-     *           and to which the FacetContext object is saved.
-     */
-    public static void initContext(ResponseBuilder rb)  {
-      // Parse facet queries and ranges and put them in the request
-      // context so that they can be hung under pivots if needed without re-parsing
-      List<RangeFacetRequest> facetRanges = null;
-      List<FacetBase> facetQueries = null;
-
-      String[] ranges = rb.req.getParams().getParams(FacetParams.FACET_RANGE);
-      if (ranges != null) {
-        facetRanges = new ArrayList<>(ranges.length);
-        for (String range : ranges) {
-          RangeFacetRequest rangeFacetRequest = new RangeFacetRequest(rb, range);
-          facetRanges.add(rangeFacetRequest);
-        }
-      }
-
-      String[] queries = rb.req.getParams().getParams(FacetParams.FACET_QUERY);
-      if (queries != null)  {
-        facetQueries = new ArrayList<>();
-        for (String query : queries) {
-          facetQueries.add(new FacetBase(rb, FacetParams.FACET_QUERY, query));
-        }
-      }
-
-      rb.req.getContext().put(FACET_CONTEXT_KEY, new FacetContext(facetRanges, facetQueries));
-    }
-
-    private FacetContext(List<RangeFacetRequest> allRangeFacets, List<FacetBase> allQueryFacets) {
-      // avoid NPEs, set to empty list if parameters are null
-      this.allRangeFacets = allRangeFacets == null ? Collections.emptyList() : allRangeFacets;
-      this.allQueryFacets = allQueryFacets == null ? Collections.emptyList() : allQueryFacets;
-
-      taggedRangeFacets = new HashMap<>();
-      for (RangeFacetRequest rf : this.allRangeFacets) {
-        for (String tag : rf.getTags()) {
-          List<RangeFacetRequest> list = taggedRangeFacets.get(tag);
-          if (list == null) {
-            list = new ArrayList<>(1); // typically just one object
-            taggedRangeFacets.put(tag, list);
-          }
-          list.add(rf);
-        }
-      }
-
-      taggedQueryFacets = new HashMap<>();
-      for (FacetBase qf : this.allQueryFacets) {
-        for (String tag : qf.getTags()) {
-          List<FacetBase> list = taggedQueryFacets.get(tag);
-          if (list == null) {
-            list = new ArrayList<>(1);
-            taggedQueryFacets.put(tag, list);
-          }
-          list.add(qf);
-        }
-      }
-    }
-
-    /**
-     * Return the {@link org.apache.solr.handler.component.FacetComponent.FacetContext} instance
-     * cached in the request context.
-     *
-     * @param req the {@link SolrQueryRequest}
-     * @return the cached FacetContext instance
-     * @throws IllegalStateException if no cached FacetContext instance is found in the request context
-     */
-    public static FacetContext getFacetContext(SolrQueryRequest req) throws IllegalStateException {
-      FacetContext result = (FacetContext) req.getContext().get(FACET_CONTEXT_KEY);
-      if (null == result) {
-        throw new IllegalStateException("FacetContext can't be accessed before it's initialized in request context");
-      }
-      return result;
-    }
-
-    /**
-     * @return a {@link List} of {@link RangeFacetRequest} objects each representing a facet.range to be
-     * computed. Returns an empty list if no facet.range were requested.
-     */
-    public List<RangeFacetRequest> getAllRangeFacetRequests() {
-      return allRangeFacets;
-    }
-
-    /**
-     * @return a {@link List} of {@link org.apache.solr.handler.component.FacetComponent.FacetBase} objects
-     * each representing a facet.query to be computed. Returns an empty list if no facet.query were requested.
-     */
-    public List<FacetBase> getAllQueryFacets() {
-      return allQueryFacets;
-    }
-
-    /**
-     * @param tag a String tag usually specified via local param on a facet.pivot
-     * @return a list of {@link RangeFacetRequest} objects which have been tagged with the given tag.
-     * Returns an empty list if none found.
-     */
-    public List<RangeFacetRequest> getRangeFacetRequestsForTag(String tag) {
-      List<RangeFacetRequest> list = taggedRangeFacets.get(tag);
-      return list == null ? Collections.emptyList() : list;
-    }
-
-    /**
-     * @param tag a String tag usually specified via local param on a facet.pivot
-     * @return a list of {@link org.apache.solr.handler.component.FacetComponent.FacetBase} objects which have been
-     * tagged with the given tag. Returns an empty List if none found.
-     */
-    public List<FacetBase> getQueryFacetsForTag(String tag) {
-      List<FacetBase> list = taggedQueryFacets.get(tag);
-      return list == null ? Collections.emptyList() : list;
-    }
-  }
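-  // A minimal usage sketch (hypothetical caller code, not part of this class):
-  // initContext is invoked once while preparing the request; any component
-  // that runs later in the same request can then look up tagged facets
-  // without re-parsing the parameters.
-  //
-  //   FacetContext.initContext(rb);
-  //   FacetContext ctx = FacetContext.getFacetContext(rb.req);
-  //   for (RangeFacetRequest rfr : ctx.getRangeFacetRequestsForTag("t1")) {
-  //     // hang this range facet under the pivot tagged "t1"
-  //   }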
-  
-  /**
-   * Actually run the query
-   */
-  @Override
-  public void process(ResponseBuilder rb) throws IOException {
-
-    if (rb.doFacets) {
-      SolrParams params = rb.req.getParams();
-      SimpleFacets f = newSimpleFacets(rb.req, rb.getResults().docSet, params, rb);
-
-      RTimer timer = null;
-      FacetDebugInfo fdebug = null;
-
-      if (rb.isDebug()) {
-        fdebug = new FacetDebugInfo();
-        rb.req.getContext().put("FacetDebugInfo-nonJson", fdebug);
-        timer = new RTimer();
-      }
-
-      NamedList<Object> counts = FacetComponent.getFacetCounts(f, fdebug);
-      String[] pivots = params.getParams(FacetParams.FACET_PIVOT);
-      if (!ArrayUtils.isEmpty(pivots)) {
-        PivotFacetProcessor pivotProcessor 
-          = new PivotFacetProcessor(rb.req, rb.getResults().docSet, params, rb);
-        SimpleOrderedMap<List<NamedList<Object>>> v 
-          = pivotProcessor.process(pivots);
-        if (v != null) {
-          counts.add(PIVOT_KEY, v);
-        }
-      }
-
-      if (fdebug != null) {
-        long timeElapsed = (long) timer.getTime();
-        fdebug.setElapse(timeElapsed);
-      }
-
-      rb.rsp.add("facet_counts", counts);
-    }
-  }
-
-  public static NamedList<Object> getFacetCounts(SimpleFacets simpleFacets) {
-    return getFacetCounts(simpleFacets, null);
-  }
-
-  /**
-   * Looks at various Params to determine if any simple Facet Constraint count
-   * computations are desired.
-   *
-   * @see SimpleFacets#getFacetQueryCounts
-   * @see SimpleFacets#getFacetFieldCounts
-   * @see RangeFacetProcessor#getFacetRangeCounts
-   * @see RangeFacetProcessor#getFacetIntervalCounts
-   * @see FacetParams#FACET
-   * @return a NamedList of Facet Count info or null
-   */
-  public static NamedList<Object> getFacetCounts(SimpleFacets simpleFacets, FacetDebugInfo fdebug) {
-    // if someone called this method, benefit of the doubt: assume true
-    if (!simpleFacets.getGlobalParams().getBool(FacetParams.FACET, true))
-      return null;
-
-    RangeFacetProcessor rangeFacetProcessor = new RangeFacetProcessor(simpleFacets.getRequest(), simpleFacets.getDocsOrig(), simpleFacets.getGlobalParams(), simpleFacets.getResponseBuilder());
-    NamedList<Object> counts = new SimpleOrderedMap<>();
-    try {
-      counts.add(FACET_QUERY_KEY, simpleFacets.getFacetQueryCounts());
-      if (fdebug != null) {
-        FacetDebugInfo fd = new FacetDebugInfo();
-        fd.putInfoItem("action", "field facet");
-        fd.setProcessor(simpleFacets.getClass().getSimpleName());
-        fdebug.addChild(fd);
-        simpleFacets.setFacetDebugInfo(fd);
-        final RTimer timer = new RTimer();
-        counts.add(FACET_FIELD_KEY, simpleFacets.getFacetFieldCounts());
-        long timeElapsed = (long) timer.getTime();
-        fd.setElapse(timeElapsed);
-      } else {
-        counts.add(FACET_FIELD_KEY, simpleFacets.getFacetFieldCounts());
-      }
-      counts.add(FACET_RANGES_KEY, rangeFacetProcessor.getFacetRangeCounts());
-      counts.add(FACET_INTERVALS_KEY, simpleFacets.getFacetIntervalCounts());
-      counts.add(SpatialHeatmapFacets.RESPONSE_KEY, simpleFacets.getHeatmapCounts());
-    } catch (IOException e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, e);
-    } catch (SyntaxError e) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, e);
-    }
-    return counts;
-  }
-
-  private static final String commandPrefix = "{!" + CommonParams.TERMS + "=$";
-  
-  @Override
-  public int distributedProcess(ResponseBuilder rb) throws IOException {
-    if (!rb.doFacets) {
-      return ResponseBuilder.STAGE_DONE;
-    }
-
-    if (rb.stage != ResponseBuilder.STAGE_GET_FIELDS) {
-      return ResponseBuilder.STAGE_DONE;
-    }
-    // Overlap facet refinement requests (those shards that we need a count
-    // for particular facet values from), where possible, with
-    // the requests to get fields (because we know that is the
-    // only other required phase).
-    // We do this in distributedProcess so we can look at all of the
-    // requests in the outgoing queue at once.
-
-    for (int shardNum = 0; shardNum < rb.shards.length; shardNum++) {
-      List<String> distribFieldFacetRefinements = null;
-
-      // FieldFacetAdditions
-      for (DistribFieldFacet dff : rb._facetInfo.facets.values()) {
-        if (!dff.needRefinements) continue;
-        List<String> refList = dff._toRefine[shardNum];
-        if (refList == null || refList.isEmpty()) continue;
-
-        String key = dff.getKey(); // reuse the same key that was used for the
-                                   // main facet
-        String termsKey = key + "__terms";
-        String termsVal = StrUtils.join(refList, ',');
-
-        String facetCommand;
-        // add terms into the original facet.field command
-        // do it via parameter reference to avoid another layer of encoding.
-
-        String termsKeyEncoded = ClientUtils.encodeLocalParamVal(termsKey);
-        if (dff.localParams != null) {
-          facetCommand = commandPrefix + termsKeyEncoded + " "
-              + dff.facetStr.substring(2);
-        } else {
-          facetCommand = commandPrefix + termsKeyEncoded + '}' + dff.field;
-        }
-
-        if (distribFieldFacetRefinements == null) {
-          distribFieldFacetRefinements = new ArrayList<>();
-        }
-
-        distribFieldFacetRefinements.add(facetCommand);
-        distribFieldFacetRefinements.add(termsKey);
-        distribFieldFacetRefinements.add(termsVal);
-      }
-
-      if (distribFieldFacetRefinements != null) {
-        String shard = rb.shards[shardNum];
-        ShardRequest shardsRefineRequest = null;
-        boolean newRequest = false;
-
-        // try to find a request that is already going out to that shard.
-        // If nshards becomes too great, we may want to move to hashing for
-        // better scalability.
-        for (ShardRequest sreq : rb.outgoing) {
-          if ((sreq.purpose & ShardRequest.PURPOSE_GET_FIELDS) != 0
-              && sreq.shards != null
-              && sreq.shards.length == 1
-              && sreq.shards[0].equals(shard)) {
-            shardsRefineRequest = sreq;
-            break;
-          }
-        }
-
-        if (shardsRefineRequest == null) {
-          // we didn't find any other suitable requests going out to that shard,
-          // so create one ourselves.
-          newRequest = true;
-          shardsRefineRequest = new ShardRequest();
-          shardsRefineRequest.shards = new String[] { rb.shards[shardNum] };
-          shardsRefineRequest.params = new ModifiableSolrParams(rb.req.getParams());
-          // don't request any documents
-          shardsRefineRequest.params.remove(CommonParams.START);
-          shardsRefineRequest.params.set(CommonParams.ROWS, "0");
-        }
-
-        shardsRefineRequest.purpose |= ShardRequest.PURPOSE_REFINE_FACETS;
-        shardsRefineRequest.params.set(FacetParams.FACET, "true");
-        removeMainFacetTypeParams(shardsRefineRequest);
-
-        for (int i = 0; i < distribFieldFacetRefinements.size();) {
-          String facetCommand = distribFieldFacetRefinements.get(i++);
-          String termsKey = distribFieldFacetRefinements.get(i++);
-          String termsVal = distribFieldFacetRefinements.get(i++);
-
-          shardsRefineRequest.params.add(FacetParams.FACET_FIELD,
-              facetCommand);
-          shardsRefineRequest.params.set(termsKey, termsVal);
-        }
-
-        if (newRequest) {
-          rb.addRequest(this, shardsRefineRequest);
-        }
-      }
-
-
-      // PivotFacetAdditions
-      if (doAnyPivotFacetRefinementRequestsExistForShard(rb._facetInfo, shardNum)) {
-        enqueuePivotFacetShardRequests(rb, shardNum);
-      }
-
-    } // for shardNum
-
-    return ResponseBuilder.STAGE_DONE;
-  }
-
-  public static String[] FACET_TYPE_PARAMS = {
-      FacetParams.FACET_FIELD, FacetParams.FACET_PIVOT, FacetParams.FACET_QUERY, FacetParams.FACET_DATE,
-      FacetParams.FACET_RANGE, FacetParams.FACET_INTERVAL, FacetParams.FACET_HEATMAP
-  };
-
-  private void removeMainFacetTypeParams(ShardRequest shardsRefineRequest) {
-    for (String param : FACET_TYPE_PARAMS) {
-      shardsRefineRequest.params.remove(param);
-    }
-  }
-
-  private void enqueuePivotFacetShardRequests(ResponseBuilder rb, int shardNum) {
-
-    FacetInfo fi = rb._facetInfo;
-    
-    ShardRequest shardsRefineRequestPivot = new ShardRequest();
-    shardsRefineRequestPivot.shards = new String[] {rb.shards[shardNum]};
-    shardsRefineRequestPivot.params = new ModifiableSolrParams(rb.req.getParams());
-
-    // don't request any documents
-    shardsRefineRequestPivot.params.remove(CommonParams.START);
-    shardsRefineRequestPivot.params.set(CommonParams.ROWS, "0");
-    
-    shardsRefineRequestPivot.purpose |= ShardRequest.PURPOSE_REFINE_PIVOT_FACETS;
-    shardsRefineRequestPivot.params.set(FacetParams.FACET, "true");
-    removeMainFacetTypeParams(shardsRefineRequestPivot);
-    shardsRefineRequestPivot.params.set(FacetParams.FACET_PIVOT_MINCOUNT, -1);
-    shardsRefineRequestPivot.params.remove(FacetParams.FACET_OFFSET);
-    
-    for (int pivotIndex = 0; pivotIndex < fi.pivotFacets.size(); pivotIndex++) {
-      String pivotFacetKey = fi.pivotFacets.getName(pivotIndex);
-      PivotFacet pivotFacet = fi.pivotFacets.getVal(pivotIndex);
-
-      List<PivotFacetValue> queuedRefinementsForShard = 
-        pivotFacet.getQueuedRefinements(shardNum);
-
-      if ( ! queuedRefinementsForShard.isEmpty() ) {
-        
-        String fieldsKey = PivotFacet.REFINE_PARAM + fi.pivotRefinementCounter;
-        String command;
-        
-        if (pivotFacet.localParams != null) {
-          command = PIVOT_REFINE_PREFIX + fi.pivotRefinementCounter + " "
-            + pivotFacet.facetStr.substring(2);
-        } else {
-          command = PIVOT_REFINE_PREFIX + fi.pivotRefinementCounter + "}"
-            + pivotFacet.getKey();
-        }
-        
-        shardsRefineRequestPivot.params.add(FacetParams.FACET_PIVOT, command);
-        for (PivotFacetValue refinementValue : queuedRefinementsForShard) {
-          String refinementStr = PivotFacetHelper
-            .encodeRefinementValuePath(refinementValue.getValuePath());
-          shardsRefineRequestPivot.params.add(fieldsKey, refinementStr);
-          
-        }
-      }
-      fi.pivotRefinementCounter++;
-    }
-    
-    rb.addRequest(this, shardsRefineRequestPivot);
-  }
-  
-  @Override
-  public void modifyRequest(ResponseBuilder rb, SearchComponent who, ShardRequest sreq) {
-
-    if (!rb.doFacets) return;
-    
-    if ((sreq.purpose & ShardRequest.PURPOSE_GET_TOP_IDS) != 0) {
-      sreq.purpose |= ShardRequest.PURPOSE_GET_FACETS;
-      
-      FacetInfo fi = rb._facetInfo;
-      if (fi == null) {
-        rb._facetInfo = fi = new FacetInfo();
-        fi.parse(rb.req.getParams(), rb);
-      }
-      
-      modifyRequestForFieldFacets(rb, sreq, fi);
-
-      modifyRequestForRangeFacets(sreq);
-      
-      modifyRequestForPivotFacets(rb, sreq, fi.pivotFacets);
-
-      SpatialHeatmapFacets.distribModifyRequest(sreq, fi.heatmapFacets);
-      
-      sreq.params.remove(FacetParams.FACET_MINCOUNT);
-      sreq.params.remove(FacetParams.FACET_OFFSET);
-      
-    } else {
-      // turn off faceting on other requests
-      sreq.params.set(FacetParams.FACET, "false");
-      // we could optionally remove faceting params
-    }
-  }
-
-  // we must get all the range buckets back in order to have coherent lists at the end, see SOLR-6154
-  private void modifyRequestForRangeFacets(ShardRequest sreq) {
-    // Collect all the range fields.
-    final String[] fields = sreq.params.getParams(FacetParams.FACET_RANGE);
-    if (fields != null) {
-      for (String field : fields) {
-        sreq.params.set("f." + field + ".facet.mincount", "0");
-      }
-    }
-  }
-
-  private void modifyRequestForFieldFacets(ResponseBuilder rb, ShardRequest sreq, FacetInfo fi) {
-    for (DistribFieldFacet dff : fi.facets.values()) {
-      
-      String paramStart = "f." + dff.field + '.';
-      sreq.params.remove(paramStart + FacetParams.FACET_MINCOUNT);
-      sreq.params.remove(paramStart + FacetParams.FACET_OFFSET);
-      
-      dff.initialLimit = dff.limit <= 0 ? dff.limit : dff.offset + dff.limit;
-      
-      if (dff.sort.equals(FacetParams.FACET_SORT_COUNT)) {
-        if (dff.limit > 0) {
-          // set the initial limit higher to increase accuracy
-          dff.initialLimit = doOverRequestMath(dff.initialLimit, dff.overrequestRatio, 
-                                               dff.overrequestCount);
-        }
-        dff.initialMincount = Math.min(dff.minCount, 1);
-      } else {
-        // we're sorting by index order.
-        // if minCount==0, we should always be able to get accurate results w/o
-        // over-requesting or refining
-        // if minCount==1, we should be able to get accurate results w/o
-        // over-requesting, but we'll need to refine
-        // if minCount==n (>1), we can set the initialMincount to
-        // minCount/nShards, rounded up.
-        // For example, we know that if minCount=10 and we have 3 shards, then
-        // at least one shard must have a count of 4 for the term
-        // For the minCount>1 case, we can generate too short of a list (miss
-        // terms at the end of the list) unless limit==-1
-        // For example: each shard could produce a list of top 10, but some of
-        // those could fail to make it into the combined list (i.e.
-        // we needed to go beyond the top 10 to generate the top 10 combined).
-        // Overrequesting can help a little here, but not as
-        // much as when sorting by count.
-        if (dff.minCount <= 1) {
-          dff.initialMincount = dff.minCount;
-        } else {
-          dff.initialMincount = (int) Math.ceil((double) dff.minCount / rb.slices.length);
-        }
-      }
-
-      // Currently this is for testing only and allows overriding of the
-      // facet.limit set to the shards
-      dff.initialLimit = rb.req.getParams().getInt("facet.shard.limit", dff.initialLimit);
-      
-      sreq.params.set(paramStart + FacetParams.FACET_LIMIT, dff.initialLimit);
-      sreq.params.set(paramStart + FacetParams.FACET_MINCOUNT, dff.initialMincount);
-
-    }
-  }
-  
-  private void modifyRequestForPivotFacets(ResponseBuilder rb,
-                                           ShardRequest sreq, 
-                                           SimpleOrderedMap<PivotFacet> pivotFacets) {
-    for (Entry<String,PivotFacet> pfwEntry : pivotFacets) {
-      PivotFacet pivot = pfwEntry.getValue();
-      for (String pivotField : StrUtils.splitSmart(pivot.getKey(), ',')) {
-        modifyRequestForIndividualPivotFacets(rb, sreq, pivotField);
-      }
-    }
-  }
-  
-  private void modifyRequestForIndividualPivotFacets(ResponseBuilder rb, ShardRequest sreq, 
-                                                     String fieldToOverRequest) {
-
-    final SolrParams originalParams = rb.req.getParams();
-    final String paramStart = "f." + fieldToOverRequest + ".";
-
-    final int requestedLimit = originalParams.getFieldInt(fieldToOverRequest,
-                                                          FacetParams.FACET_LIMIT, 100);
-    sreq.params.remove(paramStart + FacetParams.FACET_LIMIT);
-
-    final int offset = originalParams.getFieldInt(fieldToOverRequest,
-                                                  FacetParams.FACET_OFFSET, 0);
-    sreq.params.remove(paramStart + FacetParams.FACET_OFFSET);
-    
-    final double overRequestRatio = originalParams.getFieldDouble
-      (fieldToOverRequest, FacetParams.FACET_OVERREQUEST_RATIO, 1.5);
-    sreq.params.remove(paramStart + FacetParams.FACET_OVERREQUEST_RATIO);
-    
-    final int overRequestCount = originalParams.getFieldInt
-      (fieldToOverRequest, FacetParams.FACET_OVERREQUEST_COUNT, 10);
-    sreq.params.remove(paramStart + FacetParams.FACET_OVERREQUEST_COUNT);
-    
-    final int requestedMinCount = originalParams.getFieldInt
-      (fieldToOverRequest, FacetParams.FACET_PIVOT_MINCOUNT, 1);
-    sreq.params.remove(paramStart + FacetParams.FACET_PIVOT_MINCOUNT);
-
-    final String defaultSort = (requestedLimit > 0)
-      ? FacetParams.FACET_SORT_COUNT : FacetParams.FACET_SORT_INDEX;
-    final String sort = originalParams.getFieldParam
-      (fieldToOverRequest, FacetParams.FACET_SORT, defaultSort);
-
-    int shardLimit = requestedLimit + offset;
-    int shardMinCount = requestedMinCount;
-
-    // per-shard mincount & overrequest
-    if ( FacetParams.FACET_SORT_INDEX.equals(sort) && 
-         1 < requestedMinCount && 
-         0 < requestedLimit) {
-
-      // We can divide the mincount by num shards rounded up, because unless 
-      // a single shard has at least that many it can't compete...
-      shardMinCount = (int) Math.ceil((double) requestedMinCount / rb.slices.length);
-
-      // ...but we still need to overrequest to reduce chances of missing something
-      shardLimit = doOverRequestMath(shardLimit, overRequestRatio, overRequestCount);
-
-      // (for mincount <= 1, no overrequest needed)
-
-    } else if ( FacetParams.FACET_SORT_COUNT.equals(sort) ) {
-      if ( 0 < requestedLimit ) {
-        shardLimit = doOverRequestMath(shardLimit, overRequestRatio, overRequestCount);
-      }
-      shardMinCount = Math.min(requestedMinCount, 1);
-    } 
-    sreq.params.set(paramStart + FacetParams.FACET_LIMIT, shardLimit);
-    sreq.params.set(paramStart + FacetParams.FACET_PIVOT_MINCOUNT, shardMinCount);
-  }
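-  // Worked example (default params assumed): facet.limit=100, facet.offset=0,
-  // facet.pivot.mincount=1, sort=count, overrequest ratio=1.5 and count=10:
-  //   shardLimit    = doOverRequestMath(100 + 0, 1.5, 10) = 160
-  //   shardMinCount = Math.min(1, 1)                      = 1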
-  
-  private int doOverRequestMath(int limit, double ratio, int count) {
-    // NOTE: normally, "1.0F < ratio"
-    //
-    // if the user chooses a ratio < 1, we allow it and don't "bottom out" at
-    // the original limit until *after* we've also added the count.
-    int adjustedLimit = (int) (limit * ratio) + count;
-    return Math.max(limit, adjustedLimit);
-  }
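-  // For example, with the default ratio=1.5 and count=10:
-  //   limit=100 -> adjustedLimit = (int)(100 * 1.5) + 10 = 160
-  //   limit=10  -> adjustedLimit = (int)(10 * 1.5) + 10  = 25
-  // and with a user-supplied ratio=0.5:
-  //   limit=100 -> adjustedLimit = 60, so Math.max bottoms out at 100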
-  
-  @Override
-  public void handleResponses(ResponseBuilder rb, ShardRequest sreq) {
-    if (!rb.doFacets) return;
-    
-    if ((sreq.purpose & ShardRequest.PURPOSE_GET_FACETS) != 0) {
-      countFacets(rb, sreq);
-    } else {
-      // at present PURPOSE_REFINE_FACETS and PURPOSE_REFINE_PIVOT_FACETS
-      // don't co-exist in individual requests, but don't assume that
-      // will always be the case
-      if ((sreq.purpose & ShardRequest.PURPOSE_REFINE_FACETS) != 0) {
-        refineFacets(rb, sreq);
-      }
-      if ((sreq.purpose & ShardRequest.PURPOSE_REFINE_PIVOT_FACETS) != 0) {
-        refinePivotFacets(rb, sreq);
-      }
-    }
-  }
-  
-  private void countFacets(ResponseBuilder rb, ShardRequest sreq) {
-    FacetInfo fi = rb._facetInfo;
-    
-    for (ShardResponse srsp : sreq.responses) {
-      int shardNum = rb.getShardNum(srsp.getShard());
-      NamedList facet_counts = null;
-      try {
-        facet_counts = (NamedList) srsp.getSolrResponse().getResponse().get("facet_counts");
-      } catch (Exception ex) {
-        if (ShardParams.getShardsTolerantAsBool(rb.req.getParams())) {
-          continue; // looks like a shard did not return anything
-        }
-        throw new SolrException(ErrorCode.SERVER_ERROR,
-            "Unable to read facet info for shard: " + srsp.getShard(), ex);
-      }
-      
-      // handle facet queries
-      NamedList facet_queries = (NamedList) facet_counts.get("facet_queries");
-      if (facet_queries != null) {
-        for (int i = 0; i < facet_queries.size(); i++) {
-          String returnedKey = facet_queries.getName(i);
-          long count = ((Number) facet_queries.getVal(i)).longValue();
-          QueryFacet qf = fi.queryFacets.get(returnedKey);
-          qf.count += count;
-        }
-      }
-
-      // step through each facet.field, adding results from this shard
-      NamedList facet_fields = (NamedList) facet_counts.get("facet_fields");
-      
-      if (facet_fields != null) {
-        for (DistribFieldFacet dff : fi.facets.values()) {
-          dff.add(shardNum, (NamedList) facet_fields.get(dff.getKey()), dff.initialLimit);
-        }
-      }
-
-      // Distributed facet_ranges
-      @SuppressWarnings("unchecked")
-      SimpleOrderedMap<SimpleOrderedMap<Object>> rangesFromShard = (SimpleOrderedMap<SimpleOrderedMap<Object>>)
-          facet_counts.get("facet_ranges");
-      if (rangesFromShard != null)  {
-        RangeFacetRequest.DistribRangeFacet.mergeFacetRangesFromShardResponse(fi.rangeFacets, rangesFromShard);
-      }
-
-      // Distributed facet_intervals
-      doDistribIntervals(fi, facet_counts);
-      
-      // Distributed facet_pivots - this is just the per shard collection,
-      // refinement reqs still needed (below) once we've considered every shard
-      doDistribPivots(rb, shardNum, facet_counts);
-
-      // Distributed facet_heatmaps
-      SpatialHeatmapFacets.distribHandleResponse(fi.heatmapFacets, facet_counts);
-
-    } // end for-each-response-in-shard-request...
-    
-    // refine each pivot based on the new shard data
-    for (Entry<String,PivotFacet> pivotFacet : fi.pivotFacets) {
-      pivotFacet.getValue().queuePivotRefinementRequests();
-    }
-    
-    //
-    // This code currently assumes that there will be only a single
-    // request (with responses from all shards) sent out to get facets...
-    // otherwise we would need to wait until all facet responses were received.
-    //
-    for (DistribFieldFacet dff : fi.facets.values()) {
-      // no need to check these facets for refinement
-      if (dff.initialLimit <= 0 && dff.initialMincount <= 1) continue;
-
-      // only other case where index-sort doesn't need refinement is if minCount==0
-      if (dff.minCount <= 1 && dff.sort.equals(FacetParams.FACET_SORT_INDEX)) continue;
-
-      @SuppressWarnings("unchecked") // generic array's are annoying
-      List<String>[] tmp = (List<String>[]) new List[rb.shards.length];
-      dff._toRefine = tmp;
-
-      ShardFacetCount[] counts = dff.getCountSorted();
-      int ntop = Math.min(counts.length, 
-                          dff.limit >= 0 ? dff.offset + dff.limit : Integer.MAX_VALUE);
-      long smallestCount = counts.length == 0 ? 0 : counts[ntop - 1].count;
-      
-      for (int i = 0; i < counts.length; i++) {
-        ShardFacetCount sfc = counts[i];
-        boolean needRefinement = false;
-
-        if (i < ntop) {
-          // automatically flag the top values for refinement
-          // this should always be true for facet.sort=index
-          needRefinement = true;
-        } else {
-          // this logic should only be invoked for facet.sort=index (for now)
-          
-          // calculate the maximum value that this term may have
-          // and if it is >= smallestCount, then flag for refinement
-          long maxCount = sfc.count;
-          for (int shardNum = 0; shardNum < rb.shards.length; shardNum++) {
-            FixedBitSet fbs = dff.counted[shardNum];
-            // fbs can be null if a shard request failed
-            if (fbs != null && (sfc.termNum >= fbs.length() || !fbs.get(sfc.termNum))) {
-              // if missing from this shard, add the max it could be
-              maxCount += dff.maxPossible(shardNum);
-            }
-          }
-          if (maxCount >= smallestCount) {
-            // TODO: on a tie, we could check the term values
-            needRefinement = true;
-          }
-        }
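-        // Concrete illustration of the bound: with 2 shards, if "foo" was
-        // counted 5 on shard A but missing from shard B, and shard B's
-        // missingMax is 3, then maxCount = 5 + 3 = 8. If the smallest count
-        // currently in the top list is 7, "foo" could still belong there,
-        // so it is flagged for refinement.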
-
-        if (needRefinement) {
-          // add a query for each shard missing the term that needs refinement
-          for (int shardNum = 0; shardNum < rb.shards.length; shardNum++) {
-            FixedBitSet fbs = dff.counted[shardNum];
-            // fbs can be null if a shard request failed
-            if (fbs != null &&
-                (sfc.termNum >= fbs.length() || !fbs.get(sfc.termNum)) &&
-                dff.maxPossible(shardNum) > 0) {
-
-              dff.needRefinements = true;
-              List<String> lst = dff._toRefine[shardNum];
-              if (lst == null) {
-                lst = dff._toRefine[shardNum] = new ArrayList<>();
-              }
-              lst.add(sfc.name);
-            }
-          }
-        }
-      }
-    }
-    removeFieldFacetsUnderLimits(rb);
-    removeRangeFacetsUnderLimits(rb);
-    removeQueryFacetsUnderLimits(rb);
-
-  }
-
-  private void removeQueryFacetsUnderLimits(ResponseBuilder rb) {
-    if (rb.stage != ResponseBuilder.STAGE_EXECUTE_QUERY) {
-      return;
-    }
-    FacetInfo fi = rb._facetInfo;
-    Map<String, QueryFacet> query_facets = fi.queryFacets;
-    if (query_facets == null) {
-      return;
-    }
-    LinkedHashMap<String, QueryFacet> newQueryFacets = new LinkedHashMap<>();
-
-    // Drop any query facets whose merged count fell below facet.mincount.
-    int minCount = rb.req.getParams().getInt(FacetParams.FACET_MINCOUNT, 0);
-    boolean replace = false;
-    for (Map.Entry<String, QueryFacet> ent : query_facets.entrySet()) {
-      if (ent.getValue().count >= minCount) {
-        newQueryFacets.put(ent.getKey(), ent.getValue());
-      } else {
-        log.trace("Removing facetQuery/key: " + ent.getKey() + "/" + ent.getValue().toString() + " mincount=" + minCount);
-        replace = true;
-      }
-    }
-    if (replace) {
-      fi.queryFacets = newQueryFacets;
-    }
-  }
-
-  private void removeRangeFacetsUnderLimits(ResponseBuilder rb) {
-    if (rb.stage != ResponseBuilder.STAGE_EXECUTE_QUERY) {
-      return;
-    }
-
-    FacetInfo fi = rb._facetInfo;
-    for (Map.Entry<String, RangeFacetRequest.DistribRangeFacet> entry : fi.rangeFacets.entrySet()) {
-      final String field = entry.getKey();
-      final RangeFacetRequest.DistribRangeFacet rangeFacet = entry.getValue();
-
-      int minCount = rb.req.getParams().getFieldInt(field, FacetParams.FACET_MINCOUNT, 0);
-      if (minCount == 0) {
-        continue;
-      }
-
-      rangeFacet.removeRangeFacetsUnderLimits(minCount);
-    }
-  }
-
-  private void removeFieldFacetsUnderLimits(ResponseBuilder rb) {
-    if (rb.stage != ResponseBuilder.STAGE_DONE) {
-      return;
-    }
-
-    FacetInfo fi = rb._facetInfo;
-    if (fi.facets == null) {
-      return;
-    }
-    // Do field facets
-    for (Entry<String, DistribFieldFacet> ent : fi.facets.entrySet()) {
-      String field = ent.getKey();
-      int minCount = rb.req.getParams().getFieldInt(field, FacetParams.FACET_MINCOUNT, 0);
-      if (minCount == 0) { // return them all
-        continue;
-      }
-      ent.getValue().respectMinCount(minCount);
-    }
-  }
-
-  // The implementation below uses the first encountered shard's
-  // facet_intervals as the basis for subsequent shards' data to be merged.
-  private void doDistribIntervals(FacetInfo fi, NamedList facet_counts) {
-    @SuppressWarnings("unchecked")
-    SimpleOrderedMap<SimpleOrderedMap<Integer>> facet_intervals =
-        (SimpleOrderedMap<SimpleOrderedMap<Integer>>)
-            facet_counts.get("facet_intervals");
-
-    if (facet_intervals != null) {
-
-      for (Map.Entry<String, SimpleOrderedMap<Integer>> entry : facet_intervals) {
-        final String field = entry.getKey();
-        SimpleOrderedMap<Integer> existingCounts = fi.intervalFacets.get(field);
-        if (existingCounts == null) {
-          // first time we've seen this field, no merging
-          fi.intervalFacets.add(field, entry.getValue());
-
-        } else {
-          // not the first time, merge current field counts
-          Iterator<Map.Entry<String, Integer>> newItr = entry.getValue().iterator();
-          Iterator<Map.Entry<String, Integer>> exItr = existingCounts.iterator();
-
-          // all intervals should be returned by each shard, even if they have zero count,
-          // and in the same order
-          while (exItr.hasNext()) {
-            Map.Entry<String, Integer> exItem = exItr.next();
-            if (!newItr.hasNext()) {
-              throw new SolrException(ErrorCode.SERVER_ERROR,
-                  "Interval facet shard response missing key: " + exItem.getKey());
-            }
-            Map.Entry<String, Integer> newItem = newItr.next();
-            if (!newItem.getKey().equals(exItem.getKey())) {
-              throw new SolrException(ErrorCode.SERVER_ERROR,
-                  "Interval facet shard response has extra key: " + newItem.getKey());
-            }
-            exItem.setValue(exItem.getValue() + newItem.getValue());
-          }
-          if (newItr.hasNext()) {
-            throw new SolrException(ErrorCode.SERVER_ERROR,
-                "Interval facet shard response has at least one extra key: "
-                + newItr.next().getKey());
-          }
-        }
-      }
-    }
-  }
-
-  private void doDistribPivots(ResponseBuilder rb, int shardNum, NamedList facet_counts) {
-    @SuppressWarnings("unchecked")
-    SimpleOrderedMap<List<NamedList<Object>>> facet_pivot 
-      = (SimpleOrderedMap<List<NamedList<Object>>>) facet_counts.get(PIVOT_KEY);
-    
-    if (facet_pivot != null) {
-      for (Map.Entry<String,List<NamedList<Object>>> pivot : facet_pivot) {
-        final String pivotName = pivot.getKey();
-        PivotFacet facet = rb._facetInfo.pivotFacets.get(pivotName);
-        facet.mergeResponseFromShard(shardNum, rb, pivot.getValue());
-      }
-    }
-  }
-
-
-  private void refineFacets(ResponseBuilder rb, ShardRequest sreq) {
-    FacetInfo fi = rb._facetInfo;
-
-    for (ShardResponse srsp : sreq.responses) {
-      // int shardNum = rb.getShardNum(srsp.shard);
-      NamedList facet_counts = (NamedList) srsp.getSolrResponse().getResponse().get("facet_counts");
-      NamedList facet_fields = (NamedList) facet_counts.get("facet_fields");
-      
-      if (facet_fields == null) continue; // this can happen when there's an exception
-      
-      for (int i = 0; i < facet_fields.size(); i++) {
-        String key = facet_fields.getName(i);
-        DistribFieldFacet dff = fi.facets.get(key);
-        if (dff == null) continue;
-
-        NamedList shardCounts = (NamedList) facet_fields.getVal(i);
-        
-        for (int j = 0; j < shardCounts.size(); j++) {
-          String name = shardCounts.getName(j);
-          long count = ((Number) shardCounts.getVal(j)).longValue();
-          ShardFacetCount sfc = dff.counts.get(name);
-          if (sfc == null) {
-            // we got back a term we didn't ask for?
-            log.error("Unexpected term returned for facet refining. key=" + key
-                      + " term='" + name + "'" + "\n\trequest params=" + sreq.params
-                      + "\n\ttoRefine=" + dff._toRefine + "\n\tresponse="
-                      + shardCounts);
-            continue;
-          }
-          sfc.count += count;
-        }
-      }
-    }
-  }
-  
-  private void refinePivotFacets(ResponseBuilder rb, ShardRequest sreq) {
-    // This is after the shard has returned the refinement request
-    FacetInfo fi = rb._facetInfo;
-    for (ShardResponse srsp : sreq.responses) {
-      
-      int shardNumber = rb.getShardNum(srsp.getShard());
-      
-      NamedList facetCounts = (NamedList) srsp.getSolrResponse().getResponse().get("facet_counts");
-      
-      @SuppressWarnings("unchecked")
-      NamedList<List<NamedList<Object>>> pivotFacetResponsesFromShard 
-        = (NamedList<List<NamedList<Object>>>) facetCounts.get(PIVOT_KEY);
-
-      if (null == pivotFacetResponsesFromShard) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, 
-                                "No pivot refinement response from shard: " + srsp.getShard());
-      }
-      
-      for (Entry<String,List<NamedList<Object>>> pivotFacetResponseFromShard : pivotFacetResponsesFromShard) {
-        PivotFacet masterPivotFacet = fi.pivotFacets.get(pivotFacetResponseFromShard.getKey());
-        masterPivotFacet.mergeResponseFromShard(shardNumber, rb, pivotFacetResponseFromShard.getValue());  
-        masterPivotFacet.removeAllRefinementsForShard(shardNumber);
-      }
-    }
-    
-    if (allPivotFacetsAreFullyRefined(fi)) {
-      for (Entry<String,PivotFacet> pf : fi.pivotFacets) {
-        pf.getValue().queuePivotRefinementRequests();
-      }
-      reQueuePivotFacetShardRequests(rb);
-    }
-  }
-  
-  private boolean allPivotFacetsAreFullyRefined(FacetInfo fi) {
-    
-    for (Entry<String,PivotFacet> pf : fi.pivotFacets) {
-      if (pf.getValue().isRefinementsRequired()) {
-        return false;
-      }
-    }
-    return true;
-  }
-  
-  private boolean doAnyPivotFacetRefinementRequestsExistForShard(FacetInfo fi,
-                                                                 int shardNum) {
-    for (int i = 0; i < fi.pivotFacets.size(); i++) {
-      PivotFacet pf = fi.pivotFacets.getVal(i);
-      if ( ! pf.getQueuedRefinements(shardNum).isEmpty() ) {
-        return true;
-      }
-    }
-    return false;
-  }
-  
-  private void reQueuePivotFacetShardRequests(ResponseBuilder rb) {
-    for (int shardNum = 0; shardNum < rb.shards.length; shardNum++) {
-      if (doAnyPivotFacetRefinementRequestsExistForShard(rb._facetInfo, shardNum)) {
-        enqueuePivotFacetShardRequests(rb, shardNum);
-      }
-    }
-  }
-  
-  @Override
-  public void finishStage(ResponseBuilder rb) {
-    if (!rb.doFacets || rb.stage != ResponseBuilder.STAGE_GET_FIELDS) return;
-    // wait until STAGE_GET_FIELDS
-    // so that "result" is already stored in the response (for aesthetics)
-    
-    FacetInfo fi = rb._facetInfo;
-
-    NamedList<Object> facet_counts = new SimpleOrderedMap<>();
-    
-    NamedList<Number> facet_queries = new SimpleOrderedMap<>();
-    facet_counts.add("facet_queries", facet_queries);
-    for (QueryFacet qf : fi.queryFacets.values()) {
-      facet_queries.add(qf.getKey(), num(qf.count));
-    }
-    
-    NamedList<Object> facet_fields = new SimpleOrderedMap<>();
-    facet_counts.add("facet_fields", facet_fields);
-    
-    for (DistribFieldFacet dff : fi.facets.values()) {
-      // order is important for facet values, so use NamedList
-      NamedList<Object> fieldCounts = new NamedList<>(); 
-      facet_fields.add(dff.getKey(), fieldCounts);
-      
-      ShardFacetCount[] counts;
-      boolean countSorted = dff.sort.equals(FacetParams.FACET_SORT_COUNT);
-      if (countSorted) {
-        counts = dff.countSorted;
-        if (counts == null || dff.needRefinements) {
-          counts = dff.getCountSorted();
-        }
-      } else if (dff.sort.equals(FacetParams.FACET_SORT_INDEX)) {
-        counts = dff.getLexSorted();
-      } else { // TODO: log error or throw exception?
-        counts = dff.getLexSorted();
-      }
-      
-      if (countSorted) {
-        int end = dff.limit < 0 
-          ? counts.length : Math.min(dff.offset + dff.limit, counts.length);
-        for (int i = dff.offset; i < end; i++) {
-          if (counts[i].count < dff.minCount) {
-            break;
-          }
-          fieldCounts.add(counts[i].name, num(counts[i].count));
-        }
-      } else {
-        int off = dff.offset;
-        int lim = dff.limit >= 0 ? dff.limit : Integer.MAX_VALUE;
-        
-        // index order...
-        for (int i = 0; i < counts.length; i++) {
-          long count = counts[i].count;
-          if (count < dff.minCount) continue;
-          if (off > 0) {
-            off--;
-            continue;
-          }
-          if (lim <= 0) {
-            break;
-          }
-          lim--;
-          fieldCounts.add(counts[i].name, num(count));
-        }
-      }
-
-      if (dff.missing) {
-        fieldCounts.add(null, num(dff.missingCount));
-      }
-    }
-
-    SimpleOrderedMap<SimpleOrderedMap<Object>> rangeFacetOutput = new SimpleOrderedMap<>();
-    for (Map.Entry<String, RangeFacetRequest.DistribRangeFacet> entry : fi.rangeFacets.entrySet()) {
-      String key = entry.getKey();
-      RangeFacetRequest.DistribRangeFacet value = entry.getValue();
-      rangeFacetOutput.add(key, value.rangeFacet);
-    }
-    facet_counts.add("facet_ranges", rangeFacetOutput);
-
-    facet_counts.add("facet_intervals", fi.intervalFacets);
-    facet_counts.add(SpatialHeatmapFacets.RESPONSE_KEY,
-        SpatialHeatmapFacets.distribFinish(fi.heatmapFacets, rb));
-
-    if (fi.pivotFacets != null && fi.pivotFacets.size() > 0) {
-      facet_counts.add(PIVOT_KEY, createPivotFacetOutput(rb));
-    }
-
-    rb.rsp.add("facet_counts", facet_counts);
-
-    rb._facetInfo = null;  // could be big, so release asap
-  }
-
-  private SimpleOrderedMap<List<NamedList<Object>>> createPivotFacetOutput(ResponseBuilder rb) {
-    
-    SimpleOrderedMap<List<NamedList<Object>>> combinedPivotFacets = new SimpleOrderedMap<>();
-    for (Entry<String,PivotFacet> entry : rb._facetInfo.pivotFacets) {
-      String key = entry.getKey();
-      PivotFacet pivot = entry.getValue();
-      List<NamedList<Object>> trimmedPivots = pivot.getTrimmedPivotsAsListOfNamedLists();
-      if (null == trimmedPivots) {
-        trimmedPivots = Collections.<NamedList<Object>>emptyList();
-      }
-
-      combinedPivotFacets.add(key, trimmedPivots);
-    }
-    return combinedPivotFacets;
-  }
-
-  // use <int> tags for smaller facet counts (better back compatibility)
-
-  /**
-   * @param val a primitive long value
-   * @return an {@link Integer} if the value of the argument is less than {@link Integer#MAX_VALUE}
-   * else a {@link java.lang.Long}
-   */
-  static Number num(long val) {
-   if (val < Integer.MAX_VALUE) return (int)val;
-   else return val;
-  }
-
-  /**
-   * @param val a {@link java.lang.Long} value
-   * @return an {@link Integer} if the value of the argument is less than {@link Integer#MAX_VALUE}
-   * else a {@link java.lang.Long}
-   */
-  static Number num(Long val) {
-    if (val.longValue() < Integer.MAX_VALUE) return val.intValue();
-    else return val;
-  }
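-  // e.g. num(42L) comes back as an Integer (serialized as <int>), while
-  // num(3_000_000_000L) exceeds Integer.MAX_VALUE and stays a Long.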
-
-
-  /////////////////////////////////////////////
-  ///  SolrInfoBean
-  ////////////////////////////////////////////
-
-  @Override
-  public String getDescription() {
-    return "Handle Faceting";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.QUERY;
-  }
-
-  /**
-   * This class is used exclusively for merging results from each shard
-   * in a distributed facet request. It plays no role in the computation
-   * of facet counts inside a single node.
-   *
-   * A related class {@link org.apache.solr.handler.component.FacetComponent.FacetContext}
-   * exists for assisting computation inside a single node.
-   *
-   * <b>This API is experimental and subject to change</b>
-   *
-   * @see org.apache.solr.handler.component.FacetComponent.FacetContext
-   */
-  public static class FacetInfo {
-    /**
-     * Incremented counter used to track the values being refined in a given request.
-     * This counter is used in conjunction with {@link PivotFacet#REFINE_PARAM} to identify
-     * which refinement values are associated with which pivots.
-     */
-    int pivotRefinementCounter = 0;
-
-    public LinkedHashMap<String,QueryFacet> queryFacets;
-    public LinkedHashMap<String,DistribFieldFacet> facets;
-    public SimpleOrderedMap<SimpleOrderedMap<Object>> dateFacets
-      = new SimpleOrderedMap<>();
-    public LinkedHashMap<String, RangeFacetRequest.DistribRangeFacet> rangeFacets
-            = new LinkedHashMap<>();
-    public SimpleOrderedMap<SimpleOrderedMap<Integer>> intervalFacets
-      = new SimpleOrderedMap<>();
-    public SimpleOrderedMap<PivotFacet> pivotFacets
-      = new SimpleOrderedMap<>();
-    public LinkedHashMap<String,SpatialHeatmapFacets.HeatmapFacet> heatmapFacets;
-
-    void parse(SolrParams params, ResponseBuilder rb) {
-      queryFacets = new LinkedHashMap<>();
-      facets = new LinkedHashMap<>();
-
-      String[] facetQs = params.getParams(FacetParams.FACET_QUERY);
-      if (facetQs != null) {
-        for (String query : facetQs) {
-          QueryFacet queryFacet = new QueryFacet(rb, query);
-          queryFacets.put(queryFacet.getKey(), queryFacet);
-        }
-      }
-      
-      String[] facetFs = params.getParams(FacetParams.FACET_FIELD);
-      if (facetFs != null) {
-        
-        for (String field : facetFs) {
-          final DistribFieldFacet ff;
-          
-          if (params.getFieldBool(field, FacetParams.FACET_EXISTS, false)) {
-            // cap facet count by 1 with this method
-            ff = new DistribFacetExistsField(rb, field);
-          } else {
-            ff = new DistribFieldFacet(rb, field);
-          }
-          facets.put(ff.getKey(), ff);
-        }
-      }
-
-      // Develop Pivot Facet Information
-      String[] facetPFs = params.getParams(FacetParams.FACET_PIVOT);
-      if (facetPFs != null) {
-        for (String fieldGroup : facetPFs) {
-          PivotFacet pf = new PivotFacet(rb, fieldGroup);
-          pivotFacets.add(pf.getKey(), pf);
-        }
-      }
-
-      heatmapFacets = SpatialHeatmapFacets.distribParse(params, rb);
-    }
-  }
-
-  /**
-   * <b>This API is experimental and subject to change</b>
-   */
-  public static class FacetBase {
-    String facetType; // facet.field, facet.query, etc (make enum?)
-    String facetStr; // original parameter value of facetStr
-    String facetOn; // the field or query, absent localParams if appropriate
-    private String key; // label in the response for the result... 
-                        // "foo" for {!key=foo}myfield
-    SolrParams localParams; // any local params for the facet
-    private List<String> tags = Collections.emptyList();
-    private List<String> excludeTags = Collections.emptyList();
-    private int threadCount = -1;
-    
-    public FacetBase(ResponseBuilder rb, String facetType, String facetStr) {
-      this.facetType = facetType;
-      this.facetStr = facetStr;
-      try {
-        this.localParams = QueryParsing.getLocalParams(facetStr,
-                                                       rb.req.getParams());
-      } catch (SyntaxError e) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
-      }
-      this.facetOn = facetStr;
-      this.key = facetStr;
-      
-      if (localParams != null) {
-        // remove local params unless it's a query
-        if (!facetType.equals(FacetParams.FACET_QUERY)) {
-          facetOn = localParams.get(CommonParams.VALUE);
-          key = facetOn;
-        }
-        
-        key = localParams.get(CommonParams.OUTPUT_KEY, key);
-
-        String tagStr = localParams.get(CommonParams.TAG);
-        this.tags = tagStr == null ? Collections.<String>emptyList() : StrUtils.splitSmart(tagStr,',');
-
-        String threadStr = localParams.get(CommonParams.THREADS);
-        this.threadCount = threadStr != null ? Integer.parseInt(threadStr) : -1;
-
-        String excludeStr = localParams.get(CommonParams.EXCLUDE);
-        if (StringUtils.isEmpty(excludeStr))  {
-          this.excludeTags = Collections.emptyList();
-        } else {
-          this.excludeTags = StrUtils.splitSmart(excludeStr,',');
-        }
-      }
-    }
-    
-    /** returns the key in the response that this facet will be under */
-    public String getKey() { return key; }
-    public String getType() { return facetType; }
-    public List<String> getTags() { return tags; }
-    public List<String> getExcludeTags() { return excludeTags; }
-    public int getThreadCount() { return threadCount; }
-  }
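-  // Illustration (hypothetical parameter values): for a facet.field string of
-  //   {!key=foo tag=t1,t2 ex=dt}myfield
-  // the resulting FacetBase has:
-  //   getKey()         -> "foo"
-  //   getTags()        -> [t1, t2]
-  //   getExcludeTags() -> [dt]
-  //   facetOn          -> "myfield"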
-  
-  /**
-   * <b>This API is experimental and subject to change</b>
-   */
-  public static class QueryFacet extends FacetBase {
-    public long count;
-    
-    public QueryFacet(ResponseBuilder rb, String facetStr) {
-      super(rb, FacetParams.FACET_QUERY, facetStr);
-    }
-  }
-  
-  /**
-   * <b>This API is experimental and subject to change</b>
-   */
-  public static class FieldFacet extends FacetBase {
-    public String field; // the field to facet on... "myfield" for
-                         // {!key=foo}myfield
-    public FieldType ftype;
-    public int offset;
-    public int limit;
-    public int minCount;
-    public String sort;
-    public boolean missing;
-    public String prefix;
-    public long missingCount;
-    
-    public FieldFacet(ResponseBuilder rb, String facetStr) {
-      super(rb, FacetParams.FACET_FIELD, facetStr);
-      fillParams(rb, rb.req.getParams(), facetOn);
-    }
-    
-    protected void fillParams(ResponseBuilder rb, SolrParams params, String field) {
-      this.field = field;
-      this.ftype = rb.req.getSchema().getFieldTypeNoEx(this.field);
-      this.offset = params.getFieldInt(field, FacetParams.FACET_OFFSET, 0);
-      this.limit = params.getFieldInt(field, FacetParams.FACET_LIMIT, 100);
-      Integer mincount = params.getFieldInt(field, FacetParams.FACET_MINCOUNT);
-      if (mincount == null) {
-        Boolean zeros = params.getFieldBool(field, FacetParams.FACET_ZEROS);
-        // mincount = (zeros!=null && zeros) ? 0 : 1;
-        mincount = (zeros != null && !zeros) ? 1 : 0;
-        // current default is to include zeros.
-      }
-      this.minCount = mincount;
-      this.missing = params.getFieldBool(field, FacetParams.FACET_MISSING, false);
-      // default to sorting by count if there is a limit.
-      this.sort = params.getFieldParam(field, FacetParams.FACET_SORT,
-                                       (limit > 0 ? 
-                                        FacetParams.FACET_SORT_COUNT
-                                        : FacetParams.FACET_SORT_INDEX));
-      if (this.sort.equals(FacetParams.FACET_SORT_COUNT_LEGACY)) {
-        this.sort = FacetParams.FACET_SORT_COUNT;
-      } else if (this.sort.equals(FacetParams.FACET_SORT_INDEX_LEGACY)) {
-        this.sort = FacetParams.FACET_SORT_INDEX;
-      }
-      this.prefix = params.getFieldParam(field, FacetParams.FACET_PREFIX);
-    }
-  }
-  
-  /**
-   * <b>This API is experimental and subject to change</b>
-   */
-  @SuppressWarnings("rawtypes")
-  public static class DistribFieldFacet extends FieldFacet {
-    public List<String>[] _toRefine; // a List<String> of refinements needed,
-                                     // one for each shard.
-    
-    // SchemaField sf; // currently unneeded
-    
-    // the max possible count for a term appearing on no list
-    public long missingMaxPossible;
-    // the max possible count for a missing term for each shard (indexed by
-    // shardNum)
-    public long[] missingMax;
-    // a bitset for each shard, keeping track of which terms seen
-    public FixedBitSet[] counted; 
-    public HashMap<String,ShardFacetCount> counts = new HashMap<>(128);
-    public int termNum;
-    
-    public int initialLimit; // how many terms requested in first phase
-    public int initialMincount; // mincount param sent to each shard
-    public double overrequestRatio;
-    public int overrequestCount;
-    public boolean needRefinements;
-    public ShardFacetCount[] countSorted;
-    
-    DistribFieldFacet(ResponseBuilder rb, String facetStr) {
-      super(rb, facetStr);
-      // sf = rb.req.getSchema().getField(field);
-      missingMax = new long[rb.shards.length];
-      counted = new FixedBitSet[rb.shards.length];
-    }
-    
-    protected void fillParams(ResponseBuilder rb, SolrParams params, String field) {
-      super.fillParams(rb, params, field);
-      this.overrequestRatio
-        = params.getFieldDouble(field, FacetParams.FACET_OVERREQUEST_RATIO, 1.5);
-      this.overrequestCount 
-        = params.getFieldInt(field, FacetParams.FACET_OVERREQUEST_COUNT, 10);
-    }
-    
-    void add(int shardNum, NamedList shardCounts, int numRequested) {
-      // shardCounts could be null if there was an exception
-      int sz = shardCounts == null ? 0 : shardCounts.size();
-      int numReceived = sz;
-      
-      FixedBitSet terms = new FixedBitSet(termNum + sz);
-
-      long last = 0;
-      for (int i = 0; i < sz; i++) {
-        String name = shardCounts.getName(i);
-        long count = ((Number) shardCounts.getVal(i)).longValue();
-        if (name == null) {
-          missingCount += count;
-          numReceived--;
-        } else {
-          ShardFacetCount sfc = counts.get(name);
-          if (sfc == null) {
-            sfc = new ShardFacetCount();
-            sfc.name = name;
-            if (ftype == null) {
-              sfc.indexed = null;
-            } else if (ftype.isPointField()) {
-              sfc.indexed = ((PointField)ftype).toInternalByteRef(sfc.name);
-            } else {
-              sfc.indexed = new BytesRef(ftype.toInternal(sfc.name));
-            }
-            sfc.termNum = termNum++;
-            counts.put(name, sfc);
-          }
-          incCount(sfc, count);
-          terms.set(sfc.termNum);
-          last = count;
-        }
-      }
-      
-      // the largest possible missing term is (initialMincount - 1) if we received
-      // less than the number requested.
-      if (numRequested < 0 || (numRequested != 0 && numReceived < numRequested)) {
-        last = Math.max(0, initialMincount - 1);
-      }
-      
-      missingMaxPossible += last;
-      missingMax[shardNum] = last;
-      counted[shardNum] = terms;
-    }
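-    // For example: if a shard was asked for 10 terms and returned all 10,
-    // a term it omitted can have a count of at most the smallest count it
-    // did return (the "last" value above). If it returned fewer than
-    // requested, its list was exhausted down to initialMincount, so an
-    // omitted term can count at most (initialMincount - 1).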
-
-    protected void incCount(ShardFacetCount sfc, long count) {
-      sfc.count += count;
-    }
-    
-    public ShardFacetCount[] getLexSorted() {
-      ShardFacetCount[] arr 
-        = counts.values().toArray(new ShardFacetCount[counts.size()]);
-      Arrays.sort(arr, (o1, o2) -> o1.indexed.compareTo(o2.indexed));
-      countSorted = arr;
-      return arr;
-    }
-    
-    public ShardFacetCount[] getCountSorted() {
-      ShardFacetCount[] arr 
-        = counts.values().toArray(new ShardFacetCount[counts.size()]);
-      Arrays.sort(arr, (o1, o2) -> {
-        if (o2.count < o1.count) return -1;
-        else if (o1.count < o2.count) return 1;
-        return o1.indexed.compareTo(o2.indexed);
-      });
-      countSorted = arr;
-      return arr;
-    }
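-    // e.g. counts {a:3, b:5, c:3} sort to [b(5), a(3), c(3)]: descending by
-    // count, with the a/c tie broken by indexed (BytesRef) order.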
-    
-    // returns the max possible value this ShardFacetCount could have for this shard
-    // (assumes the shard did not report a count for this value)
-    long maxPossible(int shardNum) {
-      return missingMax[shardNum];
-      // TODO: could store the last term in the shard to tell if this term
-      // comes before or after it. If it comes before, we could subtract 1
-    }
-
-    public void respectMinCount(long minCount) {
-      HashMap<String, ShardFacetCount> newOne = new HashMap<>();
-      boolean replace = false;
-      for (Map.Entry<String, ShardFacetCount> ent : counts.entrySet()) {
-        if (ent.getValue().count >= minCount) {
-          newOne.put(ent.getKey(), ent.getValue());
-        } else {
-          log.trace("Removing facet/key: " + ent.getKey() + "/" + ent.getValue().toString() + " mincount=" + minCount);
-          replace = true;
-        }
-      }
-      if (replace) {
-        counts = newOne;
-      }
-    }
-  }
-
-  /**
-   * <b>This API is experimental and subject to change</b>
-   */
-  public static class ShardFacetCount {
-    public String name;
-    // the indexed form of the name... used for comparisons
-    public BytesRef indexed; 
-    public long count;
-    public int termNum; // term number starting at 0 (used in bit arrays)
-    
-    @Override
-    public String toString() {
-      return "{term=" + name + ",termNum=" + termNum + ",count=" + count + "}";
-    }
-  }
-
-  
-  private static final class DistribFacetExistsField extends DistribFieldFacet {
-    private DistribFacetExistsField(ResponseBuilder rb, String facetStr) {
-      super(rb, facetStr);
-      SimpleFacets.checkMincountOnExists(field, minCount); 
-    }
-
-    @Override
-    protected void incCount(ShardFacetCount sfc, long count) {
-      if (count>0) {
-        sfc.count = 1;
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/FieldFacetStats.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/FieldFacetStats.java b/solr/core/src/java/org/apache/solr/handler/component/FieldFacetStats.java
deleted file mode 100644
index 2b8373a..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/FieldFacetStats.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.queries.function.FunctionValues;
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.util.BytesRef;
-import org.apache.solr.schema.SchemaField;
-import org.apache.solr.search.SolrIndexSearcher;
-
-
-/**
- * FieldFacetStats is a utility to accumulate statistics on a set of values in one field,
- * for facet values present in another field.
- * <p>
- * 9/10/2009 - Moved out of StatsComponent to allow open access to UnInvertedField
- * @see org.apache.solr.handler.component.StatsComponent
- *
- */
-
-public class FieldFacetStats {
-  public final String name;
-  final StatsField statsField;
-  final SchemaField facet_sf;
-
-  public final Map<String, StatsValues> facetStatsValues;
-  private final Map<Integer, Integer> missingStats;
-  List<HashMap<String, Integer>> facetStatsTerms;
-
-  final LeafReader topLevelReader;
-  LeafReaderContext leave;
-  final ValueSource valueSource;
-  LeafReaderContext context;
-  FunctionValues values;
-
-  SortedDocValues topLevelSortedValues = null;
-
-  public FieldFacetStats(SolrIndexSearcher searcher, SchemaField facet_sf, StatsField statsField) {
-    this.statsField = statsField;
-    this.facet_sf = facet_sf;
-    this.name = facet_sf.getName();
-
-    topLevelReader = searcher.getSlowAtomicReader();
-    valueSource = facet_sf.getType().getValueSource(facet_sf, null);
-
-    facetStatsValues = new HashMap<>();
-    facetStatsTerms = new ArrayList<>();
-    missingStats = new HashMap<>();
-  }
-
-  private StatsValues getStatsValues(String key) throws IOException {
-    StatsValues stats = facetStatsValues.get(key);
-    if (stats == null) {
-      stats = StatsValuesFactory.createStatsValues(statsField);
-      facetStatsValues.put(key, stats);
-      stats.setNextReader(context);
-    }
-    return stats;
-  }
-
-  // docID is relative to the context
-  public void facet(int docID) throws IOException {
-    final String key = values.exists(docID)
-        ? values.strVal(docID)
-        : null;
-    final StatsValues stats = getStatsValues(key);
-    stats.accumulate(docID);
-  }
-
-  // Function to keep track of facet counts for term number.
-  // Currently only used by UnInvertedField stats
-  public boolean facetTermNum(int docID, int statsTermNum) throws IOException {
-    if (topLevelSortedValues == null) {
-      topLevelSortedValues = DocValues.getSorted(topLevelReader, name);
-    }
-
-    if (docID > topLevelSortedValues.docID()) {
-      topLevelSortedValues.advance(docID);
-    }
- 
-    int term;
-    if (docID == topLevelSortedValues.docID()) {
-      term = topLevelSortedValues.ordValue();
-    } else {
-      term = -1;
-    }
-    
-    int arrIdx = term;
-    if (arrIdx >= 0 && arrIdx < topLevelSortedValues.getValueCount()) {
-      final String key;
-      if (term == -1) {
-        key = null;
-      } else {
-        key = topLevelSortedValues.lookupOrd(term).utf8ToString();
-      }
-      while (facetStatsTerms.size() <= statsTermNum) {
-        facetStatsTerms.add(new HashMap<String, Integer>());
-      }
-      
-      
-      final Map<String, Integer> statsTermCounts = facetStatsTerms.get(statsTermNum);
-      Integer statsTermCount = statsTermCounts.get(key);
-      if (statsTermCount == null) {
-        statsTermCounts.put(key, 1);
-      } else {
-        statsTermCounts.put(key, statsTermCount + 1);
-      }
-      return true;
-    }
-    
-    return false;
-  }
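
facetTermNum above (and facetMissingNum further down) follow the standard Lucene 7
forward-iterator idiom for SortedDocValues: advance only when the iterator is behind
the target document, then confirm it actually landed on that document before reading
ordValue(). The same idiom in isolation (the helper name is hypothetical):

    import java.io.IOException;

    import org.apache.lucene.index.SortedDocValues;
    import org.apache.lucene.util.BytesRef;

    final class DocValuesLookup {
      // Returns the term for docID, or null if the doc has no value for this field.
      static BytesRef termForDoc(SortedDocValues dv, int docID) throws IOException {
        if (docID > dv.docID()) {
          dv.advance(docID);        // doc-values iterators only move forward
        }
        if (docID == dv.docID()) {  // the iterator landed exactly on our doc
          return dv.lookupOrd(dv.ordValue());
        }
        return null;                // no value: the iterator skipped past docID
      }
    }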
-
-
-  // Accumulates the specified value into the stats of every facet key tracked for statsTermNum
-  public boolean accumulateTermNum(int statsTermNum, BytesRef value) throws IOException {
-    if (value == null) return false;
-    while (facetStatsTerms.size() <= statsTermNum) {
-      facetStatsTerms.add(new HashMap<String, Integer>());
-    }
-    for (Map.Entry<String, Integer> pairs : facetStatsTerms.get(statsTermNum).entrySet()) {
-      String key = (String) pairs.getKey();
-      StatsValues facetStats = facetStatsValues.get(key);
-      if (facetStats == null) {
-        facetStats = StatsValuesFactory.createStatsValues(statsField);
-        facetStatsValues.put(key, facetStats);
-      }
-      Integer count = (Integer) pairs.getValue();
-      if (count != null) {
-        facetStats.accumulate(value, count);
-      }
-    }
-    return true;
-  }
-
-  public void setNextReader(LeafReaderContext ctx) throws IOException {
-    this.context = ctx;
-    values = valueSource.getValues(Collections.emptyMap(), ctx);
-    for (StatsValues stats : facetStatsValues.values()) {
-      stats.setNextReader(ctx);
-    }
-  }
-
-  public void facetMissingNum(int docID) throws IOException {
-    if (topLevelSortedValues == null) {
-      topLevelSortedValues = DocValues.getSorted(topLevelReader, name);
-    }
-    
-    if (docID > topLevelSortedValues.docID()) {
-      topLevelSortedValues.advance(docID);
-    }
- 
-    if (docID == topLevelSortedValues.docID()) {
-      int ord = topLevelSortedValues.ordValue();
-      Integer missingCount = missingStats.get(ord);
-      if (missingCount == null) {
-        missingStats.put(ord, 1);
-      } else {
-        missingStats.put(ord, missingCount + 1);
-      }
-    }
-  }
-  
-  public void accumulateMissing() throws IOException {
-    StatsValues statsValue;
-    
-    for (Map.Entry<Integer, Integer> entry : missingStats.entrySet()) {
-      if (entry.getKey() >= 0) {
-        String key = topLevelSortedValues.lookupOrd(entry.getKey()).utf8ToString();
-        if ((statsValue = facetStatsValues.get(key)) != null) {
-          statsValue.addMissing(entry.getValue());
-        }
-      }
-    }
-    return;
-  }
-}
-
-
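
FieldFacetStats keeps one StatsValues accumulator per facet value (with a null key
collecting documents that have no value) and is driven one leaf at a time via
setNextReader() and facet(docID). A minimal standalone sketch of that accumulation
pattern, using a plain map and a hypothetical aggregate class instead of Solr's
StatsValues:

    import java.util.HashMap;
    import java.util.Map;

    public class PerFacetStats {
      // One running aggregate per facet value (a null key collects docs with no value).
      static final class Agg {
        long count;
        double sum;
        void accumulate(double v) { count++; sum += v; }
      }

      private final Map<String, Agg> byFacetValue = new HashMap<>();

      // Mirrors FieldFacetStats.facet(docID): look up (or create) the accumulator
      // for this doc's facet value, then accumulate the stats-field value.
      public void facet(String facetValue, double statsFieldValue) {
        byFacetValue.computeIfAbsent(facetValue, k -> new Agg()).accumulate(statsFieldValue);
      }

      public double meanFor(String facetValue) {
        Agg a = byFacetValue.get(facetValue);
        return (a == null || a.count == 0) ? Double.NaN : a.sum / a.count;
      }
    }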

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java b/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java
deleted file mode 100644
index 0ee6855..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.function.Function;
-import java.util.stream.Stream;
-
-import com.google.common.base.Objects;
-import org.apache.lucene.search.Query;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.HighlightParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.core.PluginInfo;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.highlight.DefaultSolrHighlighter;
-import org.apache.solr.highlight.PostingsSolrHighlighter;
-import org.apache.solr.highlight.SolrHighlighter;
-import org.apache.solr.highlight.UnifiedSolrHighlighter;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.search.QParser;
-import org.apache.solr.search.QParserPlugin;
-import org.apache.solr.search.QueryParsing;
-import org.apache.solr.search.SyntaxError;
-import org.apache.solr.util.SolrPluginUtils;
-import org.apache.solr.util.plugin.PluginInfoInitialized;
-import org.apache.solr.util.plugin.SolrCoreAware;
-
-import static java.util.stream.Collectors.toMap;
-
-/**
- * TODO!
- *
- *
- * @since solr 1.3
- */
-public class HighlightComponent extends SearchComponent implements PluginInfoInitialized, SolrCoreAware
-{
-  public enum HighlightMethod {
-    UNIFIED("unified"),
-    FAST_VECTOR("fastVector"),
-    POSTINGS("postings"),
-    ORIGINAL("original");
-
-    private static final Map<String, HighlightMethod> METHODS = Collections.unmodifiableMap(Stream.of(values())
-        .collect(toMap(HighlightMethod::getMethodName, Function.identity())));
-
-    private final String methodName;
-
-    HighlightMethod(String method) {
-      this.methodName = method;
-    }
-
-    public String getMethodName() {
-      return methodName;
-    }
-
-    public static HighlightMethod parse(String method) {
-      return METHODS.get(method);
-    }
-  }
-
-  public static final String COMPONENT_NAME = "highlight";
-
-  private PluginInfo info = PluginInfo.EMPTY_INFO;
-
-  @Deprecated // DWS: in 7.0 let's restructure the abstractions/relationships
-  private SolrHighlighter solrConfigHighlighter;
-
-  /**
-   * @deprecated instead depend on {@link #process(ResponseBuilder)} to choose the highlighter based on
-   * {@link HighlightParams#METHOD}
-   */
-  @Deprecated
-  public static SolrHighlighter getHighlighter(SolrCore core) {
-    HighlightComponent hl = (HighlightComponent) core.getSearchComponents().get(HighlightComponent.COMPONENT_NAME);
-    return hl==null ? null: hl.getHighlighter();
-  }
-
-  @Deprecated
-  public SolrHighlighter getHighlighter() {
-    return solrConfigHighlighter;
-  }
-
-  @Override
-  public void init(PluginInfo info) {
-    this.info = info;
-  }
-
-  @Override
-  public void prepare(ResponseBuilder rb) throws IOException {
-    SolrParams params = rb.req.getParams();
-    rb.doHighlights = solrConfigHighlighter.isHighlightingEnabled(params);
-    if(rb.doHighlights){
-      rb.setNeedDocList(true);
-      String hlq = params.get(HighlightParams.Q);
-      String hlparser = Objects.firstNonNull(params.get(HighlightParams.QPARSER),
-                                              params.get(QueryParsing.DEFTYPE, QParserPlugin.DEFAULT_QTYPE));
-      if(hlq != null){
-        try {
-          QParser parser = QParser.getParser(hlq, hlparser, rb.req);
-          rb.setHighlightQuery(parser.getHighlightQuery());
-        } catch (SyntaxError e) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
-        }
-      }
-    }
-  }
-
-  @Override
-  public void inform(SolrCore core) {
-    List<PluginInfo> children = info.getChildren("highlighting");
-    if(children.isEmpty()) {
-      DefaultSolrHighlighter defHighlighter = new DefaultSolrHighlighter(core);
-      defHighlighter.init(PluginInfo.EMPTY_INFO);
-      solrConfigHighlighter = defHighlighter;
-    } else {
-      solrConfigHighlighter = core.createInitInstance(children.get(0),SolrHighlighter.class,null, DefaultSolrHighlighter.class.getName());
-    }
-
-  }
-
-  @Override
-  public void process(ResponseBuilder rb) throws IOException {
-
-    if (rb.doHighlights) {
-      SolrQueryRequest req = rb.req;
-      SolrParams params = req.getParams();
-
-      SolrHighlighter highlighter = getHighlighter(params);
-
-      //TODO: get from builder by default?
-      String[] defaultHighlightFields = rb.getQparser() != null ? rb.getQparser().getDefaultHighlightFields() : null;
-      
-      Query highlightQuery = rb.getHighlightQuery();
-      if(highlightQuery==null) {
-        if (rb.getQparser() != null) {
-          try {
-            highlightQuery = rb.getQparser().getHighlightQuery();
-            rb.setHighlightQuery( highlightQuery );
-          } catch (Exception e) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
-          }
-        } else {
-          highlightQuery = rb.getQuery();
-          rb.setHighlightQuery( highlightQuery );
-        }
-      }
-
-      // No highlighting if there is no query -- consider q.alt=*:*
-      if( highlightQuery != null ) {
-        NamedList sumData = highlighter.doHighlighting(
-                rb.getResults().docList,
-                highlightQuery,
-                req, defaultHighlightFields );
-        
-        if(sumData != null) {
-          // TODO ???? add this directly to the response?
-          rb.rsp.add(highlightingResponseField(), convertHighlights(sumData));
-        }
-      }
-    }
-  }
-
-  protected SolrHighlighter getHighlighter(SolrParams params) {
-    HighlightMethod method = HighlightMethod.parse(params.get(HighlightParams.METHOD));
-    if (method == null) {
-      return solrConfigHighlighter;
-    }
-
-    switch (method) {
-      case UNIFIED:
-        if (solrConfigHighlighter instanceof UnifiedSolrHighlighter) {
-          return solrConfigHighlighter;
-        }
-        return new UnifiedSolrHighlighter(); // TODO cache one?
-      case POSTINGS:
-        if (solrConfigHighlighter instanceof PostingsSolrHighlighter) {
-          return solrConfigHighlighter;
-        }
-        return new PostingsSolrHighlighter(); // TODO cache one?
-      case FAST_VECTOR: // fall-through
-      case ORIGINAL:
-        if (solrConfigHighlighter instanceof DefaultSolrHighlighter) {
-          return solrConfigHighlighter;
-        } else {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-              "In order to use " + HighlightParams.METHOD + "=" + method.getMethodName() + " the configured" +
-                  " highlighter in solrconfig must be " + DefaultSolrHighlighter.class);
-        }
-      default: throw new AssertionError();
-    }
-  }
-
-  @Override
-  public void modifyRequest(ResponseBuilder rb, SearchComponent who, ShardRequest sreq) {
-    if (!rb.doHighlights) return;
-
-    // Turn on highlighting only when retrieving fields
-    if ((sreq.purpose & ShardRequest.PURPOSE_GET_FIELDS) != 0) {
-        sreq.purpose |= ShardRequest.PURPOSE_GET_HIGHLIGHTS;
-        // should already be true...
-        sreq.params.set(HighlightParams.HIGHLIGHT, "true");      
-    } else {
-      sreq.params.set(HighlightParams.HIGHLIGHT, "false");      
-    }
-  }
-
-  @Override
-  public void handleResponses(ResponseBuilder rb, ShardRequest sreq) {
-  }
-
-  @Override
-  public void finishStage(ResponseBuilder rb) {
-    if (rb.doHighlights && rb.stage == ResponseBuilder.STAGE_GET_FIELDS) {
-
-      final Object[] objArr = newHighlightsArray(rb.resultIds.size());
-      final String highlightingResponseField = highlightingResponseField();
-
-      // TODO: make a generic routine to do automatic merging of id keyed data
-      for (ShardRequest sreq : rb.finished) {
-        if ((sreq.purpose & ShardRequest.PURPOSE_GET_HIGHLIGHTS) == 0) continue;
-        for (ShardResponse srsp : sreq.responses) {
-          if (srsp.getException() != null) {
-            // can't expect the highlight content if there was an exception for this request
-            // this should only happen when using shards.tolerant=true
-            continue;
-          }
-          Object hl = srsp.getSolrResponse().getResponse().get(highlightingResponseField);
-          addHighlights(objArr, hl, rb.resultIds);
-        }
-      }
-
-      rb.rsp.add(highlightingResponseField, getAllHighlights(objArr));
-    }
-  }
-
-  ////////////////////////////////////////////
-  ///  SolrInfoBean
-  ////////////////////////////////////////////
-  
-  @Override
-  public String getDescription() {
-    return "Highlighting";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.HIGHLIGHTER;
-  }
-
-  ////////////////////////////////////////////
-  ///  highlighting response collation
-  ////////////////////////////////////////////
-
-  protected String highlightingResponseField() {
-    return "highlighting";
-  }
-
-  protected Object convertHighlights(NamedList hl) {
-    return hl;
-  }
-
-  protected Object[] newHighlightsArray(int size) {
-    return new NamedList.NamedListEntry[size];
-  }
-
-  protected void addHighlights(Object[] objArr, Object obj, Map<Object, ShardDoc> resultIds) {
-    Map.Entry<String, Object>[] arr = (Map.Entry<String, Object>[])objArr;
-    NamedList hl = (NamedList)obj;
-    SolrPluginUtils.copyNamedListIntoArrayByDocPosInResponse(hl, resultIds, arr);
-  }
-
-  protected Object getAllHighlights(Object[] objArr) {
-      final Map.Entry<String, Object>[] arr = (Map.Entry<String, Object>[])objArr;
-      // remove nulls in case not all docs were able to be retrieved
-      return SolrPluginUtils.removeNulls(arr, new SimpleOrderedMap<>());
-  }
-
-}
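
HighlightMethod.parse above resolves the hl.method parameter through an
unmodifiable name-to-constant map built once from values(). The same stream idiom
in isolation (the enum and its names below are invented for illustration):

    import java.util.Collections;
    import java.util.Map;
    import java.util.function.Function;
    import java.util.stream.Stream;
    import static java.util.stream.Collectors.toMap;

    enum Method {
      UNIFIED("unified"), ORIGINAL("original");

      // Built once at class-initialization time; lookups are then O(1).
      private static final Map<String, Method> BY_NAME = Collections.unmodifiableMap(
          Stream.of(values()).collect(toMap(Method::methodName, Function.identity())));

      private final String methodName;

      Method(String methodName) { this.methodName = methodName; }

      String methodName() { return methodName; }

      // Returns null for unknown names, like HighlightMethod.parse does.
      static Method parse(String name) { return BY_NAME.get(name); }
    }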


[27/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/SolrCore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrCore.java b/solr/core/src/java/org/apache/solr/core/SolrCore.java
deleted file mode 100644
index 6e13039..0000000
--- a/solr/core/src/java/org/apache/solr/core/SolrCore.java
+++ /dev/null
@@ -1,3154 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.io.Closeable;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.io.OutputStreamWriter;
-import java.io.Writer;
-import java.lang.invoke.MethodHandles;
-import java.lang.reflect.Constructor;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.NoSuchFileException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.Properties;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.ReentrantLock;
-
-import com.codahale.metrics.Counter;
-import com.codahale.metrics.MetricRegistry;
-import com.codahale.metrics.Timer;
-import com.google.common.collect.Iterators;
-import com.google.common.collect.MapMaker;
-import org.apache.commons.io.FileUtils;
-import org.apache.lucene.analysis.util.ResourceLoader;
-import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexDeletionPolicy;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.LockObtainFailedException;
-import org.apache.solr.client.solrj.impl.BinaryResponseParser;
-import org.apache.solr.cloud.CloudDescriptor;
-import org.apache.solr.cloud.RecoveryStrategy;
-import org.apache.solr.cloud.ZkSolrResourceLoader;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.CommonParams.EchoParamStyle;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.params.UpdateParams;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.IOUtils;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.ObjectReleaseTracker;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.DirectoryFactory.DirContext;
-import org.apache.solr.core.snapshots.SolrSnapshotManager;
-import org.apache.solr.core.snapshots.SolrSnapshotMetaDataManager;
-import org.apache.solr.core.snapshots.SolrSnapshotMetaDataManager.SnapshotMetaData;
-import org.apache.solr.handler.IndexFetcher;
-import org.apache.solr.handler.ReplicationHandler;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.handler.SolrConfigHandler;
-import org.apache.solr.handler.component.HighlightComponent;
-import org.apache.solr.handler.component.SearchComponent;
-import org.apache.solr.logging.MDCLoggingContext;
-import org.apache.solr.metrics.SolrCoreMetricManager;
-import org.apache.solr.metrics.SolrMetricManager;
-import org.apache.solr.metrics.SolrMetricProducer;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.response.BinaryResponseWriter;
-import org.apache.solr.response.CSVResponseWriter;
-import org.apache.solr.response.GeoJSONResponseWriter;
-import org.apache.solr.response.GraphMLResponseWriter;
-import org.apache.solr.response.JSONResponseWriter;
-import org.apache.solr.response.PHPResponseWriter;
-import org.apache.solr.response.PHPSerializedResponseWriter;
-import org.apache.solr.response.PythonResponseWriter;
-import org.apache.solr.response.QueryResponseWriter;
-import org.apache.solr.response.RawResponseWriter;
-import org.apache.solr.response.RubyResponseWriter;
-import org.apache.solr.response.SchemaXmlResponseWriter;
-import org.apache.solr.response.SmileResponseWriter;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.response.XMLResponseWriter;
-import org.apache.solr.response.transform.TransformerFactory;
-import org.apache.solr.rest.ManagedResourceStorage;
-import org.apache.solr.rest.ManagedResourceStorage.StorageIO;
-import org.apache.solr.rest.RestManager;
-import org.apache.solr.schema.FieldType;
-import org.apache.solr.schema.IndexSchema;
-import org.apache.solr.schema.IndexSchemaFactory;
-import org.apache.solr.schema.ManagedIndexSchema;
-import org.apache.solr.schema.SimilarityFactory;
-import org.apache.solr.search.QParserPlugin;
-import org.apache.solr.search.SolrFieldCacheBean;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.search.ValueSourceParser;
-import org.apache.solr.search.stats.LocalStatsCache;
-import org.apache.solr.search.stats.StatsCache;
-import org.apache.solr.update.DefaultSolrCoreState;
-import org.apache.solr.update.DirectUpdateHandler2;
-import org.apache.solr.update.IndexFingerprint;
-import org.apache.solr.update.SolrCoreState;
-import org.apache.solr.update.SolrCoreState.IndexWriterCloser;
-import org.apache.solr.update.SolrIndexWriter;
-import org.apache.solr.update.UpdateHandler;
-import org.apache.solr.update.VersionInfo;
-import org.apache.solr.update.processor.DistributedUpdateProcessorFactory;
-import org.apache.solr.update.processor.LogUpdateProcessorFactory;
-import org.apache.solr.update.processor.RunUpdateProcessorFactory;
-import org.apache.solr.update.processor.UpdateRequestProcessorChain;
-import org.apache.solr.update.processor.UpdateRequestProcessorChain.ProcessorInfo;
-import org.apache.solr.update.processor.UpdateRequestProcessorFactory;
-import org.apache.solr.util.DefaultSolrThreadFactory;
-import org.apache.solr.util.IOFunction;
-import org.apache.solr.util.NumberUtils;
-import org.apache.solr.util.PropertiesInputStream;
-import org.apache.solr.util.PropertiesOutputStream;
-import org.apache.solr.util.RefCounted;
-import org.apache.solr.util.plugin.NamedListInitializedPlugin;
-import org.apache.solr.util.plugin.PluginInfoInitialized;
-import org.apache.solr.util.plugin.SolrCoreAware;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.common.params.CommonParams.PATH;
-
-/**
- * SolrCore got its name because it represents the "core" of Solr -- one index and everything needed to make it work.
- * When multi-core support was added to Solr way back in version 1.3, this class was required so that the core
- * functionality could be re-used multiple times.
- */
-public final class SolrCore implements SolrInfoBean, SolrMetricProducer, Closeable {
-
-  public static final String version="1.0";
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private static final Logger requestLog = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass().getName() + ".Request");
-  private static final Logger slowLog = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass().getName() + ".SlowRequest");
-
-  private String name;
-  private String logid; // used in log messages to show which core is being referenced
-
-  private boolean isReloaded = false;
-
-  private StatsCache statsCache;
-
-  private final SolrConfig solrConfig;
-  private final SolrResourceLoader resourceLoader;
-  private volatile IndexSchema schema;
-  private final NamedList configSetProperties;
-  private final String dataDir;
-  private final String ulogDir;
-  private final UpdateHandler updateHandler;
-  private final SolrCoreState solrCoreState;
-
-  private final Date startTime = new Date();
-  private final long startNanoTime = System.nanoTime();
-  private final RequestHandlers reqHandlers;
-  private final PluginBag<SearchComponent> searchComponents = new PluginBag<>(SearchComponent.class, this);
-  private final PluginBag<UpdateRequestProcessorFactory> updateProcessors = new PluginBag<>(UpdateRequestProcessorFactory.class, this, true);
-  private final Map<String,UpdateRequestProcessorChain> updateProcessorChains;
-  private final SolrCoreMetricManager coreMetricManager;
-  private final Map<String, SolrInfoBean> infoRegistry = new ConcurrentHashMap<>();
-  private final IndexDeletionPolicyWrapper solrDelPolicy;
-  private final SolrSnapshotMetaDataManager snapshotMgr;
-  private final DirectoryFactory directoryFactory;
-  private final RecoveryStrategy.Builder recoveryStrategyBuilder;
-  private IndexReaderFactory indexReaderFactory;
-  private final Codec codec;
-  private final MemClassLoader memClassLoader;
-
-  private final List<Runnable> confListeners = new CopyOnWriteArrayList<>();
-
-  private final ReentrantLock ruleExpiryLock;
-  private final ReentrantLock snapshotDelLock; // A lock instance to guard against concurrent deletions.
-
-  private Timer newSearcherTimer;
-  private Timer newSearcherWarmupTimer;
-  private Counter newSearcherCounter;
-  private Counter newSearcherMaxReachedCounter;
-  private Counter newSearcherOtherErrorsCounter;
-  private final CoreContainer coreContainer;
-
-  private Set<String> metricNames = ConcurrentHashMap.newKeySet();
-  private String metricTag = Integer.toHexString(hashCode());
-
-  public Set<String> getMetricNames() {
-    return metricNames;
-  }
-
-
-  public Date getStartTimeStamp() { return startTime; }
-
-  private final Map<IndexReader.CacheKey, IndexFingerprint> perSegmentFingerprintCache = new MapMaker().weakKeys().makeMap();
-
-  public long getStartNanoTime() {
-    return startNanoTime;
-  }
-
-  public long getUptimeMs() {
-    return TimeUnit.MILLISECONDS.convert(System.nanoTime() - startNanoTime, TimeUnit.NANOSECONDS);
-  }
-
-  private final RestManager restManager;
-
-  public RestManager getRestManager() {
-    return restManager;
-  }
-
-  static int boolean_query_max_clause_count = Integer.MIN_VALUE;
-
-
-  /**
-   * The SolrResourceLoader used to load all resources for this core.
-   * @since solr 1.3
-   */
-  public SolrResourceLoader getResourceLoader() {
-    return resourceLoader;
-  }
-
-  /**
-   * Gets the configuration resource name used by this core instance.
-   * @since solr 1.3
-   */
-  public String getConfigResource() {
-    return solrConfig.getResourceName();
-  }
-
-  /**
-   * Gets the configuration object used by this core instance.
-   */
-  public SolrConfig getSolrConfig() {
-    return solrConfig;
-  }
-
-  /**
-   * Gets the schema resource name used by this core instance.
-   * @since solr 1.3
-   */
-  public String getSchemaResource() {
-    return getLatestSchema().getResourceName();
-  }
-  
-  /** 
-   * @return the latest snapshot of the schema used by this core instance. 
-   * @see #setLatestSchema 
-   */
-  public IndexSchema getLatestSchema() {
-    return schema;
-  }
-  
-  /** 
-   * Sets the latest schema snapshot to be used by this core instance. 
-   * If the specified <code>replacementSchema</code> uses a {@link SimilarityFactory} which is 
-   * {@link SolrCoreAware} then this method will {@link SolrCoreAware#inform} that factory about 
-   * this SolrCore prior to using the <code>replacementSchema</code>
-   * @see #getLatestSchema
-   */
-  public void setLatestSchema(IndexSchema replacementSchema) {
-    // 1) For a newly instantiated core, the Similarity needs SolrCore before inform() is called on
-    // any registered SolrCoreAware listeners (which will likely need to use the SolrIndexSearcher).
-    //
-    // 2) If a new IndexSchema is assigned to an existing live SolrCore (ie: managed schema
-    // replacement via SolrCloud) then we need to explicitly inform() the similarity because
-    // we can't rely on the normal SolrResourceLoader lifecycle because the sim was instantiated
-    // after the SolrCore was already live (see: SOLR-8311 + SOLR-8280)
-    final SimilarityFactory similarityFactory = replacementSchema.getSimilarityFactory();
-    if (similarityFactory instanceof SolrCoreAware) {
-      ((SolrCoreAware) similarityFactory).inform(this);
-    }
-    this.schema = replacementSchema;
-  }
-  
-  public NamedList getConfigSetProperties() {
-    return configSetProperties;
-  }
-
-  public String getDataDir() {
-    return dataDir;
-  }
-
-  public String getUlogDir() {
-    return ulogDir;
-  }
-
-  public String getIndexDir() {
-    synchronized (searcherLock) {
-      if (_searcher == null) return getNewIndexDir();
-      SolrIndexSearcher searcher = _searcher.get();
-      return searcher.getPath() == null ? dataDir + "index/" : searcher
-          .getPath();
-    }
-  }
-
-
-  /**
-   * Returns the index directory as given in index.properties. If index.properties exists in dataDir
-   * and contains an <i>index</i> property that points to a valid directory in dataDir, that
-   * directory is returned; otherwise dataDir/index is returned. Only called when creating new
-   * indexSearchers and indexWriters. Use the getIndexDir() method to find the active index directory.
-   *
-   * @return the index directory as given in index.properties
-   *
-   * @throws SolrException if for any reason a reasonable index directory cannot be determined.
-   */
-  public String getNewIndexDir() {
-    Directory dir = null;
-    try {
-      dir = getDirectoryFactory().get(getDataDir(), DirContext.META_DATA, getSolrConfig().indexConfig.lockType);
-      String result = getIndexPropertyFromPropFile(dir);
-      if (!result.equals(lastNewIndexDir)) {
-        log.debug("New index directory detected: old={} new={}", lastNewIndexDir, result);
-      }
-      lastNewIndexDir = result;
-      return result;
-    } catch (IOException e) {
-      SolrException.log(log, "", e);
-      // See SOLR-11687. It is inadvisable to assume we can do the right thing for any but a small
-      // number of exceptions that were caught and swallowed in getIndexProperty.
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error in getNewIndexDir, exception: ", e);
-    } finally {
-      if (dir != null) {
-        try {
-          getDirectoryFactory().release(dir);
-        } catch (IOException e) {
-          SolrException.log(log, "", e);
-        }
-      }
-    }
-  }
-
-  // This is guaranteed to return a string or throw an exception.
-  //
-  // NOTE: Not finding the index.properties file is normal.
-  //
-  // We return dataDir/index if there is an index.properties file with no value for "index"
-  // See SOLR-11687
-  //
-
-  private String getIndexPropertyFromPropFile(Directory dir) throws IOException {
-    IndexInput input;
-    try {
-      input = dir.openInput(IndexFetcher.INDEX_PROPERTIES, IOContext.DEFAULT);
-    } catch (FileNotFoundException | NoSuchFileException e) {
-      // Swallow this error, dataDir/index is the right thing to return
-      // if there is no index.properties file
-      // All other exceptions will propagate to the caller.
-      return dataDir + "index/";
-    }
-    final InputStream is = new PropertiesInputStream(input); // c'tor just assigns a variable here, no exception thrown.
-    try {
-      Properties p = new Properties();
-      p.load(new InputStreamReader(is, StandardCharsets.UTF_8));
-
-      String s = p.getProperty("index");
-      if (s != null && s.trim().length() > 0) {
-        return dataDir + s.trim();
-      }
-
-      // We'll return dataDir/index/ if the properties file has an "index" property with
-      // no associated value or does not have an index property at all.
-      return dataDir + "index/";
-    } finally {
-      IOUtils.closeQuietly(is);
-    }
-  }
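
getIndexPropertyFromPropFile reduces to a three-way fallback: no index.properties
means dataDir/index/, a non-blank "index" property wins, and a missing or blank
property again means dataDir/index/. A standalone sketch of that fallback using
plain java.nio and java.util.Properties rather than Solr's Directory abstraction
(the resolver name is hypothetical):

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.Properties;

    final class IndexDirResolver {
      // Resolve the active index directory under dataDir, defaulting to dataDir/index/.
      static String resolve(Path dataDir) throws IOException {
        Path propsFile = dataDir.resolve("index.properties");
        if (!Files.exists(propsFile)) {
          return dataDir.resolve("index") + "/"; // missing file is the normal case
        }
        Properties p = new Properties();
        try (InputStream in = Files.newInputStream(propsFile)) {
          p.load(new InputStreamReader(in, StandardCharsets.UTF_8));
        }
        String s = p.getProperty("index");
        return (s != null && !s.trim().isEmpty())
            ? dataDir.resolve(s.trim()).toString()
            : dataDir.resolve("index") + "/";
      }
    }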
-
-  private String lastNewIndexDir; // for debugging purposes only... access not synchronized, but that's ok
-
-
-  public DirectoryFactory getDirectoryFactory() {
-    return directoryFactory;
-  }
-
-  public IndexReaderFactory getIndexReaderFactory() {
-    return indexReaderFactory;
-  }
-  
-  public long getIndexSize() {
-    Directory dir;
-    long size = 0;
-    try {
-      if (directoryFactory.exists(getIndexDir())) {
-        dir = directoryFactory.get(getIndexDir(), DirContext.DEFAULT, solrConfig.indexConfig.lockType);
-        try {
-          size = DirectoryFactory.sizeOfDirectory(dir);
-        } finally {
-          directoryFactory.release(dir);
-        }
-      }
-    } catch (IOException e) {
-      SolrException.log(log, "IO error while trying to get the size of the Directory", e);
-    }
-    return size;
-  }
-
-  @Override
-  public String getName() {
-    return name;
-  }
-
-  public void setName(String v) {
-    this.name = v;
-    this.logid = (v==null)?"":("["+v+"] ");
-    if (coreMetricManager != null) {
-      coreMetricManager.afterCoreSetName();
-    }
-  }
-
-  public String getLogId()
-  {
-    return this.logid;
-  }
-
-  /**
-   * Returns the {@link SolrCoreMetricManager} for this core.
-   *
-   * @return the {@link SolrCoreMetricManager} for this core
-   */
-  public SolrCoreMetricManager getCoreMetricManager() {
-    return coreMetricManager;
-  }
-
-  /**
-   * Returns a Map of name vs SolrInfoBean objects. The returned map is an instance of
-   * a ConcurrentHashMap and therefore no synchronization is needed for putting, removing
-   * or iterating over it.
-   *
-   * @return the Info Registry map which contains SolrInfoBean objects keyed by name
-   * @since solr 1.3
-   */
-  public Map<String, SolrInfoBean> getInfoRegistry() {
-    return infoRegistry;
-  }
-
-  private IndexDeletionPolicyWrapper initDeletionPolicy(IndexDeletionPolicyWrapper delPolicyWrapper) {
-    if (delPolicyWrapper != null) {
-      return delPolicyWrapper;
-    }
-    
-    final PluginInfo info = solrConfig.getPluginInfo(IndexDeletionPolicy.class.getName());
-    final IndexDeletionPolicy delPolicy;
-    if (info != null) {
-      delPolicy = createInstance(info.className, IndexDeletionPolicy.class, "Deletion Policy for SOLR", this, getResourceLoader());
-      if (delPolicy instanceof NamedListInitializedPlugin) {
-        ((NamedListInitializedPlugin) delPolicy).init(info.initArgs);
-      }
-    } else {
-      delPolicy = new SolrDeletionPolicy();
-    }
-
-    return new IndexDeletionPolicyWrapper(delPolicy, snapshotMgr);
-  }
-
-  private SolrSnapshotMetaDataManager initSnapshotMetaDataManager() {
-    try {
-      String dirName = getDataDir() + SolrSnapshotMetaDataManager.SNAPSHOT_METADATA_DIR + "/";
-      Directory snapshotDir = directoryFactory.get(dirName, DirContext.DEFAULT,
-           getSolrConfig().indexConfig.lockType);
-      return new SolrSnapshotMetaDataManager(this, snapshotDir);
-    } catch (IOException e) {
-      throw new IllegalStateException(e);
-    }
-  }
-
-  /**
-   * This method deletes the snapshot with the specified name. If the directory
-   * storing the snapshot is not the same as the *current* core index directory,
-   * then delete the files corresponding to this snapshot. Otherwise we leave the
-   * index files related to the snapshot as-is (assuming the underlying Solr IndexDeletionPolicy
-   * will clean them up appropriately).
-   *
-   * @param commitName The name of the snapshot to be deleted.
-   * @throws IOException in case of I/O error.
-   */
-  public void deleteNamedSnapshot(String commitName) throws IOException {
-    // Note this lock is required to prevent multiple snapshot deletions from
-    // opening multiple IndexWriter instances simultaneously.
-    this.snapshotDelLock.lock();
-    try {
-      Optional<SnapshotMetaData> metadata = snapshotMgr.release(commitName);
-      if (metadata.isPresent()) {
-        long gen = metadata.get().getGenerationNumber();
-        String indexDirPath = metadata.get().getIndexDirPath();
-
-        if (!indexDirPath.equals(getIndexDir())) {
-          Directory d = getDirectoryFactory().get(indexDirPath, DirContext.DEFAULT, "none");
-          try {
-            Collection<SnapshotMetaData> snapshots = snapshotMgr.listSnapshotsInIndexDir(indexDirPath);
-            log.info("Following snapshots exist in the index directory {} : {}", indexDirPath, snapshots);
-            if (snapshots.isEmpty()) {// No snapshots remain in this directory. Can be cleaned up!
-              log.info("Removing index directory {} since all named snapshots are deleted.", indexDirPath);
-              getDirectoryFactory().remove(d);
-            } else {
-              SolrSnapshotManager.deleteSnapshotIndexFiles(this, d, gen);
-            }
-          } finally {
-            getDirectoryFactory().release(d);
-          }
-        }
-      }
-    } finally {
-      snapshotDelLock.unlock();
-    }
-  }
-
-  /**
-   * This method deletes the index files not associated with any named snapshot only
-   * if the specified indexDirPath is not the *current* index directory.
-   *
-   * @param indexDirPath The path of the directory
-   * @throws IOException In case of I/O error.
-   */
-  public void deleteNonSnapshotIndexFiles(String indexDirPath) throws IOException {
-    // Skip if the specified indexDirPath is the *current* index directory.
-    if (getIndexDir().equals(indexDirPath)) {
-      return;
-    }
-
-    // Note this lock is required to prevent multiple snapshot deletions from
-    // opening multiple IndexWriter instances simultaneously.
-    this.snapshotDelLock.lock();
-    Directory dir = getDirectoryFactory().get(indexDirPath, DirContext.DEFAULT, "none");
-    try {
-      Collection<SnapshotMetaData> snapshots = snapshotMgr.listSnapshotsInIndexDir(indexDirPath);
-      log.info("Following snapshots exist in the index directory {} : {}", indexDirPath, snapshots);
-      // Delete the old index directory only if no snapshot exists in that directory.
-      if (snapshots.isEmpty()) {
-        log.info("Removing index directory {} since all named snapshots are deleted.", indexDirPath);
-        getDirectoryFactory().remove(dir);
-      } else {
-        SolrSnapshotManager.deleteNonSnapshotIndexFiles(this, dir, snapshots);
-      }
-    } finally {
-      snapshotDelLock.unlock();
-      if (dir != null) {
-        getDirectoryFactory().release(dir);
-      }
-    }
-  }
-
-
-  private void initListeners() {
-    final Class<SolrEventListener> clazz = SolrEventListener.class;
-    final String label = "Event Listener";
-    for (PluginInfo info : solrConfig.getPluginInfos(SolrEventListener.class.getName())) {
-      final String event = info.attributes.get("event");
-      if ("firstSearcher".equals(event)) {
-        SolrEventListener obj = createInitInstance(info, clazz, label, null);
-        firstSearcherListeners.add(obj);
-        log.debug("[{}] Added SolrEventListener for firstSearcher: [{}]", logid, obj);
-      } else if ("newSearcher".equals(event)) {
-        SolrEventListener obj = createInitInstance(info, clazz, label, null);
-        newSearcherListeners.add(obj);
-        log.debug("[{}] Added SolrEventListener for newSearcher: [{}]", logid, obj);
-      }
-    }
-  }
-
-  final List<SolrEventListener> firstSearcherListeners = new ArrayList<>();
-  final List<SolrEventListener> newSearcherListeners = new ArrayList<>();
-
-  /**
-   * NOTE: this function is not thread safe.  However, it is safe to call within the
-   * <code>inform( SolrCore core )</code> function for <code>SolrCoreAware</code> classes.
-   * Outside <code>inform</code>, this could potentially throw a ConcurrentModificationException
-   *
-   * @see SolrCoreAware
-   */
-  public void registerFirstSearcherListener( SolrEventListener listener )
-  {
-    firstSearcherListeners.add( listener );
-  }
-
-  /**
-   * NOTE: this function is not thread safe.  However, it is safe to call within the
-   * <code>inform( SolrCore core )</code> function for <code>SolrCoreAware</code> classes.
-   * Outside <code>inform</code>, this could potentially throw a ConcurrentModificationException
-   *
-   * @see SolrCoreAware
-   */
-  public void registerNewSearcherListener( SolrEventListener listener )
-  {
-    newSearcherListeners.add( listener );
-  }
-
-  /**
-   * NOTE: this function is not thread safe.  However, it is safe to call within the
-   * <code>inform( SolrCore core )</code> function for <code>SolrCoreAware</code> classes.
-   * Outside <code>inform</code>, this could potentially throw a ConcurrentModificationException
-   *
-   * @see SolrCoreAware
-   */
-  public QueryResponseWriter registerResponseWriter( String name, QueryResponseWriter responseWriter ){
-    return responseWriters.put(name, responseWriter);
-  }
-
-  public SolrCore reload(ConfigSet coreConfig) throws IOException {
-    // only one reload at a time
-    synchronized (getUpdateHandler().getSolrCoreState().getReloadLock()) {
-      solrCoreState.increfSolrCoreState();
-      final SolrCore currentCore;
-      if (!getNewIndexDir().equals(getIndexDir())) {
-        // the directory is changing, don't pass on state
-        currentCore = null;
-      } else {
-        currentCore = this;
-      }
-
-      boolean success = false;
-      SolrCore core = null;
-      try {
-        CoreDescriptor cd = new CoreDescriptor(name, getCoreDescriptor());
-        cd.loadExtraProperties(); //Reload the extra properties
-        core = new SolrCore(coreContainer, getName(), getDataDir(), coreConfig.getSolrConfig(),
-            coreConfig.getIndexSchema(), coreConfig.getProperties(),
-            cd, updateHandler, solrDelPolicy, currentCore, true);
-        
-        // we open a new IndexWriter to pick up the latest config
-        core.getUpdateHandler().getSolrCoreState().newIndexWriter(core, false);
-        
-        core.getSearcher(true, false, null, true);
-        success = true;
-        return core;
-      } finally {
-        // close the new core on any errors that have occurred.
-        if (!success) {
-          IOUtils.closeQuietly(core);
-        }
-      }
-    }
-  }
-
-  private DirectoryFactory initDirectoryFactory() {
-    return DirectoryFactory.loadDirectoryFactory(solrConfig, coreContainer, coreMetricManager.getRegistryName());
-  }
-
-  private RecoveryStrategy.Builder initRecoveryStrategyBuilder() {
-    final PluginInfo info = solrConfig.getPluginInfo(RecoveryStrategy.Builder.class.getName());
-    final RecoveryStrategy.Builder rsBuilder;
-    if (info != null && info.className != null) {
-      log.info(info.className);
-      rsBuilder = getResourceLoader().newInstance(info.className, RecoveryStrategy.Builder.class);
-    } else {
-      log.debug("solr.RecoveryStrategy.Builder");
-      rsBuilder = new RecoveryStrategy.Builder();
-    }
-    if (info != null) {
-      rsBuilder.init(info.initArgs);
-    }
-    return rsBuilder;
-  }
-
-  private void initIndexReaderFactory() {
-    IndexReaderFactory indexReaderFactory;
-    PluginInfo info = solrConfig.getPluginInfo(IndexReaderFactory.class.getName());
-    if (info != null) {
-      indexReaderFactory = resourceLoader.newInstance(info.className, IndexReaderFactory.class);
-      indexReaderFactory.init(info.initArgs);
-    } else {
-      indexReaderFactory = new StandardIndexReaderFactory();
-    }
-    this.indexReaderFactory = indexReaderFactory;
-  }
-
-  // protect via synchronized(SolrCore.class)
-  private static Set<String> dirs = new HashSet<>();
-
-  /**
-   * Returns <code>true</code> iff the index in the named directory is
-   * currently locked.
-   * @param directory the directory to check for a lock
-   * @throws IOException if there is a low-level IO error
-   * @deprecated Use of this method can only lead to race conditions. Try
-   *             to actually obtain a lock instead.
-   */
-  @Deprecated
-  private static boolean isWriterLocked(Directory directory) throws IOException {
-    try {
-      directory.obtainLock(IndexWriter.WRITE_LOCK_NAME).close();
-      return false;
-    } catch (LockObtainFailedException failed) {
-      return true;
-    }
-  }
-
-  void initIndex(boolean passOnPreviousState, boolean reload) throws IOException {
-    String indexDir = getNewIndexDir();
-    boolean indexExists = getDirectoryFactory().exists(indexDir);
-    boolean firstTime;
-    synchronized (SolrCore.class) {
-      firstTime = dirs.add(getDirectoryFactory().normalize(indexDir));
-    }
-
-    initIndexReaderFactory();
-
-    if (indexExists && firstTime && !passOnPreviousState) {
-      final String lockType = getSolrConfig().indexConfig.lockType;
-      Directory dir = directoryFactory.get(indexDir, DirContext.DEFAULT, lockType);
-      try {
-        if (isWriterLocked(dir)) {
-          log.error("{}Solr index directory '{}' is locked (lockType={}).  Throwing exception.", logid,
-              indexDir, lockType);
-          throw new LockObtainFailedException(
-              "Index dir '" + indexDir + "' of core '" + name + "' is already locked. " +
-                  "The most likely cause is another Solr server (or another solr core in this server) " +
-                  "also configured to use this directory; other possible causes may be specific to lockType: " +
-                  lockType);
-        }
-      } finally {
-        directoryFactory.release(dir);
-      }
-    }
-
-    // Create the index if it doesn't exist.
-    if (!indexExists) {
-      log.debug("{}Solr index directory '{}' doesn't exist. Creating new index...", logid, indexDir);
-
-      SolrIndexWriter writer = SolrIndexWriter.create(this, "SolrCore.initIndex", indexDir, getDirectoryFactory(), true,
-          getLatestSchema(), solrConfig.indexConfig, solrDelPolicy, codec);
-      writer.close();
-    }
-
-    cleanupOldIndexDirectories(reload);
-  }
-
-
-  /**
-   * Creates an instance by trying a constructor that accepts a SolrCore before
-   * trying the default (no arg) constructor.
-   *
-   * @param className the instance class to create
-   * @param cast      the class or interface that the instance should extend or implement
-   * @param msg       a message helping compose the exception error if any occurs.
-   * @param core      The SolrCore instance for which this object needs to be loaded
-   * @return the desired instance
-   * @throws SolrException if the object could not be instantiated
-   */
-  public static <T> T createInstance(String className, Class<T> cast, String msg, SolrCore core, ResourceLoader resourceLoader) {
-    Class<? extends T> clazz = null;
-    if (msg == null) msg = "SolrCore Object";
-    try {
-      clazz = resourceLoader.findClass(className, cast);
-      // Most of the classes do not have constructors that take a SolrCore argument. It is recommended to obtain SolrCore by implementing SolrCoreAware,
-      // so this would invariably cause a NoSuchMethodException. Instead, iterate through the list of available constructors.
-      Constructor<?>[] cons = clazz.getConstructors();
-      for (Constructor<?> con : cons) {
-        Class<?>[] types = con.getParameterTypes();
-        if (types.length == 1 && types[0] == SolrCore.class) {
-          return cast.cast(con.newInstance(core));
-        }
-      }
-      return resourceLoader.newInstance(className, cast);//use the empty constructor
-    } catch (SolrException e) {
-      throw e;
-    } catch (Exception e) {
-      // The JVM likes to wrap our helpful SolrExceptions in things like
-      // "InvocationTargetException" that have no useful getMessage
-      if (null != e.getCause() && e.getCause() instanceof SolrException) {
-        SolrException inner = (SolrException) e.getCause();
-        throw inner;
-      }
-
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error Instantiating " + msg + ", " + className + " failed to instantiate " + cast.getName(), e);
-    }
-  }
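
createInstance above probes the public constructors for one that takes a SolrCore
before falling back to the no-arg constructor. The general shape of that pattern,
reduced to a hypothetical context object:

    import java.lang.reflect.Constructor;

    final class FlexibleFactory {
      // Try a one-arg constructor accepting the context first, then fall back to no-arg.
      static <T> T create(Class<T> clazz, Object context) throws Exception {
        for (Constructor<?> con : clazz.getConstructors()) {
          Class<?>[] types = con.getParameterTypes();
          if (types.length == 1 && types[0].isInstance(context)) {
            return clazz.cast(con.newInstance(context));
          }
        }
        return clazz.cast(clazz.getDeclaredConstructor().newInstance());
      }
    }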
-
-  private UpdateHandler createReloadedUpdateHandler(String className, String msg, UpdateHandler updateHandler) {
-    Class<? extends UpdateHandler> clazz = null;
-    if (msg == null) msg = "SolrCore Object";
-    try {
-        clazz = getResourceLoader().findClass(className, UpdateHandler.class);
-        // Most of the classes do not have constructors that take a SolrCore argument. It is recommended to obtain SolrCore by implementing SolrCoreAware,
-        // so this would invariably cause a NoSuchMethodException. Instead, iterate through the list of available constructors.
-        Constructor<?>[] cons =  clazz.getConstructors();
-        for (Constructor<?> con : cons) {
-          Class<?>[] types = con.getParameterTypes();
-          if(types.length == 2 && types[0] == SolrCore.class && types[1] == UpdateHandler.class){
-            return UpdateHandler.class.cast(con.newInstance(this, updateHandler));
-          }
-        }
-        throw new SolrException(ErrorCode.SERVER_ERROR,"Error Instantiating "+msg+", "+className+ " could not find proper constructor for " + UpdateHandler.class.getName());
-    } catch (SolrException e) {
-      throw e;
-    } catch (Exception e) {
-      // The JVM likes to wrap our helpful SolrExceptions in things like
-      // "InvocationTargetException" that have no useful getMessage
-      if (null != e.getCause() && e.getCause() instanceof SolrException) {
-        SolrException inner = (SolrException) e.getCause();
-        throw inner;
-      }
-
-      throw new SolrException(ErrorCode.SERVER_ERROR,"Error Instantiating "+msg+", "+className+ " failed to instantiate " + UpdateHandler.class.getName(), e);
-    }
-  }
-
-  public <T extends Object> T createInitInstance(PluginInfo info,Class<T> cast, String msg, String defClassName){
-    if(info == null) return null;
-    T o = createInstance(info.className == null ? defClassName : info.className ,cast, msg,this, getResourceLoader());
-    if (o instanceof PluginInfoInitialized) {
-      ((PluginInfoInitialized) o).init(info);
-    } else if (o instanceof NamedListInitializedPlugin) {
-      ((NamedListInitializedPlugin) o).init(info.initArgs);
-    }
-    if(o instanceof SearchComponent) {
-      ((SearchComponent) o).setName(info.name);
-    }
-    return o;
-  }
-
-  private UpdateHandler createUpdateHandler(String className) {
-    return createInstance(className, UpdateHandler.class, "Update Handler", this, getResourceLoader());
-  }
-
-  private UpdateHandler createUpdateHandler(String className, UpdateHandler updateHandler) {
-    return createReloadedUpdateHandler(className, "Update Handler", updateHandler);
-  }
-
-  public SolrCore(CoreContainer coreContainer, CoreDescriptor cd, ConfigSet coreConfig) {
-    this(coreContainer, cd.getName(), null, coreConfig.getSolrConfig(), coreConfig.getIndexSchema(), coreConfig.getProperties(),
-        cd, null, null, null, false);
-  }
-
-  public CoreContainer getCoreContainer() {
-    return coreContainer;
-  }
-
-
-  /**
-   * Creates a new core and registers it in the list of cores. If a core with the
-   * same name already exists, it will be stopped and replaced by this one.
-   *
-   * @param dataDir
-   *          the index directory
-   * @param config
-   *          a solr config instance
-   * @param schema
-   *          a solr schema instance
-   *
-   * @since solr 1.3
-   */
-  public SolrCore(CoreContainer coreContainer, String name, String dataDir, SolrConfig config,
-                  IndexSchema schema, NamedList configSetProperties,
-                  CoreDescriptor coreDescriptor, UpdateHandler updateHandler,
-                  IndexDeletionPolicyWrapper delPolicy, SolrCore prev, boolean reload) {
-
-    this.coreContainer = coreContainer;
-    
-    assert ObjectReleaseTracker.track(searcherExecutor); // ensure that in unclean shutdown tests we still close this
-
-    CoreDescriptor cd = Objects.requireNonNull(coreDescriptor, "coreDescriptor cannot be null");
-    coreContainer.solrCores.addCoreDescriptor(cd);
-
-    setName(name);
-    MDCLoggingContext.setCore(this);
-    
-    resourceLoader = config.getResourceLoader();
-    this.solrConfig = config;
-    this.configSetProperties = configSetProperties;
-    // Initialize the metrics manager
-    this.coreMetricManager = initCoreMetricManager(config);
-    this.coreMetricManager.loadReporters();
-
-    if (updateHandler == null) {
-      directoryFactory = initDirectoryFactory();
-      recoveryStrategyBuilder = initRecoveryStrategyBuilder();
-      solrCoreState = new DefaultSolrCoreState(directoryFactory, recoveryStrategyBuilder);
-    } else {
-      solrCoreState = updateHandler.getSolrCoreState();
-      directoryFactory = solrCoreState.getDirectoryFactory();
-      recoveryStrategyBuilder = solrCoreState.getRecoveryStrategyBuilder();
-      isReloaded = true;
-    }
-
-    this.dataDir = initDataDir(dataDir, config, coreDescriptor);
-    this.ulogDir = initUpdateLogDir(coreDescriptor);
-
-    log.info("[{}] Opening new SolrCore at [{}], dataDir=[{}]", logid, resourceLoader.getInstancePath(), this.dataDir);
-
-    checkVersionFieldExistsInSchema(schema, coreDescriptor);
-
-    SolrMetricManager metricManager = coreContainer.getMetricManager();
-
-    // initialize searcher-related metrics
-    initializeMetrics(metricManager, coreMetricManager.getRegistryName(), metricTag, null);
-
-    SolrFieldCacheBean solrFieldCacheBean = new SolrFieldCacheBean();
-    // this is registered at the CONTAINER level because it's not core-specific - for now we
-    // also register it here for back-compat
-    solrFieldCacheBean.initializeMetrics(metricManager, coreMetricManager.getRegistryName(), metricTag, "core");
-    infoRegistry.put("fieldCache", solrFieldCacheBean);
-
-
-    initSchema(config, schema);
-
-    this.maxWarmingSearchers = config.maxWarmingSearchers;
-    this.slowQueryThresholdMillis = config.slowQueryThresholdMillis;
-
-    final CountDownLatch latch = new CountDownLatch(1);
-
-    try {
-
-      initListeners();
-
-      this.snapshotMgr = initSnapshotMetaDataManager();
-      this.solrDelPolicy = initDeletionPolicy(delPolicy);
-
-      this.codec = initCodec(solrConfig, this.schema);
-
-      memClassLoader = new MemClassLoader(PluginBag.RuntimeLib.getLibObjects(this, solrConfig.getPluginInfos(PluginBag.RuntimeLib.class.getName())), getResourceLoader());
-      initIndex(prev != null, reload);
-
-      initWriters();
-      qParserPlugins.init(QParserPlugin.standardPlugins, this);
-      valueSourceParsers.init(ValueSourceParser.standardValueSourceParsers, this);
-      transformerFactories.init(TransformerFactory.defaultFactories, this);
-      loadSearchComponents();
-      updateProcessors.init(Collections.emptyMap(), this);
-
-      // Processors initialized before the handlers
-      updateProcessorChains = loadUpdateProcessorChains();
-      reqHandlers = new RequestHandlers(this);
-      reqHandlers.initHandlersFromConfig(solrConfig);
-
-      statsCache = initStatsCache();
-
-      // cause the executor to stall so firstSearcher events won't fire
-      // until after inform() has been called for all components.
-      // searcherExecutor must be single-threaded for this to work
-      searcherExecutor.submit(() -> {
-        latch.await();
-        return null;
-      });
-
-      this.updateHandler = initUpdateHandler(updateHandler);
-      
-      initSearcher(prev);
-
-      // Initialize the RestManager
-      restManager = initRestManager();
-
-      // Finally tell anyone who wants to know
-      resourceLoader.inform(resourceLoader);
-      resourceLoader.inform(this); // last call before the latch is released.
-      this.updateHandler.informEventListeners(this);
-    } catch (Throwable e) {
-      // release the latch, otherwise we block trying to do the close. This
-      // should be fine, since counting down on a latch of 0 is still fine
-      latch.countDown();
-      if (e instanceof OutOfMemoryError) {
-        throw (OutOfMemoryError)e;
-      }
-
-      try {
-        // close down the searcher and any other resources, if they exist, as this
-        // state is not recoverable
-        close();
-      } catch (Throwable t) {
-        if (t instanceof OutOfMemoryError) {
-          throw (OutOfMemoryError) t;
-        }
-        log.error("Error while closing", t);
-      }
-
-      throw new SolrException(ErrorCode.SERVER_ERROR, e.getMessage(), e);
-    } finally {
-      // allow firstSearcher events to fire and make sure it is released
-      latch.countDown();
-    }
-
-    infoRegistry.put("core", this);
-
-    // register any SolrInfoMBeans SolrResourceLoader initialized
-    //
-    // this must happen after the latch is released, because a JMX server impl may
-    // choose to block on registering until properties can be fetched from an MBean,
-    // and a SolrCoreAware MBean may have properties that depend on getting a Searcher
-    // from the core.
-    resourceLoader.inform(infoRegistry);
-
-    // Allow the directory factory to report metrics
-    if (directoryFactory instanceof SolrMetricProducer) {
-      ((SolrMetricProducer)directoryFactory).initializeMetrics(metricManager, coreMetricManager.getRegistryName(), metricTag, "directoryFactory");
-    }
-
-    // seed version buckets with max from index during core initialization ... requires a searcher!
-    seedVersionBuckets();
-
-    bufferUpdatesIfConstructing(coreDescriptor);
-
-    this.ruleExpiryLock = new ReentrantLock();
-    this.snapshotDelLock = new ReentrantLock();
-
-    registerConfListener();
-    
-    assert ObjectReleaseTracker.track(this);
-  }
-
-  public void seedVersionBuckets() {
-    UpdateHandler uh = getUpdateHandler();
-    if (uh != null && uh.getUpdateLog() != null) {
-      RefCounted<SolrIndexSearcher> newestSearcher = getRealtimeSearcher();
-      if (newestSearcher != null) {
-        try {
-          uh.getUpdateLog().seedBucketsWithHighestVersion(newestSearcher.get());
-        } finally {
-          newestSearcher.decref();
-        }
-      } else {
-        log.warn("No searcher available! Cannot seed version buckets with max from index.");
-      }
-    }
-  }
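-
-  // Usage sketch for the RefCounted pattern above (hypothetical caller code,
-  // not an API of this class): any acquired searcher must be released in a
-  // finally block, exactly as seedVersionBuckets() does, or a reference leaks:
-  //
-  //   RefCounted<SolrIndexSearcher> holder = core.getRealtimeSearcher();
-  //   try {
-  //     SolrIndexSearcher s = holder.get();
-  //     // ... read from s ...
-  //   } finally {
-  //     holder.decref();
-  //   }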
-
-  /** Set UpdateLog to buffer updates if the slice is in construction. */
-  private void bufferUpdatesIfConstructing(CoreDescriptor coreDescriptor) {
-    
-    if (coreContainer != null && coreContainer.isZooKeeperAware()) {
-      if (reqHandlers.get("/get") == null) {
-        log.warn("WARNING: RealTimeGetHandler is not registered at /get. " +
-            "SolrCloud will always use full index replication instead of the more efficient PeerSync method.");
-      }
-
-      // ZK pre-register would have already happened so we read slice properties now
-      final ClusterState clusterState = coreContainer.getZkController().getClusterState();
-      final DocCollection collection = clusterState.getCollection(coreDescriptor.getCloudDescriptor().getCollectionName());
-      final Slice slice = collection.getSlice(coreDescriptor.getCloudDescriptor().getShardId());
-      if (slice.getState() == Slice.State.CONSTRUCTION) {
-        // set update log to buffer before publishing the core
-        getUpdateHandler().getUpdateLog().bufferUpdates();
-      }
-    }
-  }
-
-  private void initSearcher(SolrCore prev) throws IOException {
-    // use the (old) writer to open the first searcher
-    RefCounted<IndexWriter> iwRef = null;
-    if (prev != null) {
-      iwRef = prev.getUpdateHandler().getSolrCoreState().getIndexWriter(null);
-      if (iwRef != null) {
-        final IndexWriter iw = iwRef.get();
-        final SolrCore core = this;
-        newReaderCreator = () -> indexReaderFactory.newReader(iw, core);
-      }
-    }
-
-    try {
-      getSearcher(false, false, null, true);
-    } finally {
-      newReaderCreator = null;
-      if (iwRef != null) {
-        iwRef.decref();
-      }
-    }
-  }
-
-  private UpdateHandler initUpdateHandler(UpdateHandler updateHandler) {
-    String updateHandlerClass = solrConfig.getUpdateHandlerInfo().className;
-    if (updateHandlerClass == null) {
-      updateHandlerClass = DirectUpdateHandler2.class.getName();
-    }
-
-    final UpdateHandler newUpdateHandler;
-    if (updateHandler == null) {
-      newUpdateHandler = createUpdateHandler(updateHandlerClass);
-    } else {
-      newUpdateHandler = createUpdateHandler(updateHandlerClass, updateHandler);
-    }
-    if (newUpdateHandler instanceof SolrMetricProducer) {
-      coreMetricManager.registerMetricProducer("updateHandler", (SolrMetricProducer)newUpdateHandler);
-    }
-    infoRegistry.put("updateHandler", newUpdateHandler);
-    return newUpdateHandler;
-  }
-  
-  /**
-   * Initializes the "Latest Schema" for this SolrCore using either the provided <code>schema</code> 
-   * if non-null, or a new instance built via the factory identified in the specified <code>config</code>
-   * @see IndexSchemaFactory
-   * @see #setLatestSchema
-   */
-  private void initSchema(SolrConfig config, IndexSchema schema) {
-    if (schema == null) {
-      schema = IndexSchemaFactory.buildIndexSchema(IndexSchema.DEFAULT_SCHEMA_FILE, config);
-    }
-    setLatestSchema(schema);
-  }
-
-  /**
-   * Initializes the core's {@link SolrCoreMetricManager} with a given configuration.
-   * If metric reporters are configured, they are also initialized for this core.
-   *
-   * @param config the given configuration
-   * @return an instance of {@link SolrCoreMetricManager}
-   */
-  private SolrCoreMetricManager initCoreMetricManager(SolrConfig config) {
-    SolrCoreMetricManager coreMetricManager = new SolrCoreMetricManager(this);
-    return coreMetricManager;
-  }
-
-  @Override
-  public void initializeMetrics(SolrMetricManager manager, String registry, String tag, String scope) {
-    newSearcherCounter = manager.counter(this, registry, "new", Category.SEARCHER.toString());
-    newSearcherTimer = manager.timer(this, registry, "time", Category.SEARCHER.toString(), "new");
-    newSearcherWarmupTimer = manager.timer(this, registry, "warmup", Category.SEARCHER.toString(), "new");
-    newSearcherMaxReachedCounter = manager.counter(this, registry, "maxReached", Category.SEARCHER.toString(), "new");
-    newSearcherOtherErrorsCounter = manager.counter(this, registry, "errors", Category.SEARCHER.toString(), "new");
-
-    manager.registerGauge(this, registry, () -> name == null ? "(null)" : name, getMetricTag(), true, "coreName", Category.CORE.toString());
-    manager.registerGauge(this, registry, () -> startTime, getMetricTag(), true, "startTime", Category.CORE.toString());
-    manager.registerGauge(this, registry, () -> getOpenCount(), getMetricTag(), true, "refCount", Category.CORE.toString());
-    manager.registerGauge(this, registry, () -> resourceLoader.getInstancePath().toString(), getMetricTag(), true, "instanceDir", Category.CORE.toString());
-    manager.registerGauge(this, registry, () -> isClosed() ? "(closed)" : getIndexDir(), getMetricTag(), true, "indexDir", Category.CORE.toString());
-    manager.registerGauge(this, registry, () -> isClosed() ? 0 : getIndexSize(), getMetricTag(), true, "sizeInBytes", Category.INDEX.toString());
-    manager.registerGauge(this, registry, () -> isClosed() ? "(closed)" : NumberUtils.readableSize(getIndexSize()), getMetricTag(), true, "size", Category.INDEX.toString());
-    if (coreContainer != null) {
-      manager.registerGauge(this, registry, () -> coreContainer.getNamesForCore(this), getMetricTag(), true, "aliases", Category.CORE.toString());
-      final CloudDescriptor cd = getCoreDescriptor().getCloudDescriptor();
-      if (cd != null) {
-        manager.registerGauge(this, registry, () -> {
-          if (cd.getCollectionName() != null) {
-            return cd.getCollectionName();
-          } else {
-            return "_notset_";
-          }
-        }, getMetricTag(), true, "collection", Category.CORE.toString());
-
-        manager.registerGauge(this, registry, () -> {
-          if (cd.getShardId() != null) {
-            return cd.getShardId();
-          } else {
-            return "_auto_";
-          }
-        }, getMetricTag(), true, "shard", Category.CORE.toString());
-      }
-    }
-    // initialize disk total / free metrics
-    Path dataDirPath = Paths.get(dataDir);
-    File dataDirFile = dataDirPath.toFile();
-    manager.registerGauge(this, registry, () -> dataDirFile.getTotalSpace(), getMetricTag(), true, "totalSpace", Category.CORE.toString(), "fs");
-    manager.registerGauge(this, registry, () -> dataDirFile.getUsableSpace(), getMetricTag(), true, "usableSpace", Category.CORE.toString(), "fs");
-    manager.registerGauge(this, registry, () -> dataDirPath.toAbsolutePath().toString(), getMetricTag(), true, "path", Category.CORE.toString(), "fs");
-    manager.registerGauge(this, registry, () -> {
-      try {
-        return org.apache.lucene.util.IOUtils.spins(dataDirPath.toAbsolutePath());
-      } catch (IOException e) {
-        // default to spinning
-        return true;
-      }
-    }, getMetricTag(), true, "spins", Category.CORE.toString(), "fs");
-  }
-
-  public String getMetricTag() {
-    return metricTag;
-  }
-
-  private void checkVersionFieldExistsInSchema(IndexSchema schema, CoreDescriptor coreDescriptor) {
-    if (null != coreDescriptor.getCloudDescriptor()) {
-      // we are evidently running in cloud mode.  
-      //
-      // In cloud mode, version field is required for correct consistency
-      // ideally this check would be more fine grained, and individual features
-      // would assert it when they initialize, but DistributedUpdateProcessor
-      // is currently a big ball of wax that does more than just distribute
-      // updates (i.e. partial document updates), so it needs to work in non-cloud
-      // mode as well, and can't assert version field support on init.
-
-      try {
-        VersionInfo.getAndCheckVersionField(schema);
-      } catch (SolrException e) {
-        throw new SolrException(ErrorCode.SERVER_ERROR,
-                                "Schema will not work with SolrCloud mode: " +
-                                e.getMessage(), e);
-      }
-    }
-  }
-
-  private String initDataDir(String dataDir, SolrConfig config, CoreDescriptor coreDescriptor) {
-    return findDataDir(getDirectoryFactory(), dataDir, config, coreDescriptor);
-  }
-
-  /**
-   * Locate the data directory for a given config and core descriptor.
-   *
-   * @param directoryFactory
-   *          The directory factory to use if necessary to calculate an absolute path. Should be the same as what will
-   *          be used to open the data directory later.
-   * @param dataDir
-   *          An optional hint to the data directory location. Will be normalized and used if not null.
-   * @param config
-   *          A solr config to retrieve the default data directory location, if used.
-   * @param coreDescriptor
-   *          descriptor to load the actual data dir from, if not using the default.
-   * @return a normalized data directory name
-   * @throws SolrException
-   *           if the data directory cannot be loaded from the core descriptor
-   */
-  static String findDataDir(DirectoryFactory directoryFactory, String dataDir, SolrConfig config, CoreDescriptor coreDescriptor) {
-    if (dataDir == null) {
-      if (coreDescriptor.usingDefaultDataDir()) {
-        dataDir = config.getDataDir();
-      }
-      if (dataDir == null) {
-        try {
-          dataDir = coreDescriptor.getDataDir();
-          if (!directoryFactory.isAbsolute(dataDir)) {
-            dataDir = directoryFactory.getDataHome(coreDescriptor);
-          }
-        } catch (IOException e) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, e);
-        }
-      }
-    }
-    return SolrResourceLoader.normalizeDir(dataDir);
-  }
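-
-  // Resolution order, for illustration: an explicit dataDir hint wins; failing
-  // that, the solrconfig.xml <dataDir> applies when the descriptor uses the
-  // default location; otherwise the descriptor's own dataDir is used, made
-  // absolute via the directory factory's data home when it is relative.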
-
-
-  public boolean modifyIndexProps(String tmpIdxDirName) {
-    return SolrCore.modifyIndexProps(getDirectoryFactory(), getDataDir(), getSolrConfig(), tmpIdxDirName);
-  }
-  
-  /**
-   * Update the index.properties file with the new index sub directory name
-   */
-  // package private
-  static boolean modifyIndexProps(DirectoryFactory directoryFactory, String dataDir, SolrConfig solrConfig, String tmpIdxDirName) {
-    log.info("Updating index properties... index={}", tmpIdxDirName);
-    Directory dir = null;
-    try {
-      dir = directoryFactory.get(dataDir, DirContext.META_DATA, solrConfig.indexConfig.lockType);
-      String tmpIdxPropName = IndexFetcher.INDEX_PROPERTIES + "." + System.nanoTime();
-      writeNewIndexProps(dir, tmpIdxPropName, tmpIdxDirName);
-      directoryFactory.renameWithOverwrite(dir, tmpIdxPropName, IndexFetcher.INDEX_PROPERTIES);
-      return true;
-    } catch (IOException e1) {
-      throw new RuntimeException(e1);
-    } finally {
-      if (dir != null) {
-        try {
-          directoryFactory.release(dir);
-        } catch (IOException e) {
-          SolrException.log(log, "", e);
-        }
-      }
-    }
-  }
-  
-  /**
-   * Write the index.properties file with the new index sub directory name
-   * @param dir a data directory (containing an index.properties file)
-   * @param tmpFileName the file name to write the new index.properties to
-   * @param tmpIdxDirName new index directory name
-   */
-  private static void writeNewIndexProps(Directory dir, String tmpFileName, String tmpIdxDirName) {
-    if (tmpFileName == null) {
-      tmpFileName = IndexFetcher.INDEX_PROPERTIES;
-    }
-    final Properties p = new Properties();
-    
-    // Read existing properties
-    try {
-      final IndexInput input = dir.openInput(IndexFetcher.INDEX_PROPERTIES, DirectoryFactory.IOCONTEXT_NO_CACHE);
-      final InputStream is = new PropertiesInputStream(input);
-      try {
-        p.load(new InputStreamReader(is, StandardCharsets.UTF_8));
-      } catch (Exception e) {
-        log.error("Unable to load {}", IndexFetcher.INDEX_PROPERTIES, e);
-      } finally {
-        IOUtils.closeQuietly(is);
-      }
-    } catch (IOException e) {
-      // ignore; file does not exist
-    }
-    
-    p.put("index", tmpIdxDirName);
-
-    // Write new properties
-    Writer os = null;
-    try {
-      IndexOutput out = dir.createOutput(tmpFileName, DirectoryFactory.IOCONTEXT_NO_CACHE);
-      os = new OutputStreamWriter(new PropertiesOutputStream(out), StandardCharsets.UTF_8);
-      p.store(os, IndexFetcher.INDEX_PROPERTIES);
-      dir.sync(Collections.singleton(tmpFileName));
-    } catch (Exception e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to write " + IndexFetcher.INDEX_PROPERTIES, e);
-    } finally {
-      IOUtils.closeQuietly(os);
-    }
-  }
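-
-  // For illustration: the result is a plain java.util.Properties file. Assuming
-  // a newly fetched index directory named "index.20181023000000000" (a
-  // hypothetical name), the stored file would contain roughly:
-  //
-  //   #index.properties
-  //   index=index.20181023000000000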
-
-  private String initUpdateLogDir(CoreDescriptor coreDescriptor) {
-    String updateLogDir = coreDescriptor.getUlogDir();
-    if (updateLogDir == null) {
-      updateLogDir = coreDescriptor.getInstanceDir().resolve(dataDir).normalize().toAbsolutePath().toString();
-    }
-    return updateLogDir;
-  }
-
-  /**
-   * Close the core; if it is still in use, this waits until it is no longer in use.
-   * @see #close() 
-   * @see #isClosed() 
-   */
-  public void closeAndWait() {
-    close();
-    while (!isClosed()) {
-      final long milliSleep = 100;
-      log.info("Core {} is not yet closed, waiting {} ms before checking again.", getName(), milliSleep);
-      try {
-        Thread.sleep(milliSleep);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        throw new SolrException(ErrorCode.SERVER_ERROR,
-            "Caught InterruptedException whilst waiting for core " + getName() + " to close: "
-                + e.getMessage(), e);
-      }
-    }
-  }
-  
-  private Codec initCodec(SolrConfig solrConfig, final IndexSchema schema) {
-    final PluginInfo info = solrConfig.getPluginInfo(CodecFactory.class.getName());
-    final CodecFactory factory;
-    if (info != null) {
-      factory = schema.getResourceLoader().newInstance(info.className, CodecFactory.class);
-      factory.init(info.initArgs);
-    } else {
-      factory = new CodecFactory() {
-        @Override
-        public Codec getCodec() {
-          return Codec.getDefault();
-        }
-      };
-    }
-    if (factory instanceof SolrCoreAware) {
-      // CodecFactory needs SolrCore before inform() is called on all registered
-      // SolrCoreAware listeners, at the end of the SolrCore constructor
-      ((SolrCoreAware)factory).inform(this);
-    } else {
-      for (FieldType ft : schema.getFieldTypes().values()) {
-        if (null != ft.getPostingsFormat()) {
-          String msg = "FieldType '" + ft.getTypeName() + "' is configured with a postings format, but the codec does not support it: " + factory.getClass();
-          log.error(msg);
-          throw new SolrException(ErrorCode.SERVER_ERROR, msg);
-        }
-        if (null != ft.getDocValuesFormat()) {
-          String msg = "FieldType '" + ft.getTypeName() + "' is configured with a docValues format, but the codec does not support it: " + factory.getClass();
-          log.error(msg);
-          throw new SolrException(ErrorCode.SERVER_ERROR, msg);
-        }
-      }
-    }
-    return factory.getCodec();
-  }
-
-  private StatsCache initStatsCache() {
-    final StatsCache cache;
-    PluginInfo pluginInfo = solrConfig.getPluginInfo(StatsCache.class.getName());
-    if (pluginInfo != null && pluginInfo.className != null && pluginInfo.className.length() > 0) {
-      cache = createInitInstance(pluginInfo, StatsCache.class, null,
-          LocalStatsCache.class.getName());
-      log.debug("Using statsCache impl: {}", cache.getClass().getName());
-    } else {
-      log.debug("Using default statsCache cache: {}", LocalStatsCache.class.getName());
-      cache = new LocalStatsCache();
-    }
-    return cache;
-  }
-
-  /**
-   * Get the StatsCache.
-   */
-  public StatsCache getStatsCache() {
-    return statsCache;
-  }
-
-  /**
-   * Load the request processors
-   */
-  private Map<String,UpdateRequestProcessorChain> loadUpdateProcessorChains() {
-    Map<String, UpdateRequestProcessorChain> map = new HashMap<>();
-    UpdateRequestProcessorChain def = initPlugins(map,UpdateRequestProcessorChain.class, UpdateRequestProcessorChain.class.getName());
-    if(def == null){
-      def = map.get(null);
-    }
-    if (def == null) {
-      log.debug("no updateRequestProcessorChain defined as default, creating implicit default");
-      // construct the default chain
-      UpdateRequestProcessorFactory[] factories = new UpdateRequestProcessorFactory[]{
-              new LogUpdateProcessorFactory(),
-              new DistributedUpdateProcessorFactory(),
-              new RunUpdateProcessorFactory()
-      };
-      def = new UpdateRequestProcessorChain(Arrays.asList(factories), this);
-    }
-    map.put(null, def);
-    map.put("", def);
-    return map;
-  }
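-
-  // The implicit default chain built above corresponds to configuring the
-  // following in solrconfig.xml (a sketch; the chain name is arbitrary and
-  // default="true" marks the chain used when no update.chain is supplied):
-  //
-  //   <updateRequestProcessorChain name="defaults" default="true">
-  //     <processor class="solr.LogUpdateProcessorFactory" />
-  //     <processor class="solr.DistributedUpdateProcessorFactory" />
-  //     <processor class="solr.RunUpdateProcessorFactory" />
-  //   </updateRequestProcessorChain>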
-
-  public SolrCoreState getSolrCoreState() {
-    return solrCoreState;
-  }
-
-  /**
-   * @return the update processor chain registered to the given name.  Throws an exception if the chain is undefined
-   */
-  public UpdateRequestProcessorChain getUpdateProcessingChain( final String name )
-  {
-    UpdateRequestProcessorChain chain = updateProcessorChains.get( name );
-    if( chain == null ) {
-      throw new SolrException( ErrorCode.BAD_REQUEST,
-          "unknown UpdateRequestProcessorChain: "+name );
-    }
-    return chain;
-  }
-
-  public UpdateRequestProcessorChain getUpdateProcessorChain(SolrParams params) {
-    String chainName = params.get(UpdateParams.UPDATE_CHAIN);
-    UpdateRequestProcessorChain defaultUrp = getUpdateProcessingChain(chainName);
-    ProcessorInfo processorInfo = new ProcessorInfo(params);
-    if (processorInfo.isEmpty()) return defaultUrp;
-    return UpdateRequestProcessorChain.constructChain(defaultUrp, processorInfo, this);
-  }
-
-  public PluginBag<UpdateRequestProcessorFactory> getUpdateProcessors() {
-    return updateProcessors;
-  }
-
-  // this core current usage count
-  private final AtomicInteger refCount = new AtomicInteger(1);
-
-  /** expert: increments the core reference count */
-  public void open() {
-    refCount.incrementAndGet();
-  }
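-
-  // Sketch of the expected pairing (hypothetical caller code): every open()
-  // must be balanced by exactly one close(), or the core is never released:
-  //
-  //   core.open();      // refCount: 1 -> 2
-  //   try {
-  //     // ... use the core ...
-  //   } finally {
-  //     core.close();   // refCount: 2 -> 1; resources not yet released
-  //   }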
-
-  /**
-   * Close all resources allocated by the core if it is no longer in use...
-   * <ul>
-   *   <li>searcher</li>
-   *   <li>updateHandler</li>
-   *   <li>all CloseHooks will be notified</li>
-   *   <li>All MBeans will be unregistered from MBeanServer if JMX was enabled
-   *       </li>
-   * </ul>
-   * <p>
-   * The behavior of this method is determined by the result of decrementing
-   * the core's reference count (A core is created with a reference count of 1)...
-   * </p>
-   * <ul>
-   *   <li>If reference count is &gt; 0, the usage count is decreased by 1 and no
-   *       resources are released.
-   *   </li>
-   *   <li>If reference count is == 0, the resources are released.
-   *   </li>
-   *   <li>If reference count is &lt; 0, an error is logged and no further action
-   *       is taken.
-   *   </li>
-   * </ul>
-   * @see #isClosed()
-   */
-  @Override
-  public void close() {
-    int count = refCount.decrementAndGet();
-    if (count > 0) return; // close is called often, and only actually closes if nothing is using it.
-    if (count < 0) {
-      log.error("Too many close [count:{}] on {}. Please report this exception to solr-user@lucene.apache.org", count, this );
-      assert false : "Too many closes on SolrCore";
-      return;
-    }
-    log.info("{} CLOSING SolrCore {}", logid, this);
-
-    // stop reporting metrics
-    try {
-      coreMetricManager.close();
-    } catch (Throwable e) {
-      SolrException.log(log, e);
-      if (e instanceof  Error) {
-        throw (Error) e;
-      }
-    }
-
-    if( closeHooks != null ) {
-       for( CloseHook hook : closeHooks ) {
-         try {
-           hook.preClose( this );
-         } catch (Throwable e) {
-           SolrException.log(log, e);
-           if (e instanceof Error) {
-             throw (Error) e;
-           }
-         }
-      }
-    }
-
-    if(reqHandlers != null) reqHandlers.close();
-    responseWriters.close();
-    searchComponents.close();
-    qParserPlugins.close();
-    valueSourceParsers.close();
-    transformerFactories.close();
-
-    if (memClassLoader != null) {
-      try {
-        memClassLoader.close();
-      } catch (Exception e) {
-        // ignored: best-effort close during core shutdown
-      }
-    }
-
-
-    try {
-      if (null != updateHandler) {
-        updateHandler.close();
-      }
-    } catch (Throwable e) {
-      SolrException.log(log,e);
-      if (e instanceof Error) {
-        throw (Error) e;
-      }
-    }
-
-    boolean coreStateClosed = false;
-    try {
-      if (solrCoreState != null) {
-        if (updateHandler instanceof IndexWriterCloser) {
-          coreStateClosed = solrCoreState.decrefSolrCoreState((IndexWriterCloser) updateHandler);
-        } else {
-          coreStateClosed = solrCoreState.decrefSolrCoreState(null);
-        }
-      }
-    } catch (Throwable e) {
-      SolrException.log(log, e);
-      if (e instanceof Error) {
-        throw (Error) e;
-      }
-    }
-
-    try {
-      ExecutorUtil.shutdownAndAwaitTermination(searcherExecutor);
-    } catch (Throwable e) {
-      SolrException.log(log, e);
-      if (e instanceof Error) {
-        throw (Error) e;
-      }
-    }
-    assert ObjectReleaseTracker.release(searcherExecutor);
-
-    try {
-      // Since we waited for the searcherExecutor to shut down,
-      // there should be no more searchers warming in the background
-      // that we need to take care of.
-      //
-      // For the case that a searcher was registered *before* warming
-      // then the searcherExecutor will throw an exception when getSearcher()
-      // tries to use it, and the exception handling code should close it.
-      closeSearcher();
-    } catch (Throwable e) {
-      SolrException.log(log,e);
-      if (e instanceof Error) {
-        throw (Error) e;
-      }
-    }
-    
-    if (coreStateClosed) {
-      try {
-        cleanupOldIndexDirectories(false);
-      } catch (Exception e) {
-        SolrException.log(log, e);
-      }
-    }
-
-    try {
-      infoRegistry.clear();
-    } catch (Throwable e) {
-      SolrException.log(log, e);
-      if (e instanceof Error) {
-        throw (Error) e;
-      }
-    }
-
-    // Close the snapshots meta-data directory.
-    Directory snapshotsDir = snapshotMgr.getSnapshotsDir();
-    try {
-      this.directoryFactory.release(snapshotsDir);
-    }  catch (Throwable e) {
-      SolrException.log(log,e);
-      if (e instanceof Error) {
-        throw (Error) e;
-      }
-    }
-
-    if (coreStateClosed) {
-      
-      try {
-        directoryFactory.close();
-      } catch (Throwable e) {
-        SolrException.log(log, e);
-        if (e instanceof Error) {
-          throw (Error) e;
-        }
-      }
-    }
-
-    if( closeHooks != null ) {
-       for( CloseHook hook : closeHooks ) {
-         try {
-           hook.postClose( this );
-         } catch (Throwable e) {
-           SolrException.log(log, e);
-           if (e instanceof Error) {
-             throw (Error) e;
-           }
-         }
-      }
-    }
-    
-    assert ObjectReleaseTracker.release(this);
-  }
-
-  /** Current core usage count. */
-  public int getOpenCount() {
-    return refCount.get();
-  }
-
-  /** Whether this core is closed. */
-  public boolean isClosed() {
-      return refCount.get() <= 0;
-  }
-
-  @Override
-  protected void finalize() throws Throwable {
-    try {
-      if (getOpenCount() != 0) {
-        log.error("REFCOUNT ERROR: unreferenced {} ({}) has a reference count of {}", this, getName(), getOpenCount());
-      }
-    } finally {
-      super.finalize();
-    }
-  }
-
-  private Collection<CloseHook> closeHooks = null;
-
-   /**
-    * Add a close callback hook
-    */
-   public void addCloseHook( CloseHook hook )
-   {
-     if( closeHooks == null ) {
-       closeHooks = new ArrayList<>();
-     }
-     closeHooks.add( hook );
-   }
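-
-  // Minimal CloseHook sketch (hypothetical caller, assuming only the
-  // preClose/postClose callbacks used by close() above): preClose runs before
-  // the core's resources are torn down, postClose after:
-  //
-  //   core.addCloseHook(new CloseHook() {
-  //     @Override
-  //     public void preClose(SolrCore core) {
-  //       // detach anything that still needs a live core
-  //     }
-  //     @Override
-  //     public void postClose(SolrCore core) {
-  //       // release external resources tied to the core's lifetime
-  //     }
-  //   });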
-
-  /** @lucene.internal
-   *  Debugging aid only.  No non-test code should be released with uncommented verbose() calls.  */
-  public static boolean VERBOSE = Boolean.parseBoolean(System.getProperty("tests.verbose","false"));
-  public static void verbose(Object... args) {
-    if (!VERBOSE) return;
-    StringBuilder sb = new StringBuilder("VERBOSE:");
-//    sb.append(Thread.currentThread().getName());
-//    sb.append(':');
-    for (Object o : args) {
-      sb.append(' ');
-      sb.append(o==null ? "(null)" : o.toString());
-    }
-    // System.out.println(sb.toString());
-    log.info(sb.toString());
-  }
-
-
-  ////////////////////////////////////////////////////////////////////////////////
-  // Request Handler
-  ////////////////////////////////////////////////////////////////////////////////
-
-  /**
-   * Get the request handler registered to a given name.
-   *
-   * This function is thread safe.
-   */
-  public SolrRequestHandler getRequestHandler(String handlerName) {
-    return RequestHandlerBase.getRequestHandler(RequestHandlers.normalize(handlerName), reqHandlers.handlers);
-  }
-
-  /**
-   * Returns an unmodifiable Map containing the registered handlers
-   */
-  public PluginBag<SolrRequestHandler> getRequestHandlers() {
-    return reqHandlers.handlers;
-  }
-
-
-  /**
-   * Registers a handler at the specified location.  If one exists there, it will be replaced.
-   * To remove a handler, register <code>null</code> at its path
-   *
-   * Once registered the handler can be accessed through:
-   * <pre>
-   *   http://${host}:${port}/${context}/${handlerName}
-   * or:
-   *   http://${host}:${port}/${context}/select?qt=${handlerName}
-   * </pre>
-   *
-   * Handlers <em>must</em> be initialized before getting registered.  Registered
-   * handlers can immediately accept requests.
-   *
-   * This call is thread safe.
-   *
-   * @return the previous <code>SolrRequestHandler</code> registered to this name, or <code>null</code> if none.
-   */
-  public SolrRequestHandler registerRequestHandler(String handlerName, SolrRequestHandler handler) {
-    return reqHandlers.register(handlerName,handler);
-  }
-
-  /**
-   * Register the default search components
-   */
-  private void loadSearchComponents()
-  {
-    Map<String, SearchComponent> instances = createInstances(SearchComponent.standard_components);
-    for (Map.Entry<String, SearchComponent> e : instances.entrySet()) e.getValue().setName(e.getKey());
-    searchComponents.init(instances, this);
-
-    for (String name : searchComponents.keySet()) {
-      if (searchComponents.isLoaded(name) && searchComponents.get(name) instanceof HighlightComponent) {
-        if (!HighlightComponent.COMPONENT_NAME.equals(name)) {
-          searchComponents.put(HighlightComponent.COMPONENT_NAME, searchComponents.getRegistry().get(name));
-        }
-        break;
-      }
-    }
-  }
-
-  /**
-   * @return the SearchComponent registered to the given name.  Throws an exception if the component is undefined
-   */
-  public SearchComponent getSearchComponent(String name) {
-    return searchComponents.get(name);
-  }
-
-  /**
-   * Accessor for all the Search Components
-   * @return An unmodifiable Map of Search Components
-   */
-  public PluginBag<SearchComponent> getSearchComponents() {
-    return searchComponents;
-  }
-
-  ////////////////////////////////////////////////////////////////////////////////
-  // Update Handler
-  ////////////////////////////////////////////////////////////////////////////////
-
-  /**
-   * RequestHandlers need access to the updateHandler so they can all talk to the
-   * same RAM indexer.
-   */
-  public UpdateHandler getUpdateHandler() {
-    return updateHandler;
-  }
-
-  ////////////////////////////////////////////////////////////////////////////////
-  // Searcher Control
-  ////////////////////////////////////////////////////////////////////////////////
-
-  // The current searcher used to service queries.
-  // Don't access this directly!!!! use getSearcher() to
-  // get it (and it will increment the ref count at the same time).
-  // This reference is protected by searcherLock.
-  private RefCounted<SolrIndexSearcher> _searcher;
-
-  // All of the normal open searchers.  Don't access this directly.
-  // protected by synchronizing on searcherLock.
-  private final LinkedList<RefCounted<SolrIndexSearcher>> _searchers = new LinkedList<>();
-  private final LinkedList<RefCounted<SolrIndexSearcher>> _realtimeSearchers = new LinkedList<>();
-
-  final ExecutorService searcherExecutor = ExecutorUtil.newMDCAwareSingleThreadExecutor(
-      new DefaultSolrThreadFactory("searcherExecutor"));
-  private int onDeckSearchers;  // number of searchers preparing
-  // Lock ordering: one can acquire the openSearcherLock and then the searcherLock, but not vice-versa.
-  private Object searcherLock = new Object();  // the sync object for the searcher
-  private ReentrantLock openSearcherLock = new ReentrantLock(true);     // used to serialize opens/reopens for absolute ordering
-  private final int maxWarmingSearchers;  // max number of on-deck searchers allowed
-  private final int slowQueryThresholdMillis;  // threshold above which a query is considered slow
-
-  private RefCounted<SolrIndexSearcher> realtimeSearcher;
-  private Callable<DirectoryReader> newReaderCreator;
-
-  // For testing
-  boolean areAllSearcherReferencesEmpty() {
-    boolean isEmpty;
-    synchronized (searcherLock) {
-      isEmpty = _searchers.isEmpty();
-      isEmpty = isEmpty && _realtimeSearchers.isEmpty();
-      isEmpty = isEmpty && (_searcher == null);
-      isEmpty = isEmpty && (realtimeSearcher == null);
-    }
-    return isEmpty;
-  }
-
-  /**
-   * Return a registered {@link RefCounted}&lt;{@link SolrIndexSearcher}&gt; with
-   * the reference count incremented.  It <b>must</b> be decremented when no longer needed.
-   * This method should not be called from SolrCoreAware.inform() since it can result
-   * in a deadlock if useColdSearcher==false.
-   * If handling a normal request, the searcher should be obtained from
-   * {@link org.apache.solr.request.SolrQueryRequest#getSearcher()} instead.
-   * If you still think you need to call this, consider {@link #withSearcher(IOFunction)} instead which is easier to
-   * use.
-   * @see SolrQueryRequest#getSearcher()
-   * @see #withSearcher(IOFunction)
-   */
-  public RefCounted<SolrIndexSearcher> getSearcher() {
-    return getSearcher(false,true,null);
-  }
-
-  /**
-   * Executes the lambda with the {@link SolrIndexSearcher}.  This is more convenient than using
-   * {@link #getSearcher()} since there is no ref-counting business to worry about.
-   * Example:
-   * <pre class="prettyprint">
-   *   IndexReader reader = h.getCore().withSearcher(SolrIndexSearcher::getIndexReader);
-   * </pre>
-   * Warning: although a lambda is concise, it may be inappropriate to simply return the IndexReader because it might
-   * be closed soon after this method returns; it really depends.
-   */
-  @SuppressWarnings("unchecked")
-  public <R> R withSearcher(IOFunction<SolrIndexSearcher,R> lambda) throws IOException {
-    final RefCounted<SolrIndexSearcher> refCounted = getSearcher();
-    try {
-      return lambda.apply(refCounted.get());
-    } finally {
-      refCounted.decref();
-    }
-  }
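-
-  // A further usage sketch (hypothetical caller): values computed inside the
-  // lambda are safe to return, e.g. a document count with no manual ref-counting:
-  //
-  //   int maxDoc = core.withSearcher(s -> s.getIndexReader().maxDoc());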
-
-  /**
-   * Computes the fingerprint of a segment and caches it only if all the versions in the segment are included in the fingerprint.
-   * We can't use computeIfAbsent since caching is conditional (as described above).
-   * There is a chance that two threads compute the fingerprint of the same segment; that may be acceptable, as it avoids locking the entire map.
-   *
-   * @param searcher   searcher that includes the specified LeafReaderContext
-   * @param ctx        LeafReaderContext of the segment to compute the fingerprint of
-   * @param maxVersion maximum version number to consider for fingerprint computation
-   * @return IndexFingerprint of the segment
-   * @throws IOException if an error occurs while reading the index
-   */
-  public IndexFingerprint getIndexFingerprint(SolrIndexSearcher searcher, LeafReaderContext ctx, long maxVersion)
-      throws IOException {
-    IndexReader.CacheHelper cacheHelper = ctx.reader().getReaderCacheHelper();
-    if (cacheHelper == null) {
-      log.debug("Cannot cache IndexFingerprint as reader does not support caching. searcher:{} reader:{} readerHash:{} maxVersion:{}", searcher, ctx.reader(), ctx.reader().hashCode(), maxVersion);
-      return IndexFingerprint.getFingerprint(searcher, ctx, maxVersion);
-    }
-    
-    IndexFingerprint f = perSegmentFingerprintCache.get(cacheHelper.getKey());
-    // recompute the fingerprint if it is not cached, or
-    // if we want a fingerprint only up to a version less than the maxVersionEncountered in the segment, or
-    // if documents were deleted from the segment after its fingerprint was cached
-    if (f == null || (f.getMaxInHash() > maxVersion) || (f.getNumDocs() != ctx.reader().numDocs())) {
-      log.debug("IndexFingerprint cache miss for searcher:{} reader:{} readerHash:{} maxVersion:{}", searcher, ctx.reader(), ctx.reader().hashCode(), maxVersion);
-      f = IndexFingerprint.getFingerprint(searcher, ctx, maxVersion);
-      // cache fingerprint for the segment only if all the versions in the segment are included in the fingerprint
-      if (f.getMaxVersionEncountered() == f.getMaxInHash()) {
-        log.debug("Caching fingerprint for searcher:{} leafReaderContext:{} mavVersion:{}", searcher, ctx, maxVersion);
-        perSegmentFingerprintCache.put(cacheHelper.getKey(), f);
-      }
-
-    } else {
-      log.debug("IndexFingerprint cache hit for searcher:{} reader:{} readerHash:{} maxVersion:{}", searcher, ctx.reader(), ctx.reader().hashCode(), maxVersion);
-    }
-    log.debug("Cache Size: {}, Segments Size:{}", perSegmentFingerprintCache.size(), searcher.getTopReaderContext().leaves().size());
-    return f;
-  }
-
-  /**
-  * Returns the current registered searcher with its reference count incremented, or null if none are registered.
-  */
-  public RefCounted<SolrIndexSearcher> getRegisteredSearcher() {
-    synchronized (searcherLock) {
-      if (_searcher != null) {
-        _searcher.incref();
-      }
-      return _searcher;
-    }
-  }
-
-  /**
-   * Return the newest normal {@link RefCounted}&lt;{@link SolrIndexSearcher}&gt; with
-   * the reference count incremented.  It <b>must</b> be decremented when no longer needed.
-   * If no searcher is currently open, then if openNew==true a new searcher will be opened,
-   * or null is returned if openNew==false.
-   */
-  public RefCounted<SolrIndexSearcher> getNewestSearcher(boolean openNew) {
-    synchronized (searcherLock) {
-      if (!_searchers.isEmpty()) {
-        RefCounted<SolrIndexSearcher> newest = _searchers.getLast();
-        newest.incref();
-        return newest;
-      }
-    }
-
-    return openNew ? getRealtimeSearcher() : null;
-  }
-
-  /** Gets the latest real-time searcher w/o forcing open a new searcher if one already exists.
-   * The reference count will be incremented.
-   */
-  public RefCounted<SolrIndexSearcher> getRealtimeSearcher() {
-    synchronized (searcherLock) {
-      if (realtimeSearcher != null) {
-        realtimeSearcher.incref();
-        return realtimeSearcher;
-      }
-    }
-
-    // use the searcher lock to prevent multiple people from trying to open at once
-    openSearcherLock.lock();
-    try {
-
-      // try again
-      synchronized (searcherLock) {
-        if (realtimeSearcher != null) {
-          realtimeSearcher.incref();
-          return realtimeSearcher;
-        }
-      }
-
-      // force a new searcher open
-      return openNewSearcher(true, true);
-    } finally {
-      openSearcherLock.unlock();
-    }
-  }
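-
-  // Note on the shape above: the first synchronized check avoids taking
-  // openSearcherLock on the hot path; the re-check under the lock ensures only
-  // one thread actually opens a new realtime searcher.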
-
-
-  public RefCounted<SolrIndexSearcher> getSearcher(boolean forceNew, boolean returnSearcher, final Future[] waitSearcher) {
-    return getSearcher(forceNew, returnSearcher, waitSearcher, false);
-  }
-
-
-  /** Opens a new searcher and returns a RefCounted&lt;SolrIndexSearcher&gt; with its reference incremented.
-   *
-   * "realtime" means that we need to open quickly for a realtime view of the index, hence don't do any
-   * autowarming and add to the _realtimeSearchers queue rather than the _searchers queue (so it won't
-   * be used for autowarming by a future normal searcher).  A "realtime" searcher will currently never
-   * become "registered" (since it currently lacks caching).
-   *
-   * realtimeSearcher is updated to the latest opened searcher, regardless of the value of "realtime".
-   *
-   * This method acquires openSearcherLock - do not call with searcherLock held!
-   */
-  public RefCounted<SolrIndexSearcher>  openNewSearcher(boolean updateHandlerReopens, boolean realtime) {
-    if (isClosed()) { // catch some errors quicker
-      throw new SolrException(ErrorCode.SERVER_ERROR, "openNewSearcher called on closed core");
-    }
-
-    SolrIndexSearcher tmp;
-    RefCounted<SolrIndexSearcher> newestSearcher = null;
-
-    openSearcherLock.lock();
-    try {
-      String newIndexDir = getNewIndexDir();
-      String indexDirFile = null;
-      String newIndexDirFile = null;
-
-      // if it's not a normal near-realtime update, check that paths haven't changed.
-      if (!updateHandlerReopens) {
-        indexDirFile = getDirectoryFactory().normalize(getIndexDir());
-        newIndexDirFile = getDirectoryFactory().normalize(newIndexDir);
-      }
-
-      synchronized (searcherLock) {
-        newestSearcher = realtimeSearcher;
-        if (newestSearcher != null) {
-          newestSearcher.incref();      // the matching decref is in the finally block
-        }
-      }
-
-      if (newestSearcher != null && (updateHandlerReopens || indexDirFile.equals(newIndexDirFile))) {
-
-        DirectoryReader newReader;
-        DirectoryReader currentReader = newestSearcher.get().getRawReader();
-
-        // SolrCore.verbose("start reopen from",previousSearcher,"writer=",writer);
-
-        RefCounted<IndexWriter> writer = getSolrCoreState().getIndexWriter(null);
-
-        try {
-          if (writer != null) {
-            // if in NRT mode, open from the writer
-            newReader = DirectoryReader.openIfChanged(currentReader, writer.get(), true);
-          } else {
-            // verbose("start reopen without writer, reader=", currentReader);
-            newReader = DirectoryReader.openIfChanged(currentReader);
-            // verbose("reopen result", newReader);
-          }
-        } finally {
-          if (writer != null) {
-            writer.decref();
-          }
-        }
-
-        if (newReader == null) { // the underlying index has not changed at all
-
-          if (realtime) {
-            // if this is a request for a realtime searcher, just return the same searcher
-            newestSearcher.incref();
-            return newestSearcher;
-
-          } else if (newestSearcher.get().isCachingEnabled() && newestSearcher.get().getSchema() == getLatestSchema()) {
-            // absolutely nothing has changed, can use the same searcher
-            // but log a message about it to minimize confusion
-
-            newestSearcher.incref();
-            log.debug("SolrIndexSearcher has not changed - not re-opening: {}", newestSearcher.get().getName());
-            return newestSearcher;
-
-          } // ELSE: open a new searcher against the old reader...
-          currentReader.incRef();
-          newReader = currentReader;
-        }
-
-        // for now, turn off caches if this is for a realtime reader 
-        // (caches take a little while to instantiate)
-        final boolean useCaches = !realtime;
-        final String newName = realtime ? "realtime" : "main";
-        tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), newName,
-                                    newReader, true, useCaches, true, directoryFactory);
-
-      } else {
-        // newestSearcher == null at this point
-
-        if (newReaderCreator != null) {
-          // this is set in the constructor if there is a currently open index writer
-          // so that we pick up any uncommitted changes and so we don't go backwards
-          // in time on a core reload
-          DirectoryReader newReader = newReaderCreator.call();
-          tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(),
-              (realtime ? "realtime":"main"), newReader, true, !realtime, true, directoryFactory);
-        } else  {
-          RefCounted<IndexWriter> writer = getSolrCoreState().getIndexWriter(this);
-          DirectoryReader newReader = null;
-          try {
-            newReader = indexReaderFactory.newReader(writer.get(), this);
-          } finally {
-            writer.decref();
-          }
-          tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(),
-              (realtime ? "realtime":"main"), newReader, true, !realtime, true, directoryFactory);
-        }
-      }
-
-      List<RefCounted<SolrIndexSearcher>> searcherList = realtime ? _realtimeSearchers : _searchers;
-      RefCounted<SolrIndexSearcher> newSearcher = newHolder(tmp, searcherList);    // refcount now at 1
-
-      // Increment reference again for "realtimeSearcher" variable.  It should be at 2 after.
-      // When it's decremented by both the caller of this method, and by realtimeSearcher being replaced,
-      // it will be closed.
-      newSearcher.incref();
-
-      synchronized (searcherLock) {
-        // Check if the core is closed again inside the lock in case this method is racing with a close. If the core is
-        // closed, clean up the new searcher and bail.
-        if (isClosed()) {
-          newSearcher.decref(); // once for caller since we're not returning it
-          newSearcher.decref(); // once for ourselves since it won't be "replaced"
-          throw new SolrException(ErrorCode.SERVER_ERROR, "openNewSearcher called on closed core");
-        }
-
-        if (realtimeSearcher != null) {
-          realtimeSearcher.decref();
-        }
-        realtimeSearcher = newSearcher;
-        searcherList.add(realtimeSearcher);
-      }
-
-      return newSearcher;
-
-    } catch (Exception e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error opening new searcher", e);
-    }
-    finally {
-      openSearcherLock.unlock();
-      if (newestSearcher != null) {
-        newestSearcher.decref();
-      }
-    }
-  }
-
-  /**
-   * Get a {@link SolrIndexSearcher} or start the process of creating a new one.
-   * <p>
-   * The registered searcher is the default searcher used to service queries.
-   * A searcher will normally be registered after all of the warming
-   * and event handlers (newSearcher or firstSearcher events) have run.
-   * In the case where there is no registered searcher, the newly created searcher will
-   * be registered before running the event handlers (a slow searcher is better than no searcher).
-   *
-   * <p>
-   * These searchers contain read-only IndexReaders. To access a non read-only IndexReader,
-   * see newSearcher(String name, boolean readOnly).
-   *
-   * <p>
-   * If <tt>forceNew==true</tt> then
-   *  A new searcher will be opened and registered regardless of whether there is already
-   *    a registered searcher or other searchers in the process of being created.
-   * <p>
-   * If <tt>forceNew==false</tt> then:<ul>
-   *   <li>If a searcher is already registered, that searcher will be returned</li>
-   *   <li>If no searcher is currently registered, but at least one is in the process of being created, then
-   * this call will block until the first searcher is registered</li>
-   *   <li>If no searcher is currently registered, and no searchers in the process of being registered, a new
-   * searcher will be created.</li>
-   * </ul>
-   * <p>
-   * If <tt>returnSearcher==true</tt> then a {@link RefCounted}&lt;{@link SolrIndexSearcher}&gt; will be returned with
-   * the reference count incremented.  It <b>must</b> be decremented when no longer needed.
-   * <p>
-   * If <tt>waitSearcher!=null</tt> and a new {@link SolrIndexSearcher} was created,
-   * then it is filled in with a Future that will return after the searcher is registered.  The Future may be set to
-   * <tt>null</tt> in which case the SolrIndexSearcher created has already been registered at the time
-   * this method returned.
-   * <p>
-   * @param forceNew             if true, force the open of a new index searcher regardless if there is already one open.
-   * @param returnSearcher       if true, returns a {@link SolrIndexSearcher} holder with the refcount already incremented.
-   * @param waitSearcher         if non-null, will be filled in with a {@link Future} that will return after the new searcher is registered.
-   * @param updateHandlerReopens if true, the UpdateHandler will be used when reopening a {@link SolrIndexSearcher}.
-   */
-  public RefCounted<SolrIndexSearcher> getSearcher(boolean forceNew, boolean returnSearcher, final Future[] waitSearcher, boolean updateHandlerReopens) {
-    // it may take some time to open an index.... we may need to make
-    // sure that two threads aren't trying to open one at the same time
-    // if it isn't necessary.
-
-    synchronized (searcherLock) {
-      for(;;) { // this loop is so we can retry in the event that we exceed maxWarmingSearchers
-        // see if we can retu

<TRUNCATED>

[42/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
deleted file mode 100644
index fd09a3f..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
+++ /dev/null
@@ -1,663 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.api.collections;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.Random;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import com.google.common.collect.ImmutableMap;
-import org.apache.solr.client.solrj.cloud.DistribStateManager;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
-import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
-import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
-import org.apache.solr.cloud.rule.ReplicaAssigner;
-import org.apache.solr.cloud.rule.Rule;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ReplicaPosition;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.util.NumberUtils;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.POLICY;
-import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.common.cloud.DocCollection.SNITCH;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-
-public class Assign {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static String getCounterNodePath(String collection) {
-    return ZkStateReader.COLLECTIONS_ZKNODE + "/"+collection+"/counter";
-  }
-
-  public static int incAndGetId(DistribStateManager stateManager, String collection, int defaultValue) {
-    String path = ZkStateReader.COLLECTIONS_ZKNODE + "/"+collection;
-    try {
-      if (!stateManager.hasData(path)) {
-        try {
-          stateManager.makePath(path);
-        } catch (AlreadyExistsException e) {
-          // it's okay if another beats us creating the node
-        }
-      }
-      path += "/counter";
-      if (!stateManager.hasData(path)) {
-        try {
-          stateManager.createData(path, NumberUtils.intToBytes(defaultValue), CreateMode.PERSISTENT);
-        } catch (AlreadyExistsException e) {
-          // it's okay if another beats us creating the node
-        }
-      }
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt(); // restore the interrupt status rather than clearing it
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error creating counter node in Zookeeper for collection:" + collection, e);
-    } catch (IOException | KeeperException e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error creating counter node in Zookeeper for collection:" + collection, e);
-    }
-
-    while (true) {
-      try {
-        int version = 0;
-        int currentId = 0;
-        VersionedData data = stateManager.getData(path, null);
-        if (data != null) {
-          currentId = NumberUtils.bytesToInt(data.getData());
-          version = data.getVersion();
-        }
-        byte[] bytes = NumberUtils.intToBytes(++currentId);
-        stateManager.setData(path, bytes, version);
-        return currentId;
-      } catch (BadVersionException e) {
-        continue;
-      } catch (IOException | KeeperException e) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error inc and get counter from Zookeeper for collection:"+collection, e);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt(); // restore the interrupt status rather than clearing it
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error inc and get counter from Zookeeper for collection:" + collection, e);
-      }
-    }
-  }
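-
-  // Usage sketch (hypothetical caller): the loop above is an optimistic
-  // compare-and-set against the ZooKeeper node version, so concurrent callers
-  // each receive a distinct id:
-  //
-  //   int id = incAndGetId(stateManager, "mycollection", 0);
-  //   String coreNodeName = "core_node" + id;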
-
-  public static String assignCoreNodeName(DistribStateManager stateManager, DocCollection collection) {
-    // for backward compatibility;
-    int defaultValue = defaultCounterValue(collection, false);
-    String coreNodeName = "core_node" + incAndGetId(stateManager, collection.getName(), defaultValue);
-    while (collection.getReplica(coreNodeName) != null) {
-      // there is a small chance that the new coreNodeName is not totally unique,
-      // but it is guaranteed to be unique for new collections
-      coreNodeName = "core_node" + incAndGetId(stateManager, collection.getName(), defaultValue);
-    }
-    return coreNodeName;
-  }
-
-  /**
-   * Assign a new unique shard id up to the requested slice count; once all slices exist, pick the slice that needs more replicas.
-   *
-   * @return the assigned shard id
-   */
-  public static String assignShard(DocCollection collection, Integer numShards) {
-    if (numShards == null) {
-      numShards = 1;
-    }
-    String returnShardId = null;
-    Map<String, Slice> sliceMap = collection != null ? collection.getActiveSlicesMap() : null;
-
-
-    // TODO: now that we create shards ahead of time, is this code needed?  Esp since hash ranges aren't assigned when creating via this method?
-
-    if (sliceMap == null) {
-      return "shard1";
-    }
-
-    List<String> shardIdNames = new ArrayList<>(sliceMap.keySet());
-
-    if (shardIdNames.size() < numShards) {
-      return "shard" + (shardIdNames.size() + 1);
-    }
-
-    // TODO: don't need to sort to find shard with fewest replicas!
-
-    // else figure out which shard needs more replicas
-    final Map<String, Integer> map = new HashMap<>();
-    for (String shardId : shardIdNames) {
-      int cnt = sliceMap.get(shardId).getReplicasMap().size();
-      map.put(shardId, cnt);
-    }
-
-    Collections.sort(shardIdNames, (String o1, String o2) -> {
-      Integer one = map.get(o1);
-      Integer two = map.get(o2);
-      return one.compareTo(two);
-    });
-
-    returnShardId = shardIdNames.get(0);
-    return returnShardId;
-  }
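-
-  // For example: with active slices {shard1: 2 replicas, shard2: 1 replica} and
-  // numShards=2, the sort above selects "shard2" as the slice needing a replica.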
-
-  public static String buildSolrCoreName(String collectionName, String shard, Replica.Type type, int replicaNum) {
-    // TODO: Adding the suffix is great for debugging, but may be an issue if at some point we want to support a way to change replica type
-    return String.format(Locale.ROOT, "%s_%s_replica_%s%s", collectionName, shard, type.name().substring(0,1).toLowerCase(Locale.ROOT), replicaNum);
-  }
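-
-  // For example (the single-letter suffix is the lowercased first letter of the
-  // replica type - n, t, or p for NRT, TLOG, or PULL):
-  //
-  //   buildSolrCoreName("mycoll", "shard1", Replica.Type.NRT, 3)
-  //     -> "mycoll_shard1_replica_n3"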
-
-  private static int defaultCounterValue(DocCollection collection, boolean newCollection) {
-    if (newCollection) return 0;
-    int defaultValue = collection.getReplicas().size();
-    if (collection.getReplicationFactor() != null) {
-      // numReplicas and replicationFactor * numSlices may not be equal
-      // when many addReplica or deleteReplica operations have been executed
-      defaultValue = Math.max(defaultValue,
-          collection.getReplicationFactor() * collection.getSlices().size());
-    }
-    return defaultValue * 20;
-  }
-
-  public static String buildSolrCoreName(DistribStateManager stateManager, DocCollection collection, String shard, Replica.Type type, boolean newCollection) {
-    Slice slice = collection.getSlice(shard);
-    int defaultValue = defaultCounterValue(collection, newCollection);
-    int replicaNum = incAndGetId(stateManager, collection.getName(), defaultValue);
-    String coreName = buildSolrCoreName(collection.getName(), shard, type, replicaNum);
-    while (existCoreName(coreName, slice)) {
-      replicaNum = incAndGetId(stateManager, collection.getName(), defaultValue);
-      coreName = buildSolrCoreName(collection.getName(), shard, type, replicaNum);
-    }
-    return coreName;
-  }
-
-  public static String buildSolrCoreName(DistribStateManager stateManager, DocCollection collection, String shard, Replica.Type type) {
-    return buildSolrCoreName(stateManager, collection, shard, type, false);
-  }
-
-  private static boolean existCoreName(String coreName, Slice slice) {
-    if (slice == null) return false;
-    for (Replica replica : slice.getReplicas()) {
-      if (coreName.equals(replica.getStr(CORE_NAME_PROP))) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  public static List<String> getLiveOrLiveAndCreateNodeSetList(final Set<String> liveNodes, final ZkNodeProps message, final Random random) {
-    List<String> nodeList;
-    final String createNodeSetStr = message.getStr(CREATE_NODE_SET);
-    final List<String> createNodeList = (createNodeSetStr == null) ? null :
-        StrUtils.splitSmart((OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY.equals(createNodeSetStr) ?
-            "" : createNodeSetStr), ",", true);
-
-    if (createNodeList != null) {
-      nodeList = new ArrayList<>(createNodeList);
-      nodeList.retainAll(liveNodes);
-      if (message.getBool(OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE,
-          OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE_DEFAULT)) {
-        Collections.shuffle(nodeList, random);
-      }
-    } else {
-      nodeList = new ArrayList<>(liveNodes);
-      Collections.shuffle(nodeList, random);
-    }
-
-    return nodeList;
-  }
-
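For example (node names illustrative), a message carrying createNodeSet=node2:8983_solr,node1:8983_solr yields that list intersected with the live nodes, shuffled or not according to the CREATE_NODE_SET_SHUFFLE flag; the special CREATE_NODE_SET_EMPTY value yields an empty list; and omitting the parameter entirely falls back to a shuffled copy of all live nodes.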
-  /**
-   * <b>Note:</b> where possible, the {@link #usePolicyFramework(DocCollection, SolrCloudManager)} method should
-   * be used instead of this method
-   *
-   * @return true if autoscaling policy framework should be used for replica placement
-   */
-  public static boolean usePolicyFramework(SolrCloudManager cloudManager) throws IOException, InterruptedException {
-    Objects.requireNonNull(cloudManager, "The SolrCloudManager instance cannot be null");
-    return usePolicyFramework(Optional.empty(), cloudManager);
-  }
-
-  /**
-   * @return true if auto scaling policy framework should be used for replica placement
-   * for this collection, otherwise false
-   */
-  public static boolean usePolicyFramework(DocCollection collection, SolrCloudManager cloudManager)
-      throws IOException, InterruptedException {
-    Objects.requireNonNull(collection, "The DocCollection instance cannot be null");
-    Objects.requireNonNull(cloudManager, "The SolrCloudManager instance cannot be null");
-    return usePolicyFramework(Optional.of(collection), cloudManager);
-  }
-
-  private static boolean usePolicyFramework(Optional<DocCollection> collection, SolrCloudManager cloudManager) throws IOException, InterruptedException {
-    boolean useLegacyAssignment = false;
-    Map<String, Object> clusterProperties = cloudManager.getClusterStateProvider().getClusterProperties();
-    if (clusterProperties.containsKey(CollectionAdminParams.DEFAULTS))  {
-      Map<String, Object> defaults = (Map<String, Object>) clusterProperties.get(CollectionAdminParams.DEFAULTS);
-      Map<String, Object> collectionDefaults = (Map<String, Object>) defaults.getOrDefault(CollectionAdminParams.CLUSTER, Collections.emptyMap());
-      useLegacyAssignment = (boolean) collectionDefaults.getOrDefault(CollectionAdminParams.USE_LEGACY_REPLICA_ASSIGNMENT, false);
-    }
-
-    if (!useLegacyAssignment) {
-      // if legacy assignment is not selected then autoscaling is always available through the implicit policy/preferences
-      return true;
-    }
-
-    // legacy assignment is turned on, which means we must look at the actual autoscaling config
-    // to determine whether policy framework can be used or not for this collection
-
-    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
-    // if no autoscaling configuration exists then obviously we cannot use the policy framework
-    if (autoScalingConfig.getPolicy().isEmpty()) return false;
-    // do custom preferences exist?
-    if (!autoScalingConfig.getPolicy().isEmptyPreferences()) return true;
-    // does a cluster policy exist?
-    if (!autoScalingConfig.getPolicy().getClusterPolicy().isEmpty()) return true;
-    // finally, check whether the current collection has a policy
-    return !collection.isPresent() || collection.get().getPolicyName() != null;
-  }
-
-  static class ReplicaCount {
-    public final String nodeName;
-    public int thisCollectionNodes = 0;
-    public int totalNodes = 0;
-
-    ReplicaCount(String nodeName) {
-      this.nodeName = nodeName;
-    }
-
-    public int weight() {
-      return (thisCollectionNodes * 100) + totalNodes;
-    }
-  }
-
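The weighting makes replicas of the target collection dominate: a node hosting 1 replica of this collection and 12 cores overall weighs 1 * 100 + 12 = 112, while a node hosting 2 replicas of it and only 3 cores overall weighs 203, so the first node sorts ahead. The total core count therefore only breaks ties, at least while nodes host fewer than 100 cores, which keeps the two terms from crossing over.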
-  // Only called from addReplica (and, by extension, createShard) so far.
-  //
-  // Gets a list of candidate nodes to put the required replica(s) on. Throws an error if not enough
-  // replicas can be created on live nodes, given maxShardsPerNode, the replication factor (if called from createShard), etc.
-  public static List<ReplicaPosition> getNodesForNewReplicas(ClusterState clusterState, String collectionName,
-                                                          String shard, int nrtReplicas, int tlogReplicas, int pullReplicas,
-                                                          Object createNodeSet, SolrCloudManager cloudManager) throws IOException, InterruptedException, AssignmentException {
-    log.debug("getNodesForNewReplicas() shard: {} , nrtReplicas : {} , tlogReplicas: {} , pullReplicas: {} , createNodeSet {}", shard, nrtReplicas, tlogReplicas, pullReplicas, createNodeSet );
-    DocCollection coll = clusterState.getCollection(collectionName);
-    Integer maxShardsPerNode = coll.getMaxShardsPerNode() == -1 ? Integer.MAX_VALUE : coll.getMaxShardsPerNode();
-    List<String> createNodeList = null;
-
-    if (createNodeSet instanceof List) {
-      createNodeList = (List<String>) createNodeSet;
-    } else {
-      // deduplicate
-      createNodeList = createNodeSet == null ? null : new ArrayList<>(new LinkedHashSet<>(StrUtils.splitSmart((String) createNodeSet, ",", true)));
-    }
-
-    HashMap<String, ReplicaCount> nodeNameVsShardCount = getNodeNameVsShardCount(collectionName, clusterState, createNodeList);
-
-    if (createNodeList == null) { // We only care if we haven't been told to put new replicas on specific nodes.
-      long availableSlots = 0;
-      for (Map.Entry<String, ReplicaCount> ent : nodeNameVsShardCount.entrySet()) {
-        // ADDREPLICA can put more than maxShardsPerNode on an instance, so this test is necessary.
-        if (maxShardsPerNode > ent.getValue().thisCollectionNodes) {
-          availableSlots += (maxShardsPerNode - ent.getValue().thisCollectionNodes);
-        }
-      }
-      if (availableSlots < nrtReplicas + tlogReplicas + pullReplicas) {
-        throw new AssignmentException(
-            String.format(Locale.ROOT, "Cannot create %d new replicas for collection %s given the current number of eligible live nodes %d and a maxShardsPerNode of %d",
-                nrtReplicas + tlogReplicas + pullReplicas, collectionName, nodeNameVsShardCount.size(), maxShardsPerNode));
-      }
-    }
-
-    AssignRequest assignRequest = new AssignRequestBuilder()
-        .forCollection(collectionName)
-        .forShard(Collections.singletonList(shard))
-        .assignNrtReplicas(nrtReplicas)
-        .assignTlogReplicas(tlogReplicas)
-        .assignPullReplicas(pullReplicas)
-        .onNodes(createNodeList)
-        .build();
-    AssignStrategyFactory assignStrategyFactory = new AssignStrategyFactory(cloudManager);
-    AssignStrategy assignStrategy = assignStrategyFactory.create(clusterState, coll);
-    return assignStrategy.assign(cloudManager, assignRequest);
-  }
-
-  public static List<ReplicaPosition> getPositionsUsingPolicy(String collName, List<String> shardNames,
-                                                              int nrtReplicas,
-                                                              int tlogReplicas,
-                                                              int pullReplicas,
-                                                              String policyName, SolrCloudManager cloudManager,
-                                                              List<String> nodesList) throws IOException, InterruptedException, AssignmentException {
-    log.debug("shardnames {} NRT {} TLOG {} PULL {} , policy {}, nodeList {}", shardNames, nrtReplicas, tlogReplicas, pullReplicas, policyName, nodesList);
-    List<ReplicaPosition> replicaPositions = null;
-    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
-    try {
-      Map<String, String> kvMap = Collections.singletonMap(collName, policyName);
-      replicaPositions = PolicyHelper.getReplicaLocations(
-          collName,
-          autoScalingConfig,
-          cloudManager,
-          kvMap,
-          shardNames,
-          nrtReplicas,
-          tlogReplicas,
-          pullReplicas,
-          nodesList);
-      return replicaPositions;
-    } catch (Exception e) {
-      throw new AssignmentException("Error getting replica locations : " + e.getMessage(), e);
-    } finally {
-      if (log.isTraceEnabled()) {
-        if (replicaPositions != null)
-          log.trace("REPLICA_POSITIONS: " + Utils.toJSONString(Utils.getDeepCopy(replicaPositions, 7, true)));
-        log.trace("AUTOSCALING_CONF: " + Utils.toJSONString(autoScalingConfig));
-      }
-    }
-  }
-
-  static HashMap<String, ReplicaCount> getNodeNameVsShardCount(String collectionName,
-                                                                       ClusterState clusterState, List<String> createNodeList) {
-    Set<String> nodes = clusterState.getLiveNodes();
-
-    List<String> nodeList = new ArrayList<>(nodes.size());
-    nodeList.addAll(nodes);
-    if (createNodeList != null) nodeList.retainAll(createNodeList);
-
-    HashMap<String, ReplicaCount> nodeNameVsShardCount = new HashMap<>();
-    for (String s : nodeList) {
-      nodeNameVsShardCount.put(s, new ReplicaCount(s));
-    }
-    if (createNodeList != null) { // Overrides petty considerations about maxShardsPerNode
-      if (createNodeList.size() != nodeNameVsShardCount.size()) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "At least one of the node(s) specified " + createNodeList + " are not currently active in "
-                + nodeNameVsShardCount.keySet() + ", no action taken.");
-      }
-      return nodeNameVsShardCount;
-    }
-    DocCollection coll = clusterState.getCollection(collectionName);
-    int maxShardsPerNode = coll.getMaxShardsPerNode() == -1 ? Integer.MAX_VALUE : coll.getMaxShardsPerNode();
-    Map<String, DocCollection> collections = clusterState.getCollectionsMap();
-    for (Map.Entry<String, DocCollection> entry : collections.entrySet()) {
-      DocCollection c = entry.getValue();
-      // identify suitable nodes by checking the number of cores on each of them
-      for (Slice slice : c.getSlices()) {
-        Collection<Replica> replicas = slice.getReplicas();
-        for (Replica replica : replicas) {
-          ReplicaCount count = nodeNameVsShardCount.get(replica.getNodeName());
-          if (count != null) {
-            count.totalNodes++; // Used to "weigh" whether this node should be used later.
-            if (entry.getKey().equals(collectionName)) {
-              count.thisCollectionNodes++;
-              if (count.thisCollectionNodes >= maxShardsPerNode) nodeNameVsShardCount.remove(replica.getNodeName());
-            }
-          }
-        }
-      }
-    }
-
-    return nodeNameVsShardCount;
-  }
-
-  /**
-   * Thrown if there is an exception while assigning nodes for replicas
-   */
-  public static class AssignmentException extends RuntimeException {
-    public AssignmentException() {
-    }
-
-    public AssignmentException(String message) {
-      super(message);
-    }
-
-    public AssignmentException(String message, Throwable cause) {
-      super(message, cause);
-    }
-
-    public AssignmentException(Throwable cause) {
-      super(cause);
-    }
-
-    public AssignmentException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) {
-      super(message, cause, enableSuppression, writableStackTrace);
-    }
-  }
-
-  public interface AssignStrategy {
-    List<ReplicaPosition> assign(SolrCloudManager solrCloudManager, AssignRequest assignRequest)
-        throws Assign.AssignmentException, IOException, InterruptedException;
-  }
-
-  public static class AssignRequest {
-    public String collectionName;
-    public List<String> shardNames;
-    public List<String> nodes;
-    public int numNrtReplicas;
-    public int numTlogReplicas;
-    public int numPullReplicas;
-
-    public AssignRequest(String collectionName, List<String> shardNames, List<String> nodes, int numNrtReplicas, int numTlogReplicas, int numPullReplicas) {
-      this.collectionName = collectionName;
-      this.shardNames = shardNames;
-      this.nodes = nodes;
-      this.numNrtReplicas = numNrtReplicas;
-      this.numTlogReplicas = numTlogReplicas;
-      this.numPullReplicas = numPullReplicas;
-    }
-  }
-
-  public static class AssignRequestBuilder {
-    private String collectionName;
-    private List<String> shardNames;
-    private List<String> nodes;
-    private int numNrtReplicas;
-    private int numTlogReplicas;
-    private int numPullReplicas;
-
-    public AssignRequestBuilder forCollection(String collectionName) {
-      this.collectionName = collectionName;
-      return this;
-    }
-
-    public AssignRequestBuilder forShard(List<String> shardNames) {
-      this.shardNames = shardNames;
-      return this;
-    }
-
-    public AssignRequestBuilder onNodes(List<String> nodes) {
-      this.nodes = nodes;
-      return this;
-    }
-
-    public AssignRequestBuilder assignNrtReplicas(int numNrtReplicas) {
-      this.numNrtReplicas = numNrtReplicas;
-      return this;
-    }
-
-    public AssignRequestBuilder assignTlogReplicas(int numTlogReplicas) {
-      this.numTlogReplicas = numTlogReplicas;
-      return this;
-    }
-
-    public AssignRequestBuilder assignPullReplicas(int numPullReplicas) {
-      this.numPullReplicas = numPullReplicas;
-      return this;
-    }
-
-    public AssignRequest build() {
-      Objects.requireNonNull(collectionName, "The collectionName cannot be null");
-      Objects.requireNonNull(shardNames, "The shard names cannot be null");
-      return new AssignRequest(collectionName, shardNames, nodes, numNrtReplicas,
-          numTlogReplicas, numPullReplicas);
-    }
-  }
-
-  public static class LegacyAssignStrategy implements AssignStrategy {
-    @Override
-    public List<ReplicaPosition> assign(SolrCloudManager solrCloudManager, AssignRequest assignRequest) throws Assign.AssignmentException, IOException, InterruptedException {
-      ClusterState clusterState = solrCloudManager.getClusterStateProvider().getClusterState();
-      List<String> nodeList = assignRequest.nodes;
-
-      HashMap<String, Assign.ReplicaCount> nodeNameVsShardCount = Assign.getNodeNameVsShardCount(assignRequest.collectionName, clusterState, assignRequest.nodes);
-      if (nodeList == null || nodeList.isEmpty()) {
-        ArrayList<Assign.ReplicaCount> sortedNodeList = new ArrayList<>(nodeNameVsShardCount.values());
-        sortedNodeList.sort(Comparator.comparingInt(Assign.ReplicaCount::weight));
-        nodeList = sortedNodeList.stream().map(replicaCount -> replicaCount.nodeName).collect(Collectors.toList());
-      }
-
-      int i = 0;
-      List<ReplicaPosition> result = new ArrayList<>();
-      for (String aShard : assignRequest.shardNames)
-        for (Map.Entry<Replica.Type, Integer> e : ImmutableMap.of(Replica.Type.NRT, assignRequest.numNrtReplicas,
-            Replica.Type.TLOG, assignRequest.numTlogReplicas,
-            Replica.Type.PULL, assignRequest.numPullReplicas
-        ).entrySet()) {
-          for (int j = 0; j < e.getValue(); j++) {
-            result.add(new ReplicaPosition(aShard, j, e.getKey(), nodeList.get(i % nodeList.size())));
-            i++;
-          }
-        }
-      return result;
-    }
-  }
-
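To illustrate the round-robin above: given sorted nodes [n1, n2, n3] and a request for one shard with 2 NRT and 1 PULL replica, the positions come out as shard1/NRT/0 on n1, shard1/NRT/1 on n2, and shard1/PULL/0 on n3, because the counter i advances across every shard and replica type while wrapping around the node list.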
-  public static class RulesBasedAssignStrategy implements AssignStrategy {
-    public List<Rule> rules;
-    public List snitches;
-    public ClusterState clusterState;
-
-    public RulesBasedAssignStrategy(List<Rule> rules, List snitches, ClusterState clusterState) {
-      this.rules = rules;
-      this.snitches = snitches;
-      this.clusterState = clusterState;
-    }
-
-    @Override
-    public List<ReplicaPosition> assign(SolrCloudManager solrCloudManager, AssignRequest assignRequest) throws Assign.AssignmentException, IOException, InterruptedException {
-      if (assignRequest.numTlogReplicas + assignRequest.numPullReplicas != 0) {
-        throw new Assign.AssignmentException(
-            Replica.Type.TLOG + " or " + Replica.Type.PULL + " replica types not supported with placement rules or cluster policies");
-      }
-
-      Map<String, Integer> shardVsReplicaCount = new HashMap<>();
-      for (String shard : assignRequest.shardNames) shardVsReplicaCount.put(shard, assignRequest.numNrtReplicas);
-
-      Map<String, Map<String, Integer>> shardVsNodes = new LinkedHashMap<>();
-      DocCollection docCollection = solrCloudManager.getClusterStateProvider().getClusterState().getCollectionOrNull(assignRequest.collectionName);
-      if (docCollection != null) {
-        for (Slice slice : docCollection.getSlices()) {
-          LinkedHashMap<String, Integer> n = new LinkedHashMap<>();
-          shardVsNodes.put(slice.getName(), n);
-          for (Replica replica : slice.getReplicas()) {
-            Integer count = n.get(replica.getNodeName());
-            if (count == null) count = 0;
-            n.put(replica.getNodeName(), ++count);
-          }
-        }
-      }
-
-      List<String> nodesList = assignRequest.nodes == null ? new ArrayList<>(clusterState.getLiveNodes()) : assignRequest.nodes;
-
-      ReplicaAssigner replicaAssigner = new ReplicaAssigner(rules,
-          shardVsReplicaCount,
-          snitches,
-          shardVsNodes,
-          nodesList,
-          solrCloudManager, clusterState);
-
-      Map<ReplicaPosition, String> nodeMappings = replicaAssigner.getNodeMappings();
-      return nodeMappings.entrySet().stream()
-          .map(e -> new ReplicaPosition(e.getKey().shard, e.getKey().index, e.getKey().type, e.getValue()))
-          .collect(Collectors.toList());
-    }
-  }
-
-  public static class PolicyBasedAssignStrategy implements AssignStrategy {
-    public String policyName;
-
-    public PolicyBasedAssignStrategy(String policyName) {
-      this.policyName = policyName;
-    }
-
-    @Override
-    public List<ReplicaPosition> assign(SolrCloudManager solrCloudManager, AssignRequest assignRequest) throws Assign.AssignmentException, IOException, InterruptedException {
-      return Assign.getPositionsUsingPolicy(assignRequest.collectionName,
-          assignRequest.shardNames, assignRequest.numNrtReplicas,
-          assignRequest.numTlogReplicas, assignRequest.numPullReplicas,
-          policyName, solrCloudManager, assignRequest.nodes);
-    }
-  }
-
-  public static class AssignStrategyFactory {
-    public SolrCloudManager solrCloudManager;
-
-    public AssignStrategyFactory(SolrCloudManager solrCloudManager) {
-      this.solrCloudManager = solrCloudManager;
-    }
-
-    public AssignStrategy create(ClusterState clusterState, DocCollection collection) throws IOException, InterruptedException {
-      List<Map> ruleMaps = (List<Map>) collection.get("rule");
-      String policyName = collection.getStr(POLICY);
-      List snitches = (List) collection.get(SNITCH);
-
-      Strategy strategy = null;
-      if ((ruleMaps == null || ruleMaps.isEmpty()) && !usePolicyFramework(collection, solrCloudManager)) {
-        strategy = Strategy.LEGACY;
-      } else if (ruleMaps != null && !ruleMaps.isEmpty()) {
-        strategy = Strategy.RULES;
-      } else {
-        strategy = Strategy.POLICY;
-      }
-
-      switch (strategy) {
-        case LEGACY:
-          return new LegacyAssignStrategy();
-        case RULES:
-          List<Rule> rules = new ArrayList<>();
-          for (Object map : ruleMaps) rules.add(new Rule((Map) map));
-          return new RulesBasedAssignStrategy(rules, snitches, clusterState);
-        case POLICY:
-          return new PolicyBasedAssignStrategy(policyName);
-        default:
-          throw new Assign.AssignmentException("Unknown strategy type: " + strategy);
-      }
-    }
-
-    private enum Strategy {
-      LEGACY, RULES, POLICY;
-    }
-  }
-}
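In short, the factory resolves the strategy as follows: explicit placement rules, when present, always win (RULES); with no rules, the choice between POLICY and LEGACY follows usePolicyFramework, so LEGACY is selected only when rules are absent and legacy assignment is both enabled and applicable.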

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/BackupCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/BackupCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/BackupCmd.java
deleted file mode 100644
index b8aba76..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/BackupCmd.java
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.api.collections;
-
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.time.Instant;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Properties;
-
-import org.apache.lucene.util.Version;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Replica.State;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.backup.BackupManager;
-import org.apache.solr.core.backup.repository.BackupRepository;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.SnapshotStatus;
-import org.apache.solr.core.snapshots.SolrSnapshotManager;
-import org.apache.solr.handler.component.ShardHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public BackupCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    String collectionName = message.getStr(COLLECTION_PROP);
-    String backupName = message.getStr(NAME);
-    String repo = message.getStr(CoreAdminParams.BACKUP_REPOSITORY);
-
-    Instant startTime = Instant.now();
-
-    CoreContainer cc = ocmh.overseer.getCoreContainer();
-    BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
-    BackupManager backupMgr = new BackupManager(repository, ocmh.zkStateReader);
-
-    // Backup location
-    URI location = repository.createURI(message.getStr(CoreAdminParams.BACKUP_LOCATION));
-    URI backupPath = repository.resolve(location, backupName);
-
-    // Validate that the backup directory does not already exist.
-    if (repository.exists(backupPath)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The backup directory already exists: " + backupPath);
-    }
-
-    // Create a directory to store backup details.
-    repository.createDirectory(backupPath);
-
-    String strategy = message.getStr(CollectionAdminParams.INDEX_BACKUP_STRATEGY, CollectionAdminParams.COPY_FILES_STRATEGY);
-    switch (strategy) {
-      case CollectionAdminParams.COPY_FILES_STRATEGY: {
-        copyIndexFiles(backupPath, message, results);
-        break;
-      }
-      case CollectionAdminParams.NO_INDEX_BACKUP_STRATEGY: {
-        break;
-      }
-    }
-
-    log.info("Starting to backup ZK data for backupName={}", backupName);
-
-    //Download the configs
-    String configName = ocmh.zkStateReader.readConfigName(collectionName);
-    backupMgr.downloadConfigDir(location, backupName, configName);
-
-    //Save the collection's state. It can be part of the monolithic clusterstate.json or an individual state.json.
-    //Since we don't want to distinguish between the two, we extract the state and back it up as a separate json
-    DocCollection collectionState = ocmh.zkStateReader.getClusterState().getCollection(collectionName);
-    backupMgr.writeCollectionState(location, backupName, collectionName, collectionState);
-
-    Properties properties = new Properties();
-
-    properties.put(BackupManager.BACKUP_NAME_PROP, backupName);
-    properties.put(BackupManager.COLLECTION_NAME_PROP, collectionName);
-    properties.put(CollectionAdminParams.COLL_CONF, configName);
-    properties.put(BackupManager.START_TIME_PROP, startTime.toString());
-    properties.put(BackupManager.INDEX_VERSION_PROP, Version.LATEST.toString());
-    //TODO: Add MD5 of the configset. If during restore the same name configset exists then we can compare checksums to see if they are the same.
-    //if they are not the same then we can throw an error or have an 'overwriteConfig' flag
-    //TODO save numDocs for the shardLeader. We can use it to sanity check the restore.
-
-    backupMgr.writeBackupProperties(location, backupName, properties);
-
-    backupMgr.downloadCollectionProperties(location, backupName, collectionName);
-
-    log.info("Completed backing up ZK data for backupName={}", backupName);
-  }
-
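For orientation, the usual client-side entry point that ends up in this command is SolrJ's CollectionAdminRequest; a sketch from memory (collection and repository names are illustrative, and setter names may differ slightly across Solr versions):

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

void requestBackup(SolrClient client) throws Exception {
  CollectionAdminRequest.Backup backup =
      CollectionAdminRequest.backupCollection("techproducts", "nightly");
  backup.setLocation("/backups");      // arrives here as CoreAdminParams.BACKUP_LOCATION
  backup.setRepositoryName("localfs"); // arrives here as CoreAdminParams.BACKUP_REPOSITORY
  backup.process(client);              // the Overseer then dispatches to BackupCmd.call
}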
-  private Replica selectReplicaWithSnapshot(CollectionSnapshotMetaData snapshotMeta, Slice slice) {
-    // The goal here is to choose the snapshot of the replica which was the leader at the time the snapshot was created.
-    // If that is not possible, we choose any other replica for the given shard.
-    Collection<CoreSnapshotMetaData> snapshots = snapshotMeta.getReplicaSnapshotsForShard(slice.getName());
-
-    Optional<CoreSnapshotMetaData> leaderCore = snapshots.stream().filter(x -> x.isLeader()).findFirst();
-    if (leaderCore.isPresent()) {
-      log.info("Replica {} was the leader when snapshot {} was created.", leaderCore.get().getCoreName(), snapshotMeta.getName());
-      Replica r = slice.getReplica(leaderCore.get().getCoreName());
-      if ((r != null) && !r.getState().equals(State.DOWN)) {
-        return r;
-      }
-    }
-
-    Optional<Replica> r = slice.getReplicas().stream()
-                               .filter(x -> x.getState() != State.DOWN && snapshotMeta.isSnapshotExists(slice.getName(), x))
-                               .findFirst();
-
-    if (!r.isPresent()) {
-      throw new SolrException(ErrorCode.SERVER_ERROR,
-          "Unable to find any live replica with a snapshot named " + snapshotMeta.getName() + " for shard " + slice.getName());
-    }
-
-    return r.get();
-  }
-
-  private void copyIndexFiles(URI backupPath, ZkNodeProps request, NamedList results) throws Exception {
-    String collectionName = request.getStr(COLLECTION_PROP);
-    String backupName = request.getStr(NAME);
-    String asyncId = request.getStr(ASYNC);
-    String repoName = request.getStr(CoreAdminParams.BACKUP_REPOSITORY);
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-    Map<String, String> requestMap = new HashMap<>();
-
-    String commitName = request.getStr(CoreAdminParams.COMMIT_NAME);
-    Optional<CollectionSnapshotMetaData> snapshotMeta = Optional.empty();
-    if (commitName != null) {
-      SolrZkClient zkClient = ocmh.zkStateReader.getZkClient();
-      snapshotMeta = SolrSnapshotManager.getCollectionLevelSnapshot(zkClient, collectionName, commitName);
-      if (!snapshotMeta.isPresent()) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Snapshot with name " + commitName
-            + " does not exist for collection " + collectionName);
-      }
-      if (snapshotMeta.get().getStatus() != SnapshotStatus.Successful) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Snapshot with name " + commitName + " for collection " + collectionName
-            + " has not completed successfully. The status is " + snapshotMeta.get().getStatus());
-      }
-    }
-
-    log.info("Starting backup of collection={} with backupName={} at location={}", collectionName, backupName,
-        backupPath);
-
-    Collection<String> shardsToConsider = Collections.emptySet();
-    if (snapshotMeta.isPresent()) {
-      shardsToConsider = snapshotMeta.get().getShards();
-    }
-
-    for (Slice slice : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getActiveSlices()) {
-      Replica replica = null;
-
-      if (snapshotMeta.isPresent()) {
-        if (!shardsToConsider.contains(slice.getName())) {
-          log.warn("Skipping the backup for shard {} since it wasn't part of the collection {} when snapshot {} was created.",
-              slice.getName(), collectionName, snapshotMeta.get().getName());
-          continue;
-        }
-        replica = selectReplicaWithSnapshot(snapshotMeta.get(), slice);
-      } else {
-        // Note: this can return null when there is no leader for this shard.
-        replica = slice.getLeader();
-        if (replica == null) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "No 'leader' replica available for shard " + slice.getName() + " of collection " + collectionName);
-        }
-      }
-
-      String coreName = replica.getStr(CORE_NAME_PROP);
-
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString());
-      params.set(NAME, slice.getName());
-      params.set(CoreAdminParams.BACKUP_REPOSITORY, repoName);
-      params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.toASCIIString()); // note: the index dir will be written under this location, as "snapshot." + the slice name
-      params.set(CORE_NAME_PROP, coreName);
-      if (snapshotMeta.isPresent()) {
-        params.set(CoreAdminParams.COMMIT_NAME, snapshotMeta.get().getName());
-      }
-
-      ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap);
-      log.debug("Sent backup request to core={} for backupName={}", coreName, backupName);
-    }
-    log.debug("Sent backup requests to all shard leaders for backupName={}", backupName);
-
-    ocmh.processResponses(results, shardHandler, true, "Could not backup all shards", asyncId, requestMap);
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateAliasCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateAliasCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateAliasCmd.java
deleted file mode 100644
index 7117019..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateAliasCmd.java
+++ /dev/null
@@ -1,164 +0,0 @@
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.api.collections;
-
-import java.time.Instant;
-import java.time.temporal.ChronoUnit;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Set;
-import java.util.TimeZone;
-import java.util.stream.Collectors;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.util.DateMathParser;
-
-import static org.apache.solr.common.SolrException.ErrorCode.BAD_REQUEST;
-
-public class CreateAliasCmd implements OverseerCollectionMessageHandler.Cmd {
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  private static boolean anyRoutingParams(ZkNodeProps message) {
-    return message.keySet().stream().anyMatch(k -> k.startsWith(TimeRoutedAlias.ROUTER_PREFIX));
-  }
-
-  public CreateAliasCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results)
-      throws Exception {
-    final String aliasName = message.getStr(CommonParams.NAME);
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-
-    if (!anyRoutingParams(message)) {
-      callCreatePlainAlias(message, aliasName, zkStateReader);
-    } else {
-      callCreateRoutedAlias(message, aliasName, zkStateReader, state);
-    }
-
-    // Sleep a bit to allow ZooKeeper state propagation.
-    //
-    // THIS IS A KLUDGE.
-    //
-    // Solr's view of the cluster is eventually consistent. *Eventually* all nodes and CloudSolrClients will be aware of
-    // alias changes, but not immediately. If a newly created alias is queried, things should work right away since Solr
-    // will attempt to fetch the latest aliases when it can't otherwise resolve the name. However,
-    // modifications to an existing alias will take some time to become visible everywhere.
-    //
-    // We could levy this requirement on the client, but they would probably always add an obligatory sleep, which is
-    // just kicking the can down the road. Perhaps, ideally, at this juncture we could somehow wait until all
-    // Solr nodes in the cluster have the latest aliases?
-    Thread.sleep(100);
-  }
-
-  private void callCreatePlainAlias(ZkNodeProps message, String aliasName, ZkStateReader zkStateReader) {
-    final List<String> canonicalCollectionList = parseCollectionsParameter(message.get("collections"));
-    final String canonicalCollectionsString = StrUtils.join(canonicalCollectionList, ',');
-    validateAllCollectionsExistAndNoDups(canonicalCollectionList, zkStateReader);
-    zkStateReader.aliasesManager
-        .applyModificationAndExportToZk(aliases -> aliases.cloneWithCollectionAlias(aliasName, canonicalCollectionsString));
-  }
-
-  /**
-   * The v2 API directs that the 'collections' parameter be provided as a JSON array (e.g. ["a", "b"]).  We also
-   * maintain support for the legacy format, a comma-separated list (e.g. a,b).
-   */
-  @SuppressWarnings("unchecked")
-  private List<String> parseCollectionsParameter(Object colls) {
-    if (colls == null) throw new SolrException(BAD_REQUEST, "missing collections param");
-    if (colls instanceof List) return (List<String>) colls;
-    return StrUtils.splitSmart(colls.toString(), ",", true).stream()
-        .map(String::trim)
-        .collect(Collectors.toList());
-  }
-
-  private void callCreateRoutedAlias(ZkNodeProps message, String aliasName, ZkStateReader zkStateReader, ClusterState state) throws Exception {
-    // Validate we got everything we need
-    if (!message.getProperties().keySet().containsAll(TimeRoutedAlias.REQUIRED_ROUTER_PARAMS)) {
-      throw new SolrException(BAD_REQUEST, "A routed alias requires these params: " + TimeRoutedAlias.REQUIRED_ROUTER_PARAMS
-      + " plus some create-collection prefixed ones.");
-    }
-
-    Map<String, String> aliasProperties = new LinkedHashMap<>();
-    message.getProperties().entrySet().stream()
-        .filter(entry -> TimeRoutedAlias.PARAM_IS_PROP.test(entry.getKey()))
-        .forEach(entry -> aliasProperties.put(entry.getKey(), (String) entry.getValue())); // way easier than .collect
-
-    TimeRoutedAlias timeRoutedAlias = new TimeRoutedAlias(aliasName, aliasProperties); // validates as well
-
-    String start = message.getStr(TimeRoutedAlias.ROUTER_START);
-    Instant startTime = parseStart(start, timeRoutedAlias.getTimeZone());
-
-    String initialCollectionName = TimeRoutedAlias.formatCollectionNameFromInstant(aliasName, startTime);
-
-    // Create the collection
-    MaintainRoutedAliasCmd.createCollectionAndWait(state, aliasName, aliasProperties, initialCollectionName, ocmh);
-    validateAllCollectionsExistAndNoDups(Collections.singletonList(initialCollectionName), zkStateReader);
-
-    // Create/update the alias
-    zkStateReader.aliasesManager.applyModificationAndExportToZk(aliases -> aliases
-        .cloneWithCollectionAlias(aliasName, initialCollectionName)
-        .cloneWithCollectionAliasProperties(aliasName, aliasProperties));
-  }
-
-  private Instant parseStart(String str, TimeZone zone) {
-    Instant start = DateMathParser.parseMath(new Date(), str, zone).toInstant();
-    checkMilis(start);
-    return start;
-  }
-
-  private void checkMilis(Instant date) {
-    if (!date.truncatedTo(ChronoUnit.SECONDS).equals(date)) {
-      throw new SolrException(BAD_REQUEST,
-          "Date or date math for start time includes milliseconds, which is not supported. " +
-              "(Hint: 'NOW' used without rounding always has this problem)");
-    }
-  }
-
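For instance, a router.start of NOW/DAY or 2018-01-15T00:00:00Z truncates cleanly to seconds and passes this check, while a bare NOW almost always carries a millisecond component and is rejected with the hint above.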
-  private void validateAllCollectionsExistAndNoDups(List<String> collectionList, ZkStateReader zkStateReader) {
-    final String collectionStr = StrUtils.join(collectionList, ',');
-
-    if (new HashSet<>(collectionList).size() != collectionList.size()) {
-      throw new SolrException(BAD_REQUEST,
-          String.format(Locale.ROOT,  "Can't create collection alias for collections='%s', since it contains duplicates", collectionStr));
-    }
-    ClusterState clusterState = zkStateReader.getClusterState();
-    Set<String> aliasNames = zkStateReader.getAliases().getCollectionAliasListMap().keySet();
-    for (String collection : collectionList) {
-      if (clusterState.getCollectionOrNull(collection) == null && !aliasNames.contains(collection)) {
-        throw new SolrException(BAD_REQUEST,
-            String.format(Locale.ROOT,  "Can't create collection alias for collections='%s', '%s' is not an existing collection or alias", collectionStr, collection));
-      }
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
deleted file mode 100644
index 533aee8..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
+++ /dev/null
@@ -1,620 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.api.collections;
-
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Properties;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.solr.client.solrj.cloud.DistribStateManager;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
-import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
-import org.apache.solr.client.solrj.cloud.autoscaling.NotEmptyException;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
-import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.cloud.overseer.ClusterStateMutator;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.ImplicitDocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ReplicaPosition;
-import org.apache.solr.common.cloud.ZkConfigManager;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.cloud.ZooKeeperException;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CommonAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.admin.ConfigSetsHandlerApi;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.handler.component.ShardRequest;
-import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.KeeperException.NoNodeException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
-import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
-import static org.apache.solr.common.params.CollectionAdminParams.COLL_CONF;
-import static org.apache.solr.common.params.CollectionAdminParams.COLOCATED_WITH;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.MODIFYCOLLECTION;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.common.util.StrUtils.formatString;
-
-public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-  private final TimeSource timeSource;
-  private final DistribStateManager stateManager;
-
-  public CreateCollectionCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-    this.stateManager = ocmh.cloudManager.getDistribStateManager();
-    this.timeSource = ocmh.cloudManager.getTimeSource();
-  }
-
-  @Override
-  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    final String collectionName = message.getStr(NAME);
-    final boolean waitForFinalState = message.getBool(WAIT_FOR_FINAL_STATE, false);
-    log.info("Create collection {}", collectionName);
-    if (clusterState.hasCollection(collectionName)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "collection already exists: " + collectionName);
-    }
-
-    String withCollection = message.getStr(CollectionAdminParams.WITH_COLLECTION);
-    String withCollectionShard = null;
-    if (withCollection != null) {
-      if (!clusterState.hasCollection(withCollection)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "The 'withCollection' does not exist: " + withCollection);
-      } else  {
-        DocCollection collection = clusterState.getCollection(withCollection);
-        if (collection.getActiveSlices().size() > 1)  {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "The `withCollection` must have exactly one shard, found: " + collection.getActiveSlices().size());
-        }
-        withCollectionShard = collection.getActiveSlices().iterator().next().getName();
-      }
-    }
-
-    String configName = getConfigName(collectionName, message);
-    if (configName == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No config set found to associate with the collection.");
-    }
-
-    ocmh.validateConfigOrThrowSolrException(configName);
-
-    String router = message.getStr("router.name", DocRouter.DEFAULT_NAME);
-
-    // fail fast if parameters are wrong or incomplete
-    List<String> shardNames = populateShardNames(message, router);
-    checkReplicaTypes(message);
-
-    AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
-
-    try {
-
-      final String async = message.getStr(ASYNC);
-
-      ZkStateReader zkStateReader = ocmh.zkStateReader;
-      boolean isLegacyCloud = Overseer.isLegacy(zkStateReader);
-
-      OverseerCollectionMessageHandler.createConfNode(stateManager, configName, collectionName, isLegacyCloud);
-
-      Map<String,String> collectionParams = new HashMap<>();
-      Map<String,Object> collectionProps = message.getProperties();
-      for (String propName : collectionProps.keySet()) {
-        if (propName.startsWith(ZkController.COLLECTION_PARAM_PREFIX)) {
-          collectionParams.put(propName.substring(ZkController.COLLECTION_PARAM_PREFIX.length()), (String) collectionProps.get(propName));
-        }
-      }
-
-      createCollectionZkNode(stateManager, collectionName, collectionParams);
-
-      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
-
-      // wait for a while until we see the collection
-      TimeOut waitUntil = new TimeOut(30, TimeUnit.SECONDS, timeSource);
-      boolean created = false;
-      while (! waitUntil.hasTimedOut()) {
-        waitUntil.sleep(100);
-        created = ocmh.cloudManager.getClusterStateProvider().getClusterState().hasCollection(collectionName);
-        if(created) break;
-      }
-      if (!created) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not fully create collection: " + collectionName);
-      }
-
-      // refresh cluster state
-      clusterState = ocmh.cloudManager.getClusterStateProvider().getClusterState();
-
-      List<ReplicaPosition> replicaPositions = null;
-      try {
-        replicaPositions = buildReplicaPositions(ocmh.cloudManager, clusterState, clusterState.getCollection(collectionName), message, shardNames, sessionWrapper);
-      } catch (Assign.AssignmentException e) {
-        ZkNodeProps deleteMessage = new ZkNodeProps("name", collectionName);
-        new DeleteCollectionCmd(ocmh).call(clusterState, deleteMessage, results);
-        // unwrap the exception
-        throw new SolrException(ErrorCode.SERVER_ERROR, e.getMessage(), e.getCause());
-      }
-
-      if (replicaPositions.isEmpty()) {
-        log.debug("Finished create command for collection: {}", collectionName);
-        return;
-      }
-
-      // For tracking async calls.
-      Map<String, String> requestMap = new HashMap<>();
-
-
-      log.debug(formatString("Creating SolrCores for new collection {0}, shardNames {1} , message : {2}",
-          collectionName, shardNames, message));
-      Map<String,ShardRequest> coresToCreate = new LinkedHashMap<>();
-      ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-      for (ReplicaPosition replicaPosition : replicaPositions) {
-        String nodeName = replicaPosition.node;
-
-        if (withCollection != null) {
-          // check that we have a replica of `withCollection` on this node and if not, create one
-          DocCollection collection = clusterState.getCollection(withCollection);
-          List<Replica> replicas = collection.getReplicas(nodeName);
-          if (replicas == null || replicas.isEmpty()) {
-            ZkNodeProps props = new ZkNodeProps(
-                Overseer.QUEUE_OPERATION, ADDREPLICA.toString(),
-                ZkStateReader.COLLECTION_PROP, withCollection,
-                ZkStateReader.SHARD_ID_PROP, withCollectionShard,
-                "node", nodeName,
-                CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.TRUE.toString()); // set to true because we want `withCollection` to be ready after this collection is created
-            new AddReplicaCmd(ocmh).call(clusterState, props, results);
-            clusterState = zkStateReader.getClusterState(); // refresh
-          }
-        }
-
-        String coreName = Assign.buildSolrCoreName(ocmh.cloudManager.getDistribStateManager(),
-            ocmh.cloudManager.getClusterStateProvider().getClusterState().getCollection(collectionName),
-            replicaPosition.shard, replicaPosition.type, true);
-        log.debug(formatString("Creating core {0} as part of shard {1} of collection {2} on {3}"
-            , coreName, replicaPosition.shard, collectionName, nodeName));
-
-
-        String baseUrl = zkStateReader.getBaseUrlForNodeName(nodeName);
-        // in the new mode, create the replica in the cluster state prior to creating the core;
-        // otherwise the core creation fails
-        if (!isLegacyCloud) {
-          ZkNodeProps props = new ZkNodeProps(
-              Overseer.QUEUE_OPERATION, ADDREPLICA.toString(),
-              ZkStateReader.COLLECTION_PROP, collectionName,
-              ZkStateReader.SHARD_ID_PROP, replicaPosition.shard,
-              ZkStateReader.CORE_NAME_PROP, coreName,
-              ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
-              ZkStateReader.BASE_URL_PROP, baseUrl,
-              ZkStateReader.REPLICA_TYPE, replicaPosition.type.name(),
-              CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
-          Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
-        }
-
-        // Need to create new params for each request
-        ModifiableSolrParams params = new ModifiableSolrParams();
-        params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.CREATE.toString());
-
-        params.set(CoreAdminParams.NAME, coreName);
-        params.set(COLL_CONF, configName);
-        params.set(CoreAdminParams.COLLECTION, collectionName);
-        params.set(CoreAdminParams.SHARD, replicaPosition.shard);
-        params.set(ZkStateReader.NUM_SHARDS_PROP, shardNames.size());
-        params.set(CoreAdminParams.NEW_COLLECTION, "true");
-        params.set(CoreAdminParams.REPLICA_TYPE, replicaPosition.type.name());
-
-        if (async != null) {
-          String coreAdminAsyncId = async + Math.abs(System.nanoTime());
-          params.add(ASYNC, coreAdminAsyncId);
-          requestMap.put(nodeName, coreAdminAsyncId);
-        }
-        ocmh.addPropertyParams(message, params);
-
-        ShardRequest sreq = new ShardRequest();
-        sreq.nodeName = nodeName;
-        params.set("qt", ocmh.adminPath);
-        sreq.purpose = 1;
-        sreq.shards = new String[]{baseUrl};
-        sreq.actualShards = sreq.shards;
-        sreq.params = params;
-
-        if (isLegacyCloud) {
-          shardHandler.submit(sreq, sreq.shards[0], sreq.params);
-        } else {
-          coresToCreate.put(coreName, sreq);
-        }
-      }
-
-      if(!isLegacyCloud) {
-        // wait for all replica entries to be created
-        Map<String, Replica> replicas = ocmh.waitToSeeReplicasInState(collectionName, coresToCreate.keySet());
-        for (Map.Entry<String, ShardRequest> e : coresToCreate.entrySet()) {
-          ShardRequest sreq = e.getValue();
-          sreq.params.set(CoreAdminParams.CORE_NODE_NAME, replicas.get(e.getKey()).getName());
-          shardHandler.submit(sreq, sreq.shards[0], sreq.params);
-        }
-      }
-
-      ocmh.processResponses(results, shardHandler, false, null, async, requestMap, Collections.emptySet());
-      if(results.get("failure") != null && ((SimpleOrderedMap)results.get("failure")).size() > 0) {
-        // Let's cleanup as we hit an exception
-        // We shouldn't be passing 'results' here for the cleanup as the response would then contain 'success'
-        // element, which may be interpreted by the user as a positive ack
-        ocmh.cleanupCollection(collectionName, new NamedList());
-        log.info("Cleaned up artifacts for failed create collection for [{}]", collectionName);
-      } else {
-        log.debug("Finished create command on all shards for collection: {}", collectionName);
-
-        // Emit a warning about production use of data driven functionality
-        boolean defaultConfigSetUsed = message.getStr(COLL_CONF) == null ||
-            message.getStr(COLL_CONF).equals(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME);
-        if (defaultConfigSetUsed) {
-          results.add("warning", "Using _default configset. Data driven schema functionality"
-              + " is enabled by default, which is NOT RECOMMENDED for production use. To turn it off:"
-              + " curl http://{host:port}/solr/" + collectionName + "/config -d '{\"set-user-property\": {\"update.autoCreateFields\":\"false\"}}'");
-        }
-      }
-
-      // modify the `withCollection` and store this new collection's name with it
-      if (withCollection != null) {
-        ZkNodeProps props = new ZkNodeProps(
-            Overseer.QUEUE_OPERATION, MODIFYCOLLECTION.toString(),
-            ZkStateReader.COLLECTION_PROP, withCollection,
-            CollectionAdminParams.COLOCATED_WITH, collectionName);
-        Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
-        try {
-          zkStateReader.waitForState(withCollection, 5, TimeUnit.SECONDS, (liveNodes, collectionState) -> collectionName.equals(collectionState.getStr(COLOCATED_WITH)));
-        } catch (TimeoutException e) {
-          log.warn("Timed out waiting to see the " + COLOCATED_WITH + " property set on collection: " + withCollection);
-          // maybe the overseer queue is backed up, we don't want to fail the create request
-          // because of this time out, continue
-        }
-      }
-
-    } catch (SolrException ex) {
-      throw ex;
-    } catch (Exception ex) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, ex);
-    } finally {
-      if (sessionWrapper.get() != null) sessionWrapper.get().release();
-    }
-  }
-
-  public static List<ReplicaPosition> buildReplicaPositions(SolrCloudManager cloudManager, ClusterState clusterState,
-                                                            DocCollection docCollection,
-                                                            ZkNodeProps message,
-                                                            List<String> shardNames,
-                                                            AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException, Assign.AssignmentException {
-    final String collectionName = message.getStr(NAME);
-    // look at the replication factor and see if it matches reality
-    // if it does not, find best nodes to create more cores
-    int numTlogReplicas = message.getInt(TLOG_REPLICAS, 0);
-    int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, numTlogReplicas>0?0:1));
-    int numPullReplicas = message.getInt(PULL_REPLICAS, 0);
-
-    int numSlices = shardNames.size();
-    int maxShardsPerNode = message.getInt(MAX_SHARDS_PER_NODE, 1);
-    if (maxShardsPerNode == -1) maxShardsPerNode = Integer.MAX_VALUE;
-
-    // we need to look at every node and see how many cores it serves
-    // add our new cores to existing nodes serving the least number of cores
-    // but (for now) require that each core goes on a distinct node.
-
-    List<ReplicaPosition> replicaPositions;
-    List<String> nodeList = Assign.getLiveOrLiveAndCreateNodeSetList(clusterState.getLiveNodes(), message, OverseerCollectionMessageHandler.RANDOM);
-    if (nodeList.isEmpty()) {
-      log.warn("It is unusual to create a collection ("+collectionName+") without cores.");
-
-      replicaPositions = new ArrayList<>();
-    } else {
-      int totalNumReplicas = numNrtReplicas + numTlogReplicas + numPullReplicas;
-      if (totalNumReplicas > nodeList.size()) {
-        log.warn("Specified number of replicas of "
-            + totalNumReplicas
-            + " on collection "
-            + collectionName
-            + " is higher than the number of Solr instances currently live or live and part of your " + OverseerCollectionMessageHandler.CREATE_NODE_SET + "("
-            + nodeList.size()
-            + "). It's unusual to run two replica of the same slice on the same Solr-instance.");
-      }
-
-      int maxShardsAllowedToCreate = maxShardsPerNode == Integer.MAX_VALUE ?
-          Integer.MAX_VALUE :
-          maxShardsPerNode * nodeList.size();
-      int requestedShardsToCreate = numSlices * totalNumReplicas;
-      if (maxShardsAllowedToCreate < requestedShardsToCreate) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cannot create collection " + collectionName + ". Value of "
-            + MAX_SHARDS_PER_NODE + " is " + maxShardsPerNode
-            + ", and the number of nodes currently live or live and part of your "+OverseerCollectionMessageHandler.CREATE_NODE_SET+" is " + nodeList.size()
-            + ". This allows a maximum of " + maxShardsAllowedToCreate
-            + " to be created. Value of " + OverseerCollectionMessageHandler.NUM_SLICES + " is " + numSlices
-            + ", value of " + NRT_REPLICAS + " is " + numNrtReplicas
-            + ", value of " + TLOG_REPLICAS + " is " + numTlogReplicas
-            + " and value of " + PULL_REPLICAS + " is " + numPullReplicas
-            + ". This requires " + requestedShardsToCreate
-            + " shards to be created (higher than the allowed number)");
-      }
-      Assign.AssignRequest assignRequest = new Assign.AssignRequestBuilder()
-          .forCollection(collectionName)
-          .forShard(shardNames)
-          .assignNrtReplicas(numNrtReplicas)
-          .assignTlogReplicas(numTlogReplicas)
-          .assignPullReplicas(numPullReplicas)
-          .onNodes(nodeList)
-          .build();
-      Assign.AssignStrategyFactory assignStrategyFactory = new Assign.AssignStrategyFactory(cloudManager);
-      Assign.AssignStrategy assignStrategy = assignStrategyFactory.create(clusterState, docCollection);
-      replicaPositions = assignStrategy.assign(cloudManager, assignRequest);
-      sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
-    }
-    return replicaPositions;
-  }
-
-  public static void checkReplicaTypes(ZkNodeProps message) {
-    int numTlogReplicas = message.getInt(TLOG_REPLICAS, 0);
-    int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, numTlogReplicas > 0 ? 0 : 1));
-
-    if (numNrtReplicas + numTlogReplicas <= 0) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, NRT_REPLICAS + " + " + TLOG_REPLICAS + " must be greater than 0");
-    }
-  }
-
-  public static List<String> populateShardNames(ZkNodeProps message, String router) {
-    List<String> shardNames = new ArrayList<>();
-    Integer numSlices = message.getInt(OverseerCollectionMessageHandler.NUM_SLICES, null);
-    if (ImplicitDocRouter.NAME.equals(router)) {
-      ClusterStateMutator.getShardNames(shardNames, message.getStr("shards", null));
-      numSlices = shardNames.size();
-    } else {
-      if (numSlices == null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, OverseerCollectionMessageHandler.NUM_SLICES + " is a required param (when using CompositeId router).");
-      }
-      if (numSlices <= 0) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, OverseerCollectionMessageHandler.NUM_SLICES + " must be > 0");
-      }
-      ClusterStateMutator.getShardNames(numSlices, shardNames);
-    }
-    return shardNames;
-  }
-
-  String getConfigName(String coll, ZkNodeProps message) throws KeeperException, InterruptedException {
-    String configName = message.getStr(COLL_CONF);
-
-    if (configName == null) {
-      // if there is only one conf, use that
-      List<String> configNames = null;
-      try {
-        configNames = ocmh.zkStateReader.getZkClient().getChildren(ZkConfigManager.CONFIGS_ZKNODE, null, true);
-        if (configNames.contains(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME)) {
-          if (CollectionAdminParams.SYSTEM_COLL.equals(coll)) {
-            return coll;
-          } else {
-            String intendedConfigSetName = ConfigSetsHandlerApi.getSuffixedNameForAutoGeneratedConfigSet(coll);
-            copyDefaultConfigSetTo(configNames, intendedConfigSetName);
-            return intendedConfigSetName;
-          }
-        } else if (configNames != null && configNames.size() == 1) {
-          configName = configNames.get(0);
-          // no config set named, but there is only 1 - use it
-          log.info("Only one config set found in zk - using it:" + configName);
-        }
-      } catch (KeeperException.NoNodeException e) {
-        // no config sets exist in ZooKeeper yet - fall through and return the (possibly null) configName
-      }
-    }
-    return "".equals(configName)? null: configName;
-  }
-
-  /**
-   * Copies the _default configset to the specified configset name (overwrites if pre-existing)
-   */
-  private void copyDefaultConfigSetTo(List<String> configNames, String targetConfig) {
-    ZkConfigManager configManager = new ZkConfigManager(ocmh.zkStateReader.getZkClient());
-
-    // if a configset named collection exists, re-use it
-    if (configNames.contains(targetConfig)) {
-      log.info("There exists a configset by the same name as the collection we're trying to create: " + targetConfig +
-          ", re-using it.");
-      return;
-    }
-    // Copy _default into targetConfig
-    try {
-      configManager.copyConfigDir(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME, targetConfig, new HashSet<>());
-    } catch (Exception e) {
-      throw new SolrException(ErrorCode.INVALID_STATE, "Error while copying _default to " + targetConfig, e);
-    }
-  }
-
-  public static void createCollectionZkNode(DistribStateManager stateManager, String collection, Map<String,String> params) {
-    log.debug("Check for collection zkNode:" + collection);
-    String collectionPath = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection;
-    // clean up old terms node
-    String termsPath = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection + "/terms";
-    try {
-      stateManager.removeRecursively(termsPath, true, true);
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt(); // restore the interrupt status
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error deleting old term nodes for collection from Zookeeper", e);
-    } catch (KeeperException | IOException | NotEmptyException | BadVersionException e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error deleting old term nodes for collection from Zookeeper", e);
-    }
-    try {
-      if (!stateManager.hasData(collectionPath)) {
-        log.debug("Creating collection in ZooKeeper:" + collection);
-
-        try {
-          Map<String,Object> collectionProps = new HashMap<>();
-
-          if (params.size() > 0) {
-            collectionProps.putAll(params);
-            // if the config name wasn't passed in, use the default
-            if (!collectionProps.containsKey(ZkController.CONFIGNAME_PROP)) {
-              // users can create the collection node and conf link ahead of time, or this may return another option
-              getConfName(stateManager, collection, collectionPath, collectionProps);
-            }
-
-          } else if (System.getProperty("bootstrap_confdir") != null) {
-            String defaultConfigName = System.getProperty(ZkController.COLLECTION_PARAM_PREFIX + ZkController.CONFIGNAME_PROP, collection);
-
-            // if we are bootstrapping a collection, default the config for
-            // a new collection to the collection we are bootstrapping
-            log.info("Setting config for collection:" + collection + " to " + defaultConfigName);
-
-            Properties sysProps = System.getProperties();
-            for (String sprop : sysProps.stringPropertyNames()) {
-              if (sprop.startsWith(ZkController.COLLECTION_PARAM_PREFIX)) {
-                collectionProps.put(sprop.substring(ZkController.COLLECTION_PARAM_PREFIX.length()), sysProps.getProperty(sprop));
-              }
-            }
-
-            // if the config name wasn't passed in, use the default
-            if (!collectionProps.containsKey(ZkController.CONFIGNAME_PROP))
-              collectionProps.put(ZkController.CONFIGNAME_PROP, defaultConfigName);
-
-          } else if (Boolean.getBoolean("bootstrap_conf")) {
-            // the conf name should be the collection name of this core
-            collectionProps.put(ZkController.CONFIGNAME_PROP, collection);
-          } else {
-            getConfName(stateManager, collection, collectionPath, collectionProps);
-          }
-
-          collectionProps.remove(ZkStateReader.NUM_SHARDS_PROP);  // we don't put numShards in the collections properties
-
-          ZkNodeProps zkProps = new ZkNodeProps(collectionProps);
-          stateManager.makePath(collectionPath, Utils.toJSON(zkProps), CreateMode.PERSISTENT, false);
-
-        } catch (KeeperException e) {
-          //TODO shouldn't the stateManager ensure this does not happen; should throw AlreadyExistsException
-          // it's okay if the node already exists
-          if (e.code() != KeeperException.Code.NODEEXISTS) {
-            throw e;
-          }
-        } catch (AlreadyExistsException e) {
-          // it's okay if the node already exists
-        }
-      } else {
-        log.debug("Collection zkNode exists");
-      }
-
-    } catch (KeeperException e) {
-      // it's okay if another beats us creating the node
-      if (e.code() == KeeperException.Code.NODEEXISTS) {
-        return;
-      }
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
-    } catch (IOException e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt(); // restore the interrupt status
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
-    }
-
-  }
-
-  private static void getConfName(DistribStateManager stateManager, String collection, String collectionPath, Map<String,Object> collectionProps) throws IOException,
-      KeeperException, InterruptedException {
-    // check for configName
-    log.debug("Looking for collection configName");
-    if (collectionProps.containsKey("configName")) {
-      log.info("configName was passed as a param {}", collectionProps.get("configName"));
-      return;
-    }
-
-    List<String> configNames = null;
-    int retry = 1;
-    int retryLimit = 6;
-    for (; retry < retryLimit; retry++) {
-      if (stateManager.hasData(collectionPath)) {
-        VersionedData data = stateManager.getData(collectionPath);
-        ZkNodeProps cProps = ZkNodeProps.load(data.getData());
-        if (cProps.containsKey(ZkController.CONFIGNAME_PROP)) {
-          break;
-        }
-      }
-
-      try {
-        configNames = stateManager.listData(ZkConfigManager.CONFIGS_ZKNODE);
-      } catch (NoSuchElementException | NoNodeException e) {
-        // just keep trying
-      }
-
-      // check if there's a config set with the same name as the collection
-      if (configNames != null && configNames.contains(collection)) {
-        log.info(
-            "Could not find explicit collection configName, but found config name matching collection name - using that set.");
-        collectionProps.put(ZkController.CONFIGNAME_PROP, collection);
-        break;
-      }
-      // if _default exists, use that
-      if (configNames != null && configNames.contains(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME)) {
-        log.info(
-            "Could not find explicit collection configName, but found _default config set - using that set.");
-        collectionProps.put(ZkController.CONFIGNAME_PROP, ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME);
-        break;
-      }
-      // if there is only one conf, use that
-      if (configNames != null && configNames.size() == 1) {
-        // no config set named, but there is only 1 - use it
-        log.info("Only one config set found in zk - using it:" + configNames.get(0));
-        collectionProps.put(ZkController.CONFIGNAME_PROP, configNames.get(0));
-        break;
-      }
-
-      log.info("Could not find collection configName - pausing for 3 seconds and trying again - try: " + retry);
-      Thread.sleep(3000);
-    }
-    if (retry == retryLimit) {
-      log.error("Could not find configName for collection " + collection);
-      throw new ZooKeeperException(
-          SolrException.ErrorCode.SERVER_ERROR,
-          "Could not find configName for collection " + collection + "; found: " + configNames);
-    }
-  }
-}
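
For reference: the command above sizes a new collection as numShards x (nrt + tlog + pull)
replicas and rejects the request when that exceeds maxShardsPerNode x liveNodes. A minimal
SolrJ sketch exercising those parameters (illustrative only - the collection name, config
name, and ZooKeeper address are made up and not part of this patch):

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class CreateCollectionExample {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder()
            .withZkHost("localhost:2181").build()) {
          // 2 shards x (1 NRT + 1 TLOG + 0 PULL) = 4 cores, so with
          // maxShardsPerNode=2 at least 2 live nodes are required,
          // mirroring the check in buildReplicaPositions().
          CollectionAdminRequest
              .createCollection("myCollection", "myConfig", 2, 1, 1, 0)
              .setMaxShardsPerNode(2)
              .process(client);
        }
      }
    }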

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateShardCmd.java
deleted file mode 100644
index e7f35f1..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateShardCmd.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.api.collections;
-
-
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.solr.cloud.Overseer;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CommonAdminParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.Utils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-public class CreateShardCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public CreateShardCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    String collectionName = message.getStr(COLLECTION_PROP);
-    String sliceName = message.getStr(SHARD_ID_PROP);
-    boolean waitForFinalState = message.getBool(CommonAdminParams.WAIT_FOR_FINAL_STATE, false);
-
-    log.info("Create shard invoked: {}", message);
-    if (collectionName == null || sliceName == null)
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'collection' and 'shard' are required parameters");
-
-    DocCollection collection = clusterState.getCollection(collectionName);
-
-    int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, collection.getInt(NRT_REPLICAS, collection.getInt(REPLICATION_FACTOR, 1))));
-    int numPullReplicas = message.getInt(PULL_REPLICAS, collection.getInt(PULL_REPLICAS, 0));
-    int numTlogReplicas = message.getInt(TLOG_REPLICAS, collection.getInt(TLOG_REPLICAS, 0));
-
-    if (numNrtReplicas + numTlogReplicas <= 0) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, NRT_REPLICAS + " + " + TLOG_REPLICAS + " must be greater than 0");
-    }
-
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
-    // wait for a while until we see the shard
-    ocmh.waitForNewShard(collectionName, sliceName);
-    String async = message.getStr(ASYNC);
-    ZkNodeProps addReplicasProps = new ZkNodeProps(
-        COLLECTION_PROP, collectionName,
-        SHARD_ID_PROP, sliceName,
-        ZkStateReader.NRT_REPLICAS, String.valueOf(numNrtReplicas),
-        ZkStateReader.TLOG_REPLICAS, String.valueOf(numTlogReplicas),
-        ZkStateReader.PULL_REPLICAS, String.valueOf(numPullReplicas),
-        OverseerCollectionMessageHandler.CREATE_NODE_SET, message.getStr(OverseerCollectionMessageHandler.CREATE_NODE_SET),
-        CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
-
-    Map<String, Object> propertyParams = new HashMap<>();
-    ocmh.addPropertyParams(message, propertyParams);
-    addReplicasProps = addReplicasProps.plus(propertyParams);
-    if (async != null) addReplicasProps.getProperties().put(ASYNC, async);
-    final NamedList addResult = new NamedList();
-    try {
-      ocmh.addReplica(zkStateReader.getClusterState(), addReplicasProps, addResult, () -> {
-        Object addResultFailure = addResult.get("failure");
-        if (addResultFailure != null) {
-          SimpleOrderedMap failure = (SimpleOrderedMap) results.get("failure");
-          if (failure == null) {
-            failure = new SimpleOrderedMap();
-            results.add("failure", failure);
-          }
-          failure.addAll((NamedList) addResultFailure);
-        } else {
-          SimpleOrderedMap success = (SimpleOrderedMap) results.get("success");
-          if (success == null) {
-            success = new SimpleOrderedMap();
-            results.add("success", success);
-          }
-          success.addAll((NamedList) addResult.get("success"));
-        }
-      });
-    } catch (Assign.AssignmentException e) {
-      // clean up the slice that we created
-      ZkNodeProps deleteShard = new ZkNodeProps(COLLECTION_PROP, collectionName, SHARD_ID_PROP, sliceName, ASYNC, async);
-      new DeleteShardCmd(ocmh).call(clusterState, deleteShard, results);
-      throw e;
-    }
-
-    log.info("Finished create command on all shards for collection: " + collectionName);
-  }
-
-}
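
For reference: CREATESHARD only makes sense for collections using the implicit router, and
the command above falls back to the collection's own replica counts when the message omits
them. A hedged SolrJ sketch (the names and ZooKeeper address are illustrative, not part of
this patch):

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class CreateShardExample {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder()
            .withZkHost("localhost:2181").build()) {
          // Adds "shardX" to a collection created with router.name=implicit;
          // replica counts default to the collection's NRT/TLOG/PULL settings.
          CollectionAdminRequest.createShard("implicitColl", "shardX").process(client);
        }
      }
    }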


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/SolrCoreInitializationException.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrCoreInitializationException.java b/solr/core/src/java/org/apache/solr/core/SolrCoreInitializationException.java
deleted file mode 100644
index 93b653c..0000000
--- a/solr/core/src/java/org/apache/solr/core/SolrCoreInitializationException.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import org.apache.solr.common.SolrException;
-
-public class SolrCoreInitializationException extends SolrException {
-
-  public SolrCoreInitializationException(ErrorCode code, String msg) {
-    super(code, msg);
-  }
-  
-  public SolrCoreInitializationException(String coreName, Exception loadException) {
-    super(ErrorCode.SERVER_ERROR, "SolrCore '" + coreName +
-        "' is not available due to init failure: " +
-        loadException.getMessage(), loadException);
-  }
-}
\ No newline at end of file
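
For reference: the exception above wraps a core's load failure so that requests against the
failed core report the root cause. A small sketch of the resulting message (the core name
and cause are illustrative, not part of this patch):

    import org.apache.solr.core.SolrCoreInitializationException;

    public class InitFailureExample {
      public static void main(String[] args) {
        Exception loadFailure = new IllegalStateException("could not parse schema.xml");
        SolrCoreInitializationException e =
            new SolrCoreInitializationException("demo_core", loadFailure);
        // Prints: SolrCore 'demo_core' is not available due to init failure: could not parse schema.xml
        System.out.println(e.getMessage());
      }
    }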

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/SolrCores.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrCores.java b/solr/core/src/java/org/apache/solr/core/SolrCores.java
deleted file mode 100644
index 546686e..0000000
--- a/solr/core/src/java/org/apache/solr/core/SolrCores.java
+++ /dev/null
@@ -1,568 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import com.google.common.collect.Lists;
-import org.apache.http.annotation.Experimental;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.logging.MDCLoggingContext;
-import org.apache.solr.util.DefaultSolrThreadFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Observable;
-import java.util.Observer;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-
-
-class SolrCores implements Observer {
-
-  private static final Object modifyLock = new Object(); // for locking around manipulating any of the core maps.
-  private final Map<String, SolrCore> cores = new LinkedHashMap<>(); // For "permanent" cores
-
-  // These descriptors, once loaded, will _not_ be unloaded, i.e. they are not "transient".
-  private final Map<String, CoreDescriptor> residentDesciptors = new LinkedHashMap<>();
-
-  private final CoreContainer container;
-  
-  private Set<String> currentlyLoadingCores = Collections.newSetFromMap(new ConcurrentHashMap<String,Boolean>());
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  // This set holds the names of cores that are currently being operated on (during initial load the core itself
-  // may not exist yet). The rule is: never do any operation on a core that is currently being operated upon.
-  private static final Set<String> pendingCoreOps = new HashSet<>();
-
-  // Because closes can happen whenever anything is _added_ to the transient core list, we need
-  // to essentially queue them up to be handled via pendingCoreOps.
-  private static final List<SolrCore> pendingCloses = new ArrayList<>();
-
-  private TransientSolrCoreCacheFactory transientCoreCache;
-
-  private TransientSolrCoreCache transientSolrCoreCache = null;
-  
-  SolrCores(CoreContainer container) {
-    this.container = container;
-  }
-  
-  protected void addCoreDescriptor(CoreDescriptor p) {
-    synchronized (modifyLock) {
-      if (p.isTransient()) {
-        if (getTransientCacheHandler() != null) {
-          getTransientCacheHandler().addTransientDescriptor(p.getName(), p);
-        } else {
-          log.warn("We encountered a core marked as transient, but there is no transient handler defined. This core will be inaccessible");
-        }
-      } else {
-        residentDescriptors.put(p.getName(), p);
-      }
-    }
-  }
-
-  protected void removeCoreDescriptor(CoreDescriptor p) {
-    synchronized (modifyLock) {
-      if (p.isTransient()) {
-        if (getTransientCacheHandler() != null) {
-          getTransientCacheHandler().removeTransientDescriptor(p.getName());
-        }
-      } else {
-        residentDescriptors.remove(p.getName());
-      }
-    }
-  }
-
-  public void load(SolrResourceLoader loader) {
-    transientCoreCache = TransientSolrCoreCacheFactory.newInstance(loader, container);
-  }
-  // We are shutting down. You can't hold the lock on the various lists of cores while they shut down, so we need to
-  // make a temporary copy of the names and shut them down outside the lock.
-  protected void close() {
-    waitForLoadingCoresToFinish(30*1000);
-    Collection<SolrCore> coreList = new ArrayList<>();
-
-    
-    TransientSolrCoreCache transientSolrCoreCache = getTransientCacheHandler();
-    // Release observer
-    if (transientSolrCoreCache != null) {
-      transientSolrCoreCache.close();
-    }
-
-    // It might be possible for one of the cores to move from one list to another while we're closing them. So
-    // loop through the lists until they're all empty. In particular, the core could have moved from the transient
-    // list to the pendingCloses list.
-    do {
-      coreList.clear();
-      synchronized (modifyLock) {
-        // make a copy of the cores then clear the map so the core isn't handed out to a request again
-        coreList.addAll(cores.values());
-        cores.clear();
-        if (transientSolrCoreCache != null) {
-          coreList.addAll(transientSolrCoreCache.prepareForShutdown());
-        }
-
-        coreList.addAll(pendingCloses);
-        pendingCloses.clear();
-      }
-      
-      ExecutorService coreCloseExecutor = ExecutorUtil.newMDCAwareFixedThreadPool(Integer.MAX_VALUE,
-          new DefaultSolrThreadFactory("coreCloseExecutor"));
-      try {
-        for (SolrCore core : coreList) {
-          coreCloseExecutor.submit(() -> {
-            MDCLoggingContext.setCore(core);
-            try {
-              core.close();
-            } catch (Throwable e) {
-              SolrException.log(log, "Error shutting down core", e);
-              if (e instanceof Error) {
-                throw (Error) e;
-              }
-            } finally {
-              MDCLoggingContext.clear();
-            }
-            return core;
-          });
-        }
-      } finally {
-        ExecutorUtil.shutdownAndAwaitTermination(coreCloseExecutor);
-      }
-
-    } while (coreList.size() > 0);
-  }
-  
-  // Returns the old core if there was a core of the same name.
-  // WARNING! This should be the _only_ place you put anything into the list of transient cores!
-  protected SolrCore putCore(CoreDescriptor cd, SolrCore core) {
-    synchronized (modifyLock) {
-      if (cd.isTransient()) {
-        if (getTransientCacheHandler() != null) {
-          return getTransientCacheHandler().addCore(cd.getName(), core);
-        }
-      } else {
-        return cores.put(cd.getName(), core);
-      }
-    }
-    return null;
-  }
-
-  /**
-   *
-   * @return A list of "permanent" cores, i.e. cores that may not be swapped out and are currently loaded.
-   *
-   * A core may be non-transient but still lazily loaded. If it is "permanent" and lazily loaded _and_
-   * not yet loaded, it will _not_ be returned by this call.
-   *
-   * Note: This is one of the places where SolrCloud is incompatible with transient cores. This call is used in
-   * cancelRecoveries; transient cores don't participate.
-   */
-
-  List<SolrCore> getCores() {
-    List<SolrCore> lst = new ArrayList<>();
-
-    synchronized (modifyLock) {
-      lst.addAll(cores.values());
-      return lst;
-    }
-  }
-
-  /**
-   * Gets the cores that are currently loaded, i.e. cores that have
-   * 1> loadOnStartup=true and are either not transient or, if transient, have been loaded and have not been aged out
-   * 2> loadOnStartup=false and have been loaded, and are either not transient or have not been aged out.
-   *
-   * Put another way, this will not return the names of cores that are lazily loaded but have not been called for yet,
-   * or that are transient and either not loaded or swapped out.
-   *
-   * @return List of currently loaded cores.
-   */
-  Set<String> getLoadedCoreNames() {
-    Set<String> set = new TreeSet<>();
-
-    synchronized (modifyLock) {
-      set.addAll(cores.keySet());
-      if (getTransientCacheHandler() != null) {
-        set.addAll(getTransientCacheHandler().getLoadedCoreNames());
-      }
-    }
-    return set;
-  }
-
-  /** This method is currently experimental.
-   *
-   * @return a Collection of the names that a specific core object is mapped to; there may be more than one.
-   */
-  @Experimental
-  List<String> getNamesForCore(SolrCore core) {
-    List<String> lst = new ArrayList<>();
-
-    synchronized (modifyLock) {
-      for (Map.Entry<String, SolrCore> entry : cores.entrySet()) {
-        if (core == entry.getValue()) {
-          lst.add(entry.getKey());
-        }
-      }
-      if (getTransientCacheHandler() != null) {
-        lst.addAll(getTransientCacheHandler().getNamesForCore(core));
-      }
-    }
-    return lst;
-  }
-
-  /**
-   * Gets a list of all cores, loaded and unloaded 
-   *
-   * @return all cores names, whether loaded or unloaded, transient or permanent.
-   */
-  public Collection<String> getAllCoreNames() {
-    Set<String> set = new TreeSet<>();
-    synchronized (modifyLock) {
-      set.addAll(cores.keySet());
-      if (getTransientCacheHandler() != null) {
-        set.addAll(getTransientCacheHandler().getAllCoreNames());
-      }
-      set.addAll(residentDescriptors.keySet());
-    }
-    return set;
-  }
-
-  SolrCore getCore(String name) {
-
-    synchronized (modifyLock) {
-      return cores.get(name);
-    }
-  }
-
-  protected void swap(String n0, String n1) {
-
-    synchronized (modifyLock) {
-      SolrCore c0 = cores.get(n0);
-      SolrCore c1 = cores.get(n1);
-      if (c0 == null) { // Might be an unloaded transient core
-        c0 = container.getCore(n0);
-        if (c0 == null) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No such core: " + n0);
-        }
-      }
-      if (c1 == null) { // Might be an unloaded transient core
-        c1 = container.getCore(n1);
-        if (c1 == null) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No such core: " + n1);
-        }
-      }
-      // When we swap the cores, we also need to swap the associated core descriptors. Note, this changes the 
-      // name of the coreDescriptor by virtue of the c-tor
-      CoreDescriptor cd1 = c1.getCoreDescriptor(); 
-      addCoreDescriptor(new CoreDescriptor(n1, c0.getCoreDescriptor()));
-      addCoreDescriptor(new CoreDescriptor(n0, cd1));
-      cores.put(n0, c1);
-      cores.put(n1, c0);
-      c0.setName(n1);
-      c1.setName(n0);
-      
-      container.getMetricManager().swapRegistries(
-          c0.getCoreMetricManager().getRegistryName(),
-          c1.getCoreMetricManager().getRegistryName());
-    }
-
-  }
-
-  protected SolrCore remove(String name) {
-
-    synchronized (modifyLock) {
-      SolrCore ret = cores.remove(name);
-      // The core could have been newly created, transient, or dynamic; the newly-created cores in particular
-      // should be checked.
-      TransientSolrCoreCache transientHandler = getTransientCacheHandler();
-      if (ret == null && transientHandler != null) {
-        ret = transientHandler.removeCore(name);
-      }
-      return ret;
-    }
-  }
-
-  /* If you don't increment the reference count, someone could close the core before you use it. */
-  SolrCore  getCoreFromAnyList(String name, boolean incRefCount) {
-    synchronized (modifyLock) {
-      SolrCore core = cores.get(name);
-
-      if (core == null && getTransientCacheHandler() != null) {
-        core = getTransientCacheHandler().getCore(name);
-      }
-
-      if (core != null && incRefCount) {
-        core.open();
-      }
-
-      return core;
-    }
-  }
-
-  // See SOLR-5366 for why the UNLOAD command needs to know whether a core is actually loaded or not, it might have
-  // to close the core. However, there's a race condition. If the core happens to be in the pending "to close" queue,
-  // we should NOT close it in unload core.
-  protected boolean isLoadedNotPendingClose(String name) {
-    // Keep this all synchronized on modifyLock.
-    synchronized (modifyLock) {
-      if (cores.containsKey(name)) {
-        return true;
-      }
-      if (getTransientCacheHandler() != null && getTransientCacheHandler().containsCore(name)) {
-        // Check pending
-        for (SolrCore core : pendingCloses) {
-          if (core.getName().equals(name)) {
-            return false;
-          }
-        }
-
-        return true;
-      }
-    }
-    return false;
-  }
-
-  protected boolean isLoaded(String name) {
-    synchronized (modifyLock) {
-      if (cores.containsKey(name)) {
-        return true;
-      }
-      if (getTransientCacheHandler() != null && getTransientCacheHandler().containsCore(name)) {
-        return true;
-      }
-    }
-    return false;
-
-  }
-
-  protected CoreDescriptor getUnloadedCoreDescriptor(String cname) {
-    synchronized (modifyLock) {
-      CoreDescriptor desc = residentDescriptors.get(cname);
-      if (desc == null) {
-        if (getTransientCacheHandler() == null) return null;
-        desc = getTransientCacheHandler().getTransientDescriptor(cname);
-        if (desc == null) {
-          return null;
-        }
-      }
-      return new CoreDescriptor(cname, desc);
-    }
-  }
-
-  // Wait here until any pending operations (load, unload or reload) are completed on this core.
-  protected SolrCore waitAddPendingCoreOps(String name) {
-
-    // Keep multiple threads from operating on a core at one time.
-    synchronized (modifyLock) {
-      boolean pending;
-      do { // Are we currently doing anything to this core? Loading, unloading, reloading?
-        pending = pendingCoreOps.contains(name); // wait for the core to be done being operated upon
-        if (! pending) { // Linear list, but shouldn't be too long
-          for (SolrCore core : pendingCloses) {
-            if (core.getName().equals(name)) {
-              pending = true;
-              break;
-            }
-          }
-        }
-        if (container.isShutDown()) return null; // Just stop already.
-
-        if (pending) {
-          try {
-            modifyLock.wait();
-          } catch (InterruptedException e) {
-            return null; // Seems best not to do anything at all if the thread is interrupted
-          }
-        }
-      } while (pending);
-      // We _really_ need to do this within the synchronized block!
-      if (! container.isShutDown()) {
-        if (! pendingCoreOps.add(name)) {
-          log.warn("Replaced an entry in pendingCoreOps {}, we should not be doing this", name);
-        }
-        return getCoreFromAnyList(name, false); // we might have been _unloading_ the core, so return the core if it was loaded.
-      }
-    }
-    return null;
-  }
-
-  // We should always be removing the first thing in the list with our name! The idea here is to NOT do anything on
-  // any core while some other operation is working on that core.
-  protected void removeFromPendingOps(String name) {
-    synchronized (modifyLock) {
-      if (! pendingCoreOps.remove(name)) {
-        log.warn("Tried to remove core {} from pendingCoreOps and it wasn't there. ", name);
-      }
-      modifyLock.notifyAll();
-    }
-  }
-
-  protected Object getModifyLock() {
-    return modifyLock;
-  }
-
-  // Be a little careful. We don't want to either open or close a core unless it's _not_ being opened or closed by
-  // another thread. So within this lock we'll walk along the list of pending closes until we find one NOT in
-  // the list of cores currently being loaded or reloaded. The "usual" case will probably return the very first
-  // one anyway.
-  protected SolrCore getCoreToClose() {
-    synchronized (modifyLock) {
-      for (SolrCore core : pendingCloses) {
-        if (! pendingCoreOps.contains(core.getName())) {
-          pendingCoreOps.add(core.getName());
-          pendingCloses.remove(core);
-          return core;
-        }
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Return the CoreDescriptor corresponding to a given core name.
-   * Blocks if the SolrCore is still loading until it is ready.
-   * @param coreName the name of the core
-   * @return the CoreDescriptor
-   */
-  public CoreDescriptor getCoreDescriptor(String coreName) {
-    synchronized (modifyLock) {
-      if (residentDescriptors.containsKey(coreName))
-        return residentDescriptors.get(coreName);
-      TransientSolrCoreCache transientHandler = getTransientCacheHandler();
-      return transientHandler == null ? null : transientHandler.getTransientDescriptor(coreName);
-    }
-  }
-
-  /**
-   * Get the CoreDescriptors for every SolrCore managed here
-   * @return a List of CoreDescriptors
-   */
-  public List<CoreDescriptor> getCoreDescriptors() {
-    List<CoreDescriptor> cds = Lists.newArrayList();
-    synchronized (modifyLock) {
-      for (String coreName : getAllCoreNames()) {
-        // TODO: This null check is a bit suspicious - it seems that
-        // getAllCoreNames might return deleted cores as well?
-        CoreDescriptor cd = getCoreDescriptor(coreName);
-        if (cd != null)
-          cds.add(cd);
-      }
-    }
-    return cds;
-  }
-
-  // cores marked as loading will block on getCore
-  public void markCoreAsLoading(CoreDescriptor cd) {
-    synchronized (modifyLock) {
-      currentlyLoadingCores.add(cd.getName());
-    }
-  }
-
-  //cores marked as loading will block on getCore
-  public void markCoreAsNotLoading(CoreDescriptor cd) {
-    synchronized (modifyLock) {
-      currentlyLoadingCores.remove(cd.getName());
-    }
-  }
-
-  // returns when no cores are marked as loading
-  public void waitForLoadingCoresToFinish(long timeoutMs) {
-    long time = System.nanoTime();
-    long timeout = time + TimeUnit.NANOSECONDS.convert(timeoutMs, TimeUnit.MILLISECONDS);
-    synchronized (modifyLock) {
-      while (!currentlyLoadingCores.isEmpty()) {
-        try {
-          modifyLock.wait(500);
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-        }
-        if (System.nanoTime() >= timeout) {
-          log.warn("Timed out waiting for SolrCores to finish loading.");
-          break;
-        }
-      }
-    }
-  }
-  
-  // returns when core is finished loading, throws exception if no such core loading or loaded
-  public void waitForLoadingCoreToFinish(String core, long timeoutMs) {
-    long time = System.nanoTime();
-    long timeout = time + TimeUnit.NANOSECONDS.convert(timeoutMs, TimeUnit.MILLISECONDS);
-    synchronized (modifyLock) {
-      while (isCoreLoading(core)) {
-        try {
-          modifyLock.wait(500);
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-        }
-        if (System.nanoTime() >= timeout) {
-          log.warn("Timed out waiting for SolrCore, {},  to finish loading.", core);
-          break;
-        }
-      }
-    }
-  }
-
-  public boolean isCoreLoading(String name) {
-    return currentlyLoadingCores.contains(name);
-  }
-
-  // Let transient cache implementation tell us when it ages out a core
-  @Override
-  public void update(Observable o, Object arg) {
-    synchronized (modifyLock) {
-      // Erick Erickson debugging TestLazyCores. With this un-commented, we get no testLazyCores failures.
-//      SolrCore core = (SolrCore) arg;
-//      SolrQueryRequest req = new LocalSolrQueryRequest(core, new ModifiableSolrParams());
-//      CommitUpdateCommand cmd = new CommitUpdateCommand(req, false);
-//      cmd.openSearcher = false;
-//      cmd.waitSearcher = false;
-//      try {
-//        core.getUpdateHandler().commit(cmd);
-//      } catch (IOException e) {
-//        log.warn("Caught exception trying to close a transient core, ignoring as it should be benign");
-//      }
-      pendingCloses.add((SolrCore) arg); // Essentially just queue this core up for closing.
-      modifyLock.notifyAll(); // Wakes up closer thread too
-    }
-  }
-
-  public TransientSolrCoreCache getTransientCacheHandler() {
-
-    if (transientCoreCache == null) {
-      log.error("No transient handler has been defined. Check solr.xml to see if an attempt to provide a custom " +
-          "TransientSolrCoreCacheFactory was done incorrectly since the default should have been used otherwise.");
-      return null;
-    }
-    return transientCoreCache.getTransientSolrCoreCache();
-  }
-
-
-}
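
For reference: getCoreFromAnyList(name, true) above bumps the core's reference count via
SolrCore.open(), which is what CoreContainer.getCore() relies on; every caller must then
close() the core or it can never be shut down. A hedged usage sketch (the core name and
helper method are illustrative, not part of this patch):

    import org.apache.solr.core.CoreContainer;
    import org.apache.solr.core.SolrCore;

    public class RefCountExample {
      static void doWork(CoreContainer coreContainer) {
        SolrCore core = coreContainer.getCore("myCore"); // refcount +1
        if (core == null) return;                        // unknown or not loadable
        try {
          System.out.println(core.getName());
        } finally {
          core.close();                                  // refcount -1; never skip this
        }
      }
    }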

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/SolrDeletionPolicy.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrDeletionPolicy.java b/solr/core/src/java/org/apache/solr/core/SolrDeletionPolicy.java
deleted file mode 100644
index 34482cd..0000000
--- a/solr/core/src/java/org/apache/solr/core/SolrDeletionPolicy.java
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.io.File;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.List;
-
-import org.apache.lucene.index.IndexCommit;
-import org.apache.lucene.index.IndexDeletionPolicy;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FSDirectory;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.util.DateMathParser;
-import org.apache.solr.util.plugin.NamedListInitializedPlugin;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-/**
- * Standard Solr deletion policy that allows reserving index commit points
- * for certain amounts of time to support features such as index replication
- * or snapshooting directly out of a live index directory.
- *
- *
- * @see org.apache.lucene.index.IndexDeletionPolicy
- */
-public class SolrDeletionPolicy extends IndexDeletionPolicy implements NamedListInitializedPlugin {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private String maxCommitAge = null;
-  private int maxCommitsToKeep = 1;
-  private int maxOptimizedCommitsToKeep = 0;
-
-  @Override
-  public void init(NamedList args) {
-    String keepOptimizedOnlyString = (String) args.get("keepOptimizedOnly");
-    String maxCommitsToKeepString = (String) args.get("maxCommitsToKeep");
-    String maxOptimizedCommitsToKeepString = (String) args.get("maxOptimizedCommitsToKeep");
-    String maxCommitAgeString = (String) args.get("maxCommitAge");
-
-    if (maxCommitsToKeepString != null && maxCommitsToKeepString.trim().length() > 0)
-      maxCommitsToKeep = Integer.parseInt(maxCommitsToKeepString);
-    if (maxCommitAgeString != null && maxCommitAgeString.trim().length() > 0)
-      maxCommitAge = "-" + maxCommitAgeString;
-    if (maxOptimizedCommitsToKeepString != null && maxOptimizedCommitsToKeepString.trim().length() > 0) {
-      maxOptimizedCommitsToKeep = Integer.parseInt(maxOptimizedCommitsToKeepString);
-    }
-    
-    // legacy support
-    if (keepOptimizedOnlyString != null && keepOptimizedOnlyString.trim().length() > 0) {
-      boolean keepOptimizedOnly = Boolean.parseBoolean(keepOptimizedOnlyString);
-      if (keepOptimizedOnly) {
-        maxOptimizedCommitsToKeep = Math.max(maxOptimizedCommitsToKeep, maxCommitsToKeep);
-        maxCommitsToKeep = 0;
-      }
-    }
-  }
-
-  /**
-   * Internal use for Lucene... do not explicitly call.
-   */
-  @Override
-  public void onInit(List<? extends IndexCommit> commits) throws IOException {
-    if (commits.isEmpty()) {
-      return;
-    }
-    log.debug("SolrDeletionPolicy.onInit: commits: {}", new CommitsLoggingDebug(commits));
-    updateCommits(commits);
-  }
-
-  /**
-   * Internal use for Lucene... do not explicitly call.
-   */
-  @Override
-  public void onCommit(List<? extends IndexCommit> commits) throws IOException {
-    log.debug("SolrDeletionPolicy.onCommit: commits: {}", new CommitsLoggingDebug(commits));
-    updateCommits(commits);
-  }
-
-  private static class CommitsLoggingInfo {
-    private List<? extends IndexCommit> commits;
-
-    public CommitsLoggingInfo(List<? extends IndexCommit> commits) {
-      this.commits = commits;
-    }
-
-    @Override
-    public final String toString() {
-      StringBuilder sb = new StringBuilder();
-      sb.append("num=").append(commits.size());
-      for (IndexCommit c : commits) {
-        sb.append("\n\tcommit{");
-        appendDetails(sb, c);
-        sb.append("}");
-      }
-      return sb.toString();
-    }
-
-    protected void appendDetails(StringBuilder sb, IndexCommit c) {
-      Directory dir = c.getDirectory();
-      if (dir instanceof FSDirectory) {
-        FSDirectory fsd = (FSDirectory) dir;
-        sb.append("dir=").append(fsd.getDirectory());
-      } else {
-        sb.append("dir=").append(dir);
-      }
-      sb.append(",segFN=").append(c.getSegmentsFileName());
-      sb.append(",generation=").append(c.getGeneration());
-    }
-  }
-
-  private static class CommitsLoggingDebug extends CommitsLoggingInfo {
-    public CommitsLoggingDebug(List<? extends IndexCommit> commits) {
-      super(commits);
-    }
-
-    @Override
-    protected void appendDetails(StringBuilder sb, IndexCommit c) {
-      super.appendDetails(sb, c);
-      try {
-        sb.append(",filenames=");
-        sb.append(c.getFileNames());
-      } catch (IOException e) {
-        sb.append(e);
-      }
-    }
-  }
-
-  private void updateCommits(List<? extends IndexCommit> commits) {
-    // to be safe, we should only call delete on a commit point passed to us
-    // in this specific call (may be across diff IndexWriter instances).
-    // this will happen rarely, so just synchronize everything
-    // for safety and to avoid race conditions
-
-    synchronized (this) {
-      long maxCommitAgeTimeStamp = -1L;
-      IndexCommit newest = commits.get(commits.size() - 1);
-      log.debug("newest commit generation = " + newest.getGeneration());
-      int singleSegKept = (newest.getSegmentCount() == 1) ? 1 : 0;
-      int totalKept = 1;
-
-      // work our way from newest to oldest, skipping the first since we always want to keep it.
-      for (int i=commits.size()-2; i>=0; i--) {
-        IndexCommit commit = commits.get(i);
-
-        // delete anything too old, regardless of other policies
-        try {
-          if (maxCommitAge != null) {
-            if (maxCommitAgeTimeStamp==-1) {
-              DateMathParser dmp = new DateMathParser(DateMathParser.UTC);
-              maxCommitAgeTimeStamp = dmp.parseMath(maxCommitAge).getTime();
-            }
-            if (IndexDeletionPolicyWrapper.getCommitTimestamp(commit) < maxCommitAgeTimeStamp) {
-              commit.delete();
-              continue;
-            }
-          }
-        } catch (Exception e) {
-          log.warn("Exception while checking commit point's age for deletion", e);
-        }
-
-        if (singleSegKept < maxOptimizedCommitsToKeep && commit.getSegmentCount() == 1) {
-          totalKept++;
-          singleSegKept++;
-          continue;
-        }
-
-        if (totalKept < maxCommitsToKeep) {
-          totalKept++;
-          continue;
-        }
-                                                  
-        commit.delete();
-      }
-
-    } // end synchronized
-  }
-
-  private String getId(IndexCommit commit) {
-    StringBuilder sb = new StringBuilder();
-    Directory dir = commit.getDirectory();
-
-    // For anything persistent, make something that will
-    // be the same, regardless of the Directory instance.
-    if (dir instanceof FSDirectory) {
-      FSDirectory fsd = (FSDirectory) dir;
-      File fdir = fsd.getDirectory().toFile();
-      sb.append(fdir.getPath());
-    } else {
-      sb.append(dir);
-    }
-
-    sb.append('/');
-    sb.append(commit.getGeneration());
-    return sb.toString();
-  }
-
-  public String getMaxCommitAge() {
-    return maxCommitAge;
-  }
-
-  public int getMaxCommitsToKeep() {
-    return maxCommitsToKeep;
-  }
-
-  public int getMaxOptimizedCommitsToKeep() {
-    return maxOptimizedCommitsToKeep;
-  }
-
-  public void setMaxCommitsToKeep(int maxCommitsToKeep) {
-    synchronized (this) {
-      this.maxCommitsToKeep = maxCommitsToKeep;
-    }
-  }
-
-  public void setMaxOptimizedCommitsToKeep(int maxOptimizedCommitsToKeep) {
-    synchronized (this) {
-      this.maxOptimizedCommitsToKeep = maxOptimizedCommitsToKeep;
-    }    
-  }
-
-}
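
For reference: init() above reads its settings as strings from a NamedList, which in
practice comes from the <deletionPolicy> element in solrconfig.xml. A hedged sketch of how
those arguments are interpreted (the values are illustrative, not part of this patch):

    import org.apache.solr.common.util.NamedList;
    import org.apache.solr.core.SolrDeletionPolicy;

    public class DeletionPolicyExample {
      public static void main(String[] args) {
        NamedList<Object> nl = new NamedList<>();
        nl.add("maxCommitsToKeep", "1");           // keep only the newest commit
        nl.add("maxOptimizedCommitsToKeep", "0");
        nl.add("maxCommitAge", "30MINUTES");       // stored as date math "-30MINUTES"
        SolrDeletionPolicy policy = new SolrDeletionPolicy();
        policy.init(nl);
        System.out.println(policy.getMaxCommitsToKeep()); // 1
      }
    }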

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/SolrEventListener.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrEventListener.java b/solr/core/src/java/org/apache/solr/core/SolrEventListener.java
deleted file mode 100644
index ab1d485..0000000
--- a/solr/core/src/java/org/apache/solr/core/SolrEventListener.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.util.plugin.NamedListInitializedPlugin;
-
-/**
- * Listener for core-level events such as commits and the opening of new searchers.
- */
-public interface SolrEventListener extends NamedListInitializedPlugin {
-
-  public void postCommit();
-  
-  public void postSoftCommit();
-
-  /** The searchers passed here are only guaranteed to be valid for the duration
-   * of this method call, so care should be taken not to spawn threads or asynchronous
-   * tasks with references to these searchers.
-   * <p>
-   * Implementations should add the {@link org.apache.solr.common.params.EventParams#EVENT} parameter and set it to a value of either:
-   * <ul>
-   * <li>{@link org.apache.solr.common.params.EventParams#FIRST_SEARCHER} - First Searcher event</li>
-   * <li>{@link org.apache.solr.common.params.EventParams#NEW_SEARCHER} - New Searcher event</li>
-   * </ul>
-   *
-   * Sample:
-   * <pre>
-    if (currentSearcher != null) {
-      nlst.add(CommonParams.EVENT, CommonParams.NEW_SEARCHER);
-    } else {
-      nlst.add(CommonParams.EVENT, CommonParams.FIRST_SEARCHER);
-    }
-   *
-   * </pre>
-   *
-   * @see org.apache.solr.core.AbstractSolrEventListener#addEventParms(org.apache.solr.search.SolrIndexSearcher, org.apache.solr.common.util.NamedList) 
-   *
-   * @param newSearcher The new {@link org.apache.solr.search.SolrIndexSearcher} to use
-   * @param currentSearcher The existing {@link org.apache.solr.search.SolrIndexSearcher}.  null if this is a firstSearcher event.
-   *
-   */
-  public void newSearcher(SolrIndexSearcher newSearcher, SolrIndexSearcher currentSearcher);
-
-}

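A listener like this is typically registered in solrconfig.xml under a <listener event="newSearcher" .../> element. A minimal implementation sketch (the class name and no-op bodies are illustrative):

    import org.apache.solr.common.util.NamedList;
    import org.apache.solr.core.SolrEventListener;
    import org.apache.solr.search.SolrIndexSearcher;

    public class LoggingEventListener implements SolrEventListener {
      @Override
      public void init(NamedList args) {
        // configuration from the <listener> element in solrconfig.xml, if any
      }

      @Override
      public void postCommit() { /* no-op */ }

      @Override
      public void postSoftCommit() { /* no-op */ }

      @Override
      public void newSearcher(SolrIndexSearcher newSearcher, SolrIndexSearcher currentSearcher) {
        // currentSearcher == null signals a firstSearcher event.
        // Do any warming synchronously here; per the javadoc above, the
        // searchers must not escape this method call.
      }
    }
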
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/SolrInfoBean.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrInfoBean.java b/solr/core/src/java/org/apache/solr/core/SolrInfoBean.java
deleted file mode 100644
index 38ffc99..0000000
--- a/solr/core/src/java/org/apache/solr/core/SolrInfoBean.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.util.Map;
-import java.util.Set;
-
-import com.codahale.metrics.MetricRegistry;
-import org.apache.solr.metrics.SolrMetricManager;
-import org.apache.solr.util.stats.MetricUtils;
-
-/**
- * Interface for getting various UI-friendly strings and metrics
- * from objects which are 'pluggable', to make server administration
- * easier.
- */
-public interface SolrInfoBean {
-
-  /**
-   * Category of Solr component.
-   */
-  enum Category { CONTAINER, ADMIN, CORE, QUERY, UPDATE, CACHE, HIGHLIGHTER, QUERYPARSER, SPELLCHECKER,
-    SEARCHER, REPLICATION, TLOG, INDEX, DIRECTORY, HTTP, OTHER }
-
-  /**
-   * Top-level group of beans or metrics for a subsystem.
-   */
-  enum Group { jvm, jetty, node, core, collection, shard, cluster, overseer }
-
-  /**
-   * Simple common usage name, e.g. BasicQueryHandler,
-   * or fully qualified class name.
-   */
-  String getName();
-  /** Simple one or two line description */
-  String getDescription();
-  /** Category of this component */
-  Category getCategory();
-
-  /** Optionally return a snapshot of metrics that this component reports, or null.
-   * Default implementation requires that both {@link #getMetricNames()} and
-   * {@link #getMetricRegistry()} return non-null values.
-   */
-  default Map<String, Object> getMetricsSnapshot() {
-    if (getMetricRegistry() == null || getMetricNames() == null) {
-      return null;
-    }
-    return MetricUtils.convertMetrics(getMetricRegistry(), getMetricNames());
-  }
-
-  /**
-   * Modifiable set of metric names that this component reports (default is null,
-   * which means none). If not null then this set is used by {@link #registerMetricName(String)}
-   * to capture what metrics names are reported from this component.
-   * <p><b>NOTE: this set has to allow iteration under modifications.</b></p>
-   */
-  default Set<String> getMetricNames() {
-    return null;
-  }
-
-  /**
-   * An instance of {@link MetricRegistry} that this component uses for metrics reporting
-   * (default is null, which means no registry).
-   */
-  default MetricRegistry getMetricRegistry() {
-    return null;
-  }
-
-  /** Register a metric name that this component reports. This method is called by various
-   * metric registration methods in {@link org.apache.solr.metrics.SolrMetricManager} in order
-   * to capture what metric names are reported from this component (which in turn is called
-   * from {@link org.apache.solr.metrics.SolrMetricProducer#initializeMetrics(SolrMetricManager, String, String, String)}).
-   * <p>Default implementation registers all metrics added by a component. Implementations may
-   * override this to avoid reporting some or all metrics returned by {@link #getMetricsSnapshot()}</p>
-   */
-  default void registerMetricName(String name) {
-    Set<String> names = getMetricNames();
-    if (names != null) {
-      names.add(name);
-    }
-  }
-}

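Because getMetricsSnapshot(), getMetricNames() and getMetricRegistry() all have default implementations, a component only needs to supply the three descriptive methods. A minimal sketch (class name and description are illustrative):

    import org.apache.solr.core.SolrInfoBean;

    public class ExampleInfoBean implements SolrInfoBean {
      @Override
      public String getName() {
        return ExampleInfoBean.class.getName();
      }

      @Override
      public String getDescription() {
        return "Example bean that reports no metrics";
      }

      @Override
      public Category getCategory() {
        return Category.OTHER;
      }
    }

With getMetricNames() left at its default of null, getMetricsSnapshot() returns null and registerMetricName() is a no-op, so the bean shows up for administration but reports no metrics.
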
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java b/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java
deleted file mode 100644
index 0ff5c7b..0000000
--- a/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java
+++ /dev/null
@@ -1,918 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import javax.naming.Context;
-import javax.naming.InitialContext;
-import javax.naming.NamingException;
-import javax.naming.NoInitialContextException;
-import java.io.Closeable;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.lang.invoke.MethodHandles;
-import java.lang.reflect.Constructor;
-import java.net.URL;
-import java.net.URLClassLoader;
-import java.nio.charset.CharacterCodingException;
-import java.nio.charset.Charset;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.DirectoryStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.PathMatcher;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentSkipListSet;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-
-import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.util.CharFilterFactory;
-import org.apache.lucene.analysis.util.ResourceLoader;
-import org.apache.lucene.analysis.util.ResourceLoaderAware;
-import org.apache.lucene.analysis.util.TokenFilterFactory;
-import org.apache.lucene.analysis.util.TokenizerFactory;
-import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.DocValuesFormat;
-import org.apache.lucene.codecs.PostingsFormat;
-import org.apache.lucene.util.IOUtils;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.handler.admin.CoreAdminHandler;
-import org.apache.solr.handler.component.SearchComponent;
-import org.apache.solr.handler.component.ShardHandlerFactory;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.response.QueryResponseWriter;
-import org.apache.solr.rest.RestManager;
-import org.apache.solr.schema.FieldType;
-import org.apache.solr.schema.ManagedIndexSchemaFactory;
-import org.apache.solr.schema.SimilarityFactory;
-import org.apache.solr.search.QParserPlugin;
-import org.apache.solr.update.processor.UpdateRequestProcessorFactory;
-import org.apache.solr.util.plugin.SolrCoreAware;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * @since solr 1.3
- */ 
-public class SolrResourceLoader implements ResourceLoader, Closeable
-{
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  static final String project = "solr";
-  static final String base = "org.apache" + "." + project;
-  static final String[] packages = {
-      "", "analysis.", "schema.", "handler.", "handler.tagger.", "search.", "update.", "core.", "response.", "request.",
-      "update.processor.", "util.", "spelling.", "handler.component.", "handler.dataimport.",
-      "spelling.suggest.", "spelling.suggest.fst.", "rest.schema.analysis.", "security.", "handler.admin.",
-      "cloud.autoscaling."
-  };
-  private static final String SOLR_CORE_NAME = "solr.core.name";
-  private static Set<String> loggedOnce = new ConcurrentSkipListSet<>();
-
-  protected URLClassLoader classLoader;
-  private final Path instanceDir;
-  private String dataDir;
-  
-  private final List<SolrCoreAware> waitingForCore = Collections.synchronizedList(new ArrayList<SolrCoreAware>());
-  private final List<SolrInfoBean> infoMBeans = Collections.synchronizedList(new ArrayList<SolrInfoBean>());
-  private final List<ResourceLoaderAware> waitingForResources = Collections.synchronizedList(new ArrayList<ResourceLoaderAware>());
-  private static final Charset UTF_8 = StandardCharsets.UTF_8;
-
-  private final Properties coreProperties;
-
-  private volatile boolean live;
-  
-  // Provide a registry so that managed resources can register themselves while the XML configuration
-  // documents are being parsed ... after all are registered, they are asked by the RestManager to
-  // initialize themselves. This two-step process is required because not all resources are available
-  // (such as the SolrZkClient) when XML docs are being parsed.    
-  private RestManager.Registry managedResourceRegistry;
-  
-  public synchronized RestManager.Registry getManagedResourceRegistry() {
-    if (managedResourceRegistry == null) {
-      managedResourceRegistry = new RestManager.Registry();      
-    }
-    return managedResourceRegistry; 
-  }
-
-  public SolrResourceLoader() {
-    this(SolrResourceLoader.locateSolrHome(), null, null);
-  }
-
-  /**
-   * <p>
-   * This loader will delegate to the context classloader when possible,
-   * otherwise it will attempt to resolve resources using any jar files
-   * found in the "lib/" directory in the specified instance directory.
-   * If the instance directory is not specified (null), {@link #locateSolrHome()} will provide one.
-   * </p>
-   */
-  public SolrResourceLoader(Path instanceDir, ClassLoader parent)
-  {
-    this(instanceDir, parent, null);
-  }
-
-  public SolrResourceLoader(Path instanceDir) {
-    this(instanceDir, null, null);
-  }
-
-  /**
-   * <p>
-   * This loader will delegate to Solr's classloader when possible,
-   * otherwise it will attempt to resolve resources using any jar files
-   * found in the "lib/" directory in the specified instance directory.
-   * </p>
-   *
-   * @param instanceDir - base directory for this resource loader, if null locateSolrHome() will be used.
-   * @see #locateSolrHome
-   */
-  public SolrResourceLoader(Path instanceDir, ClassLoader parent, Properties coreProperties) {
-    if (instanceDir == null) {
-      this.instanceDir = SolrResourceLoader.locateSolrHome().toAbsolutePath().normalize();
-      log.debug("new SolrResourceLoader for deduced Solr Home: '{}'", this.instanceDir);
-    } else{
-      this.instanceDir = instanceDir.toAbsolutePath().normalize();
-      log.debug("new SolrResourceLoader for directory: '{}'", this.instanceDir);
-    }
-
-    if (parent == null) {
-      parent = getClass().getClassLoader();
-    }
-    this.classLoader = URLClassLoader.newInstance(new URL[0], parent);
-
-    /* 
-     * Skip the lib subdirectory when we are loading from the solr home.
-     * Otherwise load it, so core lib directories still get loaded.
-     * The default sharedLib will pick this up later, and if the user has
-     * changed sharedLib, then we don't want to load that location anyway.
-     */
-    if (!this.instanceDir.equals(SolrResourceLoader.locateSolrHome())) {
-      Path libDir = this.instanceDir.resolve("lib");
-      if (Files.exists(libDir)) {
-        try {
-          addToClassLoader(getURLs(libDir));
-        } catch (IOException e) {
-          log.warn("Couldn't add files from {} to classpath: {}", libDir, e.getMessage());
-        }
-        reloadLuceneSPI();
-      }
-    }
-    this.coreProperties = coreProperties;
-  }
-
-  /**
-   * Adds URLs to the ResourceLoader's internal classloader.  This method <b>MUST</b>
-   * only be called prior to using this ResourceLoader to get any resources, otherwise
-   * its behavior will be non-deterministic. You also have to call {@link #reloadLuceneSPI()}
-   * before using this ResourceLoader.
-   *
-   * @param urls    the URLs of files to add
-   */
-  void addToClassLoader(List<URL> urls) {
-    URLClassLoader newLoader = addURLsToClassLoader(classLoader, urls);
-    if (newLoader != classLoader) {
-      this.classLoader = newLoader;
-    }
-
-    log.info("[{}] Added {} libs to classloader, from paths: {}",
-        getCoreName("null"), urls.size(), urls.stream()
-        .map(u -> u.getPath().substring(0,u.getPath().lastIndexOf("/")))
-        .sorted()
-        .distinct()
-        .collect(Collectors.toList()));
-  }
-
-  private String getCoreName(String defaultVal) {
-    if (getCoreProperties() != null) {
-      return getCoreProperties().getProperty(SOLR_CORE_NAME, defaultVal);
-    } else {
-      return defaultVal;
-    }
-  }
-
-  /**
-   * Adds URLs to the ResourceLoader's internal classloader.  This method <b>MUST</b>
-   * only be called prior to using this ResourceLoader to get any resources, otherwise
-   * its behavior will be non-deterministic. You also have to call {@link #reloadLuceneSPI()}
-   * before using this ResourceLoader.
-   *
-   * @param urls    the URLs of files to add
-   */
-  void addToClassLoader(URL... urls) {
-    addToClassLoader(Arrays.asList(urls));
-  }
-  
-  /**
-   * Reloads all Lucene SPI implementations using the new classloader.
-   * This method must be called after {@link #addToClassLoader(List)}
-   * and before using this ResourceLoader.
-   */
-  void reloadLuceneSPI() {
-    // Codecs:
-    PostingsFormat.reloadPostingsFormats(this.classLoader);
-    DocValuesFormat.reloadDocValuesFormats(this.classLoader);
-    Codec.reloadCodecs(this.classLoader);
-    // Analysis:
-    CharFilterFactory.reloadCharFilters(this.classLoader);
-    TokenFilterFactory.reloadTokenFilters(this.classLoader);
-    TokenizerFactory.reloadTokenizers(this.classLoader);
-  }
-
-  private static URLClassLoader addURLsToClassLoader(final URLClassLoader oldLoader, List<URL> urls) {
-    if (urls.size() == 0) {
-      return oldLoader;
-    }
-
-    List<URL> allURLs = new ArrayList<>();
-    allURLs.addAll(Arrays.asList(oldLoader.getURLs()));
-    allURLs.addAll(urls);
-    for (URL url : urls) {
-      log.debug("Adding '{}' to classloader", url.toString());
-    }
-
-    ClassLoader oldParent = oldLoader.getParent();
-    IOUtils.closeWhileHandlingException(oldLoader);
-    return URLClassLoader.newInstance(allURLs.toArray(new URL[allURLs.size()]), oldParent);
-  }
-
-  /**
-   * Utility method to get the URLs of all paths under a given directory that match a filter
-   * @param libDir the root directory
-   * @param filter the filter
-   * @return all matching URLs
-   * @throws IOException on error
-   */
-  public static List<URL> getURLs(Path libDir, DirectoryStream.Filter<Path> filter) throws IOException {
-    List<URL> urls = new ArrayList<>();
-    try (DirectoryStream<Path> directory = Files.newDirectoryStream(libDir, filter)) {
-      for (Path element : directory) {
-        urls.add(element.toUri().normalize().toURL());
-      }
-    }
-    return urls;
-  }
-
-  /**
-   * Utility method to get the URLs of all paths under a given directory
-   * @param libDir the root directory
-   * @return the URLs of all entries under the given directory
-   * @throws IOException on error
-   */
-  public static List<URL> getURLs(Path libDir) throws IOException {
-    return getURLs(libDir, new DirectoryStream.Filter<Path>() {
-      @Override
-      public boolean accept(Path entry) throws IOException {
-        return true;
-      }
-    });
-  }
-
-  /**
-   * Utility method to get the URLs of all paths under a given directory that match a regex
-   * @param libDir the root directory
-   * @param regex the regex as a String
-   * @return all matching URLs
-   * @throws IOException on error
-   */
-  public static List<URL> getFilteredURLs(Path libDir, String regex) throws IOException {
-    final PathMatcher matcher = libDir.getFileSystem().getPathMatcher("regex:" + regex);
-    return getURLs(libDir, new DirectoryStream.Filter<Path>() {
-      @Override
-      public boolean accept(Path entry) throws IOException {
-        return matcher.matches(entry.getFileName());
-      }
-    });
-  }
-  
-  /** Ensures a directory name always ends with a trailing file separator. */
-  public static String normalizeDir(String path) {
-    return ( path != null && (!(path.endsWith("/") || path.endsWith("\\"))) )? path + File.separator : path;
-  }
-  
-  public String[] listConfigDir() {
-    File configdir = new File(getConfigDir());
-    if( configdir.exists() && configdir.isDirectory() ) {
-      return configdir.list();
-    } else {
-      return new String[0];
-    }
-  }
-
-  public String getConfigDir() {
-    return instanceDir.resolve("conf").toString();
-  }
-  
-  public String getDataDir()    {
-    return dataDir;
-  }
-
-  public Properties getCoreProperties() {
-    return coreProperties;
-  }
-
-  /**
-   * EXPERT
-   * <p>
-   * The underlying class loader.  Most applications will not need to use this.
-   * @return The {@link ClassLoader}
-   */
-  public ClassLoader getClassLoader() {
-    return classLoader;
-  }
-
-  /** Opens a schema resource by its name.
-   * Override this method to customize loading schema resources.
-   *@return the stream for the named schema
-   */
-  public InputStream openSchema(String name) throws IOException {
-    return openResource(name);
-  }
-  
-  /** Opens a config resource by its name.
-   * Override this method to customize loading config resources.
-   *@return the stream for the named configuration
-   */
-  public InputStream openConfig(String name) throws IOException {
-    return openResource(name);
-  }
-
-  private Path checkPathIsSafe(Path pathToCheck) throws IOException {
-    if (Boolean.getBoolean("solr.allow.unsafe.resourceloading"))
-      return pathToCheck;
-    pathToCheck = pathToCheck.normalize();
-    if (pathToCheck.startsWith(instanceDir))
-      return pathToCheck;
-    throw new IOException("File " + pathToCheck + " is outside resource loader dir " + instanceDir +
-        "; set -Dsolr.allow.unsafe.resourceloading=true to allow unsafe loading");
-  }
-  
-  /** Opens any resource by its name.
-   * By default, this will look in multiple locations to load the resource:
-   * $configDir/$resource, then $instanceDir/$resource;
-   * otherwise, it will look for it in any jar accessible through the class loader.
-   * Override this method to customize loading resources.
-   *@return the stream for the named resource
-   */
-  @Override
-  public InputStream openResource(String resource) throws IOException {
-
-    Path inConfigDir = getInstancePath().resolve("conf").resolve(resource);
-    if (Files.exists(inConfigDir) && Files.isReadable(inConfigDir)) {
-      return Files.newInputStream(checkPathIsSafe(inConfigDir));
-    }
-
-    Path inInstanceDir = getInstancePath().resolve(resource);
-    if (Files.exists(inInstanceDir) && Files.isReadable(inInstanceDir)) {
-      return Files.newInputStream(checkPathIsSafe(inInstanceDir));
-    }
-
-    // Delegate to the class loader (looking into $INSTANCE_DIR/lib jars).
-    // We need a ClassLoader-compatible (forward-slashes) path here!
-    InputStream is = classLoader.getResourceAsStream(resource.replace(File.separatorChar, '/'));
-
-    // This is a hack just for tests (it is not done in ZKResourceLoader)!
-    // TODO can we nuke this?
-    if (is == null && System.getProperty("jetty.testMode") != null) {
-      is = classLoader.getResourceAsStream(("conf/" + resource).replace(File.separatorChar, '/'));
-    }
-
-    if (is == null) {
-      throw new SolrResourceNotFoundException("Can't find resource '" + resource + "' in classpath or '" + instanceDir + "'");
-    }
-    return is;
-  }
-
-  /**
-   * Report the location of a resource found by the resource loader
-   */
-  public String resourceLocation(String resource) {
-    Path inConfigDir = getInstancePath().resolve("conf").resolve(resource);
-    if (Files.exists(inConfigDir) && Files.isReadable(inConfigDir))
-      return inConfigDir.toAbsolutePath().normalize().toString();
-
-    Path inInstanceDir = getInstancePath().resolve(resource);
-    if (Files.exists(inInstanceDir) && Files.isReadable(inInstanceDir))
-      return inInstanceDir.toAbsolutePath().normalize().toString();
-
-    try (InputStream is = classLoader.getResourceAsStream(resource.replace(File.separatorChar, '/'))) {
-      if (is != null)
-        return "classpath:" + resource;
-    } catch (IOException e) {
-      // ignore
-    }
-
-    return resource;
-  }
-
-  /**
-   * Accesses a resource by name and returns the (non-comment) lines
-   * containing data.
-   *
-   * <p>
-   * A comment line is any line that starts with the character "#"
-   * </p>
-   *
-   * @return a list of non-blank non-comment lines with whitespace trimmed
-   * from front and back.
-   * @throws IOException If there is a low-level I/O error.
-   */
-  public List<String> getLines(String resource) throws IOException {
-    return getLines(resource, UTF_8);
-  }
-
-  /**
-   * Accesses a resource by name and returns the (non-comment) lines containing
-   * data using the given character encoding.
-   *
-   * <p>
-   * A comment line is any line that starts with the character "#"
-   * </p>
-   *
-   * @param resource the file to be read
-   * @return a list of non-blank non-comment lines with whitespace trimmed
-   * @throws IOException If there is a low-level I/O error.
-   */
-  public List<String> getLines(String resource,
-      String encoding) throws IOException {
-    return getLines(resource, Charset.forName(encoding));
-  }
-
-
-  public List<String> getLines(String resource, Charset charset) throws IOException{
-    try {
-      return WordlistLoader.getLines(openResource(resource), charset);
-    } catch (CharacterCodingException ex) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, 
-         "Error loading resource (wrong encoding?): " + resource, ex);
-    }
-  }
-
-  /*
-   * A static map of short class name to fully qualified class name 
-   */
-  private static final Map<String, String> classNameCache = new ConcurrentHashMap<>();
-
-  // Using this pattern, legacy analysis components from previous Solr versions are identified and delegated to SPI loader:
-  private static final Pattern legacyAnalysisPattern = 
-      Pattern.compile("((\\Q"+base+".analysis.\\E)|(\\Q"+project+".\\E))([\\p{L}_$][\\p{L}\\p{N}_$]+?)(TokenFilter|Filter|Tokenizer|CharFilter)Factory");
-
-  @Override
-  public <T> Class<? extends T> findClass(String cname, Class<T> expectedType) {
-    return findClass(cname, expectedType, empty);
-  }
-  
-  /**
-   * This method loads a class either with its FQN or a short name (solr.class-simplename or class-simplename).
-   * It first tries to load the class with the name that is given; if that fails, it tries all the known
-   * solr packages. This method caches the FQN of a short name in a static map in order to make subsequent lookups
-   * for the same class faster. The caching is done only if the class is loaded by the webapp classloader and it
-   * is loaded using a short name.
-   *
-   * @param cname The name or the short name of the class.
-   * @param subpackages the packages to be tried if the cname starts with solr.
-   * @return the loaded class; an exception is thrown if loading fails
-   */
-  public <T> Class<? extends T> findClass(String cname, Class<T> expectedType, String... subpackages) {
-    if (subpackages == null || subpackages.length == 0 || subpackages == packages) {
-      subpackages = packages;
-      String  c = classNameCache.get(cname);
-      if(c != null) {
-        try {
-          return Class.forName(c, true, classLoader).asSubclass(expectedType);
-        } catch (ClassNotFoundException e) {
-          //this is unlikely
-          log.error("Unable to load cached class-name :  "+ c +" for shortname : "+cname + e);
-        }
-
-      }
-    }
-    
-    Class<? extends T> clazz = null;
-    try {
-      // first try legacy analysis patterns, now replaced by Lucene's Analysis package:
-      final Matcher m = legacyAnalysisPattern.matcher(cname);
-      if (m.matches()) {
-        final String name = m.group(4);
-        log.trace("Trying to load class from analysis SPI using name='{}'", name);
-        try {
-          if (CharFilterFactory.class.isAssignableFrom(expectedType)) {
-            return clazz = CharFilterFactory.lookupClass(name).asSubclass(expectedType);
-          } else if (TokenizerFactory.class.isAssignableFrom(expectedType)) {
-            return clazz = TokenizerFactory.lookupClass(name).asSubclass(expectedType);
-          } else if (TokenFilterFactory.class.isAssignableFrom(expectedType)) {
-            return clazz = TokenFilterFactory.lookupClass(name).asSubclass(expectedType);
-          } else {
-            log.warn("'{}' looks like an analysis factory, but caller requested different class type: {}", cname, expectedType.getName());
-          }
-        } catch (IllegalArgumentException ex) { 
-          // ok, we fall back to legacy loading
-        }
-      }
-      
-      // first try cname == full name
-      try {
-        return clazz = Class.forName(cname, true, classLoader).asSubclass(expectedType);
-      } catch (ClassNotFoundException e) {
-        String newName=cname;
-        if (newName.startsWith(project)) {
-          newName = cname.substring(project.length()+1);
-        }
-        for (String subpackage : subpackages) {
-          try {
-            String name = base + '.' + subpackage + newName;
-            log.trace("Trying class name " + name);
-            return clazz = Class.forName(name,true,classLoader).asSubclass(expectedType);
-          } catch (ClassNotFoundException e1) {
-            // ignore... assume first exception is best.
-          }
-        }
-    
-        throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, "Error loading class '" + cname + "'", e);
-      }
-      
-    } finally {
-      if (clazz != null) {
-        //cache the shortname vs FQN if it is loaded by the webapp classloader  and it is loaded
-        // using a shortname
-        if (clazz.getClassLoader() == SolrResourceLoader.class.getClassLoader() &&
-              !cname.equals(clazz.getName()) &&
-              (subpackages.length == 0 || subpackages == packages)) {
-          //store in the cache
-          classNameCache.put(cname, clazz.getName());
-        }
-        
-        // print warning if class is deprecated
-        if (clazz.isAnnotationPresent(Deprecated.class)) {
-          log.warn("Solr loaded a deprecated plugin/analysis class [{}]. Please consult documentation how to replace it accordingly.",
-              cname);
-        }
-      }
-    }
-  }
-  
-  static final String[] empty = new String[0];
-  
-  @Override
-  public <T> T newInstance(String name, Class<T> expectedType) {
-    return newInstance(name, expectedType, empty);
-  }
-
-  private static final Class[] NO_CLASSES = new Class[0];
-  private static final Object[] NO_OBJECTS = new Object[0];
-
-  public <T> T newInstance(String cname, Class<T> expectedType, String ... subpackages) {
-    return newInstance(cname, expectedType, subpackages, NO_CLASSES, NO_OBJECTS);
-  }
-
-  public CoreAdminHandler newAdminHandlerInstance(final CoreContainer coreContainer, String cname, String ... subpackages) {
-    Class<? extends CoreAdminHandler> clazz = findClass(cname, CoreAdminHandler.class, subpackages);
-    if( clazz == null ) {
-      throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,
-          "Can not find class: "+cname + " in " + classLoader);
-    }
-    
-    CoreAdminHandler obj = null;
-    try {
-      Constructor<? extends CoreAdminHandler> ctor = clazz.getConstructor(CoreContainer.class);
-      obj = ctor.newInstance(coreContainer);
-    } 
-    catch (Exception e) {
-      throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,
-          "Error instantiating class: '" + clazz.getName()+"'", e);
-    }
-
-    if (!live) {
-      //TODO: Does SolrCoreAware make sense here since in a multi-core context
-      // which core are we talking about ?
-      if( obj instanceof ResourceLoaderAware ) {
-        assertAwareCompatibility( ResourceLoaderAware.class, obj );
-        waitingForResources.add( (ResourceLoaderAware)obj );
-      }
-    }
-
-    return obj;
-  }
-
-
-
-  public <T> T newInstance(String cName, Class<T> expectedType, String [] subPackages, Class[] params, Object[] args){
-    Class<? extends T> clazz = findClass(cName, expectedType, subPackages);
-    if( clazz == null ) {
-      throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,
-          "Can not find class: "+cName + " in " + classLoader);
-    }
-
-    T obj = null;
-    try {
-
-      Constructor<? extends T> constructor = null;
-      try {
-        constructor = clazz.getConstructor(params);
-        obj = constructor.newInstance(args);
-      } catch (NoSuchMethodException e) {
-        //look for a zero arg constructor if the constructor args do not match
-        try {
-          constructor = clazz.getConstructor();
-          obj = constructor.newInstance();
-        } catch (NoSuchMethodException e1) {
-          throw e;
-        }
-      }
-
-    } catch (Error err) {
-      log.error("Loading Class " + cName + " ("+clazz.getName() + ") triggered serious java error: "
-                + err.getClass().getName(), err);
-      throw err;
-
-    } catch (Exception e) {
-      throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,
-          "Error instantiating class: '" + clazz.getName()+"'", e);
-    }
-
-    if (!live) {
-      if( obj instanceof SolrCoreAware ) {
-        assertAwareCompatibility( SolrCoreAware.class, obj );
-        waitingForCore.add( (SolrCoreAware)obj );
-      }
-      if( obj instanceof ResourceLoaderAware ) {
-        assertAwareCompatibility( ResourceLoaderAware.class, obj );
-        waitingForResources.add( (ResourceLoaderAware)obj );
-      }
-      if (obj instanceof SolrInfoBean){
-        //TODO: Assert here?
-        infoMBeans.add((SolrInfoBean) obj);
-      }
-    }
-
-    return obj;
-  }
-
-  
-  /**
-   * Tell all {@link SolrCoreAware} instances about the SolrCore
-   */
-  public void inform(SolrCore core) 
-  {
-    this.dataDir = core.getDataDir();
-
-    // make a copy to avoid potential deadlock of a callback calling newInstance and trying to
-    // add something to waitingForCore.
-    SolrCoreAware[] arr;
-
-    while (waitingForCore.size() > 0) {
-      synchronized (waitingForCore) {
-        arr = waitingForCore.toArray(new SolrCoreAware[waitingForCore.size()]);
-        waitingForCore.clear();
-      }
-
-      for( SolrCoreAware aware : arr) {
-        aware.inform( core );
-      }
-    }
-
-    // this is the last method to be called in SolrCore before the latch is released.
-    live = true;
-  }
-  
-  /**
-   * Tell all {@link ResourceLoaderAware} instances about the loader
-   */
-  public void inform( ResourceLoader loader ) throws IOException
-  {
-
-     // make a copy to avoid potential deadlock of a callback adding to the list
-    ResourceLoaderAware[] arr;
-
-    while (waitingForResources.size() > 0) {
-      synchronized (waitingForResources) {
-        arr = waitingForResources.toArray(new ResourceLoaderAware[waitingForResources.size()]);
-        waitingForResources.clear();
-      }
-
-      for( ResourceLoaderAware aware : arr) {
-        aware.inform(loader);
-      }
-    }
-  }
-
-  /**
-   * Register any {@link SolrInfoBean}s
-   * @param infoRegistry The Info Registry
-   */
-  public void inform(Map<String, SolrInfoBean> infoRegistry) {
-    // this can currently happen concurrently with requests starting and lazy components
-    // loading.  Make sure infoMBeans doesn't change.
-
-    SolrInfoBean[] arr;
-    synchronized (infoMBeans) {
-      arr = infoMBeans.toArray(new SolrInfoBean[infoMBeans.size()]);
-      waitingForResources.clear();
-    }
-
-
-    for (SolrInfoBean bean : arr) {
-      // Too slow? I suspect not, but we may need
-      // to start tracking this in a Set.
-      if (!infoRegistry.containsValue(bean)) {
-        try {
-          infoRegistry.put(bean.getName(), bean);
-        } catch (Exception e) {
-          log.warn("could not register MBean '" + bean.getName() + "'.", e);
-        }
-      }
-    }
-  }
-  
-  /**
-   * Finds the solr home by looking up the value in one of three places:
-   * <ol>
-   *  <li>JNDI: via java:comp/env/solr/home</li>
-   *  <li>The system property solr.solr.home</li>
-   *  <li>Defaulting to solr/ relative to the current working directory</li>
-   * </ol>
-   *
-   * @return the solr home path
-   */
-  public static Path locateSolrHome() {
-
-    String home = null;
-    // Try JNDI
-    try {
-      Context c = new InitialContext();
-      home = (String)c.lookup("java:comp/env/"+project+"/home");
-      logOnceInfo("home_using_jndi", "Using JNDI solr.home: "+home );
-    } catch (NoInitialContextException e) {
-      log.debug("JNDI not configured for "+project+" (NoInitialContextEx)");
-    } catch (NamingException e) {
-      log.debug("No /"+project+"/home in JNDI");
-    } catch( RuntimeException ex ) {
-      log.warn("Odd RuntimeException while testing for JNDI: " + ex.getMessage());
-    } 
-    
-    // Now try system property
-    if( home == null ) {
-      String prop = project + ".solr.home";
-      home = System.getProperty(prop);
-      if( home != null ) {
-        logOnceInfo("home_using_sysprop", "Using system property "+prop+": " + home );
-      }
-    }
-    
-    // if all else fails, fall back to the default "solr/"
-    if( home == null ) {
-      home = project + '/';
-      logOnceInfo("home_default", project + " home defaulted to '" + home + "' (could not find system property or JNDI)");
-    }
-    return Paths.get(home);
-  }
-
-  // Logs a message only once per startup
-  private static void logOnceInfo(String key, String msg) {
-    if (!loggedOnce.contains(key)) {
-      loggedOnce.add(key);
-      log.info(msg);
-    }
-  }
-
-  /**
-   * @return the instance path for this resource loader
-   */
-  public Path getInstancePath() {
-    return instanceDir;
-  }
-  
-  /**
-   * Keep a list of classes that are allowed to implement each 'Aware' interface
-   */
-  private static final Map<Class, Class[]> awareCompatibility;
-  static {
-    awareCompatibility = new HashMap<>();
-    awareCompatibility.put( 
-      SolrCoreAware.class, new Class[] {
-        // DO NOT ADD THINGS TO THIS LIST -- ESPECIALLY THINGS THAT CAN BE CREATED DYNAMICALLY
-        // VIA RUNTIME APIS -- UNTIL CAREFULLY CONSIDERING THE ISSUES MENTIONED IN SOLR-8311
-        CodecFactory.class,
-        DirectoryFactory.class,
-        ManagedIndexSchemaFactory.class,
-        QueryResponseWriter.class,
-        SearchComponent.class,
-        ShardHandlerFactory.class,
-        SimilarityFactory.class,
-        SolrRequestHandler.class,
-        UpdateRequestProcessorFactory.class
-      }
-    );
-
-    awareCompatibility.put(
-      ResourceLoaderAware.class, new Class[] {
-        // DO NOT ADD THINGS TO THIS LIST -- ESPECIALLY THINGS THAT CAN BE CREATED DYNAMICALLY
-        // VIA RUNTIME APIS -- UNTIL CAREFULLY CONSIDERING THE ISSUES MENTIONED IN SOLR-8311
-        CharFilterFactory.class,
-        TokenFilterFactory.class,
-        TokenizerFactory.class,
-        QParserPlugin.class,
-        FieldType.class
-      }
-    );
-  }
-
-  /**
-   * Utility function to throw an exception if the class is invalid
-   */
-  static void assertAwareCompatibility( Class aware, Object obj )
-  {
-    Class[] valid = awareCompatibility.get( aware );
-    if( valid == null ) {
-      throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,
-          "Unknown Aware interface: "+aware );
-    }
-    for( Class v : valid ) {
-      if( v.isInstance( obj ) ) {
-        return;
-      }
-    }
-    StringBuilder builder = new StringBuilder();
-    builder.append( "Invalid 'Aware' object: " ).append( obj );
-    builder.append( " -- ").append( aware.getName() );
-    builder.append(  " must be an instance of: " );
-    for( Class v : valid ) {
-      builder.append( "[" ).append( v.getName() ).append( "] ") ;
-    }
-    throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, builder.toString() );
-  }
-
-  @Override
-  public void close() throws IOException {
-    IOUtils.close(classLoader);
-  }
-  public List<SolrInfoBean> getInfoMBeans(){
-    return Collections.unmodifiableList(infoMBeans);
-  }
-
-
-  public static void persistConfLocally(SolrResourceLoader loader, String resourceName, byte[] content) {
-    // Persist locally
-    File confFile = new File(loader.getConfigDir(), resourceName);
-    try {
-      File parentDir = confFile.getParentFile();
-      if ( ! parentDir.isDirectory()) {
-        if ( ! parentDir.mkdirs()) {
-          final String msg = "Can't create managed schema directory " + parentDir.getAbsolutePath();
-          log.error(msg);
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, msg);
-        }
-      }
-      try (OutputStream out = new FileOutputStream(confFile)) {
-        out.write(content);
-      }
-      log.info("Written confile " + resourceName);
-    } catch (IOException e) {
-      final String msg = "Error persisting conf file " + resourceName;
-      log.error(msg, e);
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, msg, e);
-    } finally {
-      try {
-        IOUtils.fsync(confFile.toPath(), false);
-      } catch (IOException e) {
-        final String msg = "Error syncing conf file " + resourceName;
-        log.error(msg, e);
-      }
-    }
-  }
-
-}

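The short-name resolution in findClass/newInstance above is what lets configuration refer to classes as e.g. "solr.HighlightComponent". A usage sketch (the instance directory path is illustrative, and error handling is omitted):

    import java.nio.file.Paths;
    import org.apache.solr.core.SolrResourceLoader;
    import org.apache.solr.handler.component.SearchComponent;

    public class LoaderExample {
      public static void main(String[] args) throws Exception {
        try (SolrResourceLoader loader = new SolrResourceLoader(Paths.get("/var/solr/mycore"))) {
          // "solr.HighlightComponent" is tried as-is first, then expanded
          // through the known packages to
          // org.apache.solr.handler.component.HighlightComponent.
          SearchComponent hl = loader.newInstance("solr.HighlightComponent", SearchComponent.class);
          System.out.println(hl.getClass().getName());
        }
      }
    }
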
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/SolrResourceNotFoundException.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrResourceNotFoundException.java b/solr/core/src/java/org/apache/solr/core/SolrResourceNotFoundException.java
deleted file mode 100644
index 47a863b..0000000
--- a/solr/core/src/java/org/apache/solr/core/SolrResourceNotFoundException.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.io.IOException;
-
-public class SolrResourceNotFoundException extends IOException {
-
-  public SolrResourceNotFoundException() {
-    super();
-  }
-
-  public SolrResourceNotFoundException(String message) {
-    super(message);
-  }
-
-  public SolrResourceNotFoundException(String message, Throwable cause) {
-    super(message, cause);
-  }
-
-  public SolrResourceNotFoundException(Throwable cause) {
-    super(cause);
-  }
-}

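Since SolrResourceNotFoundException extends IOException, callers of SolrResourceLoader.openResource() can treat a missing resource differently from other I/O failures. A brief sketch (the helper and its name are illustrative):

    import java.io.IOException;
    import java.io.InputStream;
    import org.apache.solr.core.SolrResourceLoader;
    import org.apache.solr.core.SolrResourceNotFoundException;

    public class OptionalResource {
      /** Returns the resource stream, or null if the resource does not exist. */
      static InputStream openOrNull(SolrResourceLoader loader, String name) throws IOException {
        try {
          return loader.openResource(name);
        } catch (SolrResourceNotFoundException e) {
          return null; // optional resource; other IOExceptions still propagate
        }
      }
    }
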

[20/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
deleted file mode 100644
index 8df0fb0..0000000
--- a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
+++ /dev/null
@@ -1,1900 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStreamWriter;
-import java.io.Writer;
-import java.lang.invoke.MethodHandles;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.FileSystems;
-import java.nio.file.Files;
-import java.nio.file.NoSuchFileException;
-import java.nio.file.Path;
-import java.nio.file.StandardCopyOption;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.zip.Adler32;
-import java.util.zip.Checksum;
-import java.util.zip.InflaterInputStream;
-
-import com.google.common.base.Strings;
-import org.apache.http.client.HttpClient;
-import org.apache.lucene.codecs.CodecUtil;
-import org.apache.lucene.index.IndexCommit;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.SegmentInfos;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.store.FilterDirectory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.IndexOutput;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpClientUtil;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient.Builder;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.cloud.CloudDescriptor;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.FastInputStream;
-import org.apache.solr.common.util.IOUtils;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SuppressForbidden;
-import org.apache.solr.core.DirectoryFactory;
-import org.apache.solr.core.DirectoryFactory.DirContext;
-import org.apache.solr.core.IndexDeletionPolicyWrapper;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.handler.ReplicationHandler.*;
-import org.apache.solr.request.LocalSolrQueryRequest;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.update.CdcrUpdateLog;
-import org.apache.solr.update.CommitUpdateCommand;
-import org.apache.solr.update.UpdateLog;
-import org.apache.solr.update.VersionInfo;
-import org.apache.solr.util.DefaultSolrThreadFactory;
-import org.apache.solr.util.FileUtils;
-import org.apache.solr.util.PropertiesOutputStream;
-import org.apache.solr.util.RTimer;
-import org.apache.solr.util.RefCounted;
-import org.apache.solr.util.TestInjection;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.JAVABIN;
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.handler.ReplicationHandler.*;
-
-/**
- * <p> Provides functionality of downloading changed index files as well as config files and a timer for scheduling fetches from the
- * master. </p>
- *
- *
- * @since solr 1.4
- */
-public class IndexFetcher {
-  private static final int _100K = 100000;
-
-  public static final String INDEX_PROPERTIES = "index.properties";
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private String masterUrl;
-
-  final ReplicationHandler replicationHandler;
-
-  private volatile Date replicationStartTimeStamp;
-  private RTimer replicationTimer;
-
-  private final SolrCore solrCore;
-
-  private volatile List<Map<String, Object>> filesToDownload;
-
-  private volatile List<Map<String, Object>> confFilesToDownload;
-
-  private volatile List<Map<String, Object>> tlogFilesToDownload;
-
-  private volatile List<Map<String, Object>> filesDownloaded;
-
-  private volatile List<Map<String, Object>> confFilesDownloaded;
-
-  private volatile List<Map<String, Object>> tlogFilesDownloaded;
-
-  private volatile Map<String, Object> currentFile;
-
-  private volatile DirectoryFileFetcher dirFileFetcher;
-
-  private volatile LocalFsFileFetcher localFileFetcher;
-
-  private volatile ExecutorService fsyncService;
-
-  private volatile boolean stop = false;
-
-  private boolean useInternalCompression = false;
-
-  private boolean useExternalCompression = false;
-
-  boolean fetchFromLeader = false;
-
-  private final HttpClient myHttpClient;
-
-  private Integer connTimeout;
-
-  private Integer soTimeout;
-
-  private boolean downloadTlogFiles = false;
-
-  private boolean skipCommitOnMasterVersionZero = true;
-
-  private static final String INTERRUPT_RESPONSE_MESSAGE = "Interrupted while waiting for modify lock";
-
-  public static class IndexFetchResult {
-    private final String message;
-    private final boolean successful;
-    private final Throwable exception;
-
-    public static final String FAILED_BY_INTERRUPT_MESSAGE = "Fetching index failed by interrupt";
-    public static final String FAILED_BY_EXCEPTION_MESSAGE = "Fetching index failed by exception";
-
-    /** pre-defined results */
-    public static final IndexFetchResult ALREADY_IN_SYNC = new IndexFetchResult("Local index commit is already in sync with peer", true, null);
-    public static final IndexFetchResult INDEX_FETCH_FAILURE = new IndexFetchResult("Fetching latest index failed", false, null);
-    public static final IndexFetchResult INDEX_FETCH_SUCCESS = new IndexFetchResult("Fetching latest index is successful", true, null);
-    public static final IndexFetchResult LOCK_OBTAIN_FAILED = new IndexFetchResult("Obtaining SnapPuller lock failed", false, null);
-    public static final IndexFetchResult CONTAINER_IS_SHUTTING_DOWN = new IndexFetchResult("I was asked to replicate but CoreContainer is shutting down", false, null);
-    public static final IndexFetchResult MASTER_VERSION_ZERO = new IndexFetchResult("Index in peer is empty and never committed yet", true, null);
-    public static final IndexFetchResult NO_INDEX_COMMIT_EXIST = new IndexFetchResult("No IndexCommit in local index", false, null);
-    public static final IndexFetchResult PEER_INDEX_COMMIT_DELETED = new IndexFetchResult("No files to download because IndexCommit in peer was deleted", false, null);
-    public static final IndexFetchResult LOCAL_ACTIVITY_DURING_REPLICATION = new IndexFetchResult("Local index modification during replication", false, null);
-    public static final IndexFetchResult EXPECTING_NON_LEADER = new IndexFetchResult("Replicating from leader but I'm the shard leader", false, null);
-    public static final IndexFetchResult LEADER_IS_NOT_ACTIVE = new IndexFetchResult("Replicating from leader but leader is not active", false, null);
-
-    IndexFetchResult(String message, boolean successful, Throwable exception) {
-      this.message = message;
-      this.successful = successful;
-      this.exception = exception;
-    }
-
-    /**
-     * @return exception thrown if failed by exception or interrupt, otherwise null
-     */
-    public Throwable getException() {
-      return this.exception;
-    }
-
-    /**
-     * @return true if index fetch was successful, false otherwise
-     */
-    public boolean getSuccessful() {
-      return this.successful;
-    }
-
-    public String getMessage() {
-      return this.message;
-    }
-  }
-
-  private static HttpClient createHttpClient(SolrCore core, String httpBasicAuthUser, String httpBasicAuthPassword, boolean useCompression) {
-    final ModifiableSolrParams httpClientParams = new ModifiableSolrParams();
-    httpClientParams.set(HttpClientUtil.PROP_BASIC_AUTH_USER, httpBasicAuthUser);
-    httpClientParams.set(HttpClientUtil.PROP_BASIC_AUTH_PASS, httpBasicAuthPassword);
-    httpClientParams.set(HttpClientUtil.PROP_ALLOW_COMPRESSION, useCompression);
-
-    return HttpClientUtil.createClient(httpClientParams, core.getCoreContainer().getUpdateShardHandler().getDefaultConnectionManager(), true);
-  }
-
-  public IndexFetcher(final NamedList initArgs, final ReplicationHandler handler, final SolrCore sc) {
-    solrCore = sc;
-    Object fetchFromLeader = initArgs.get(FETCH_FROM_LEADER);
-    if (fetchFromLeader instanceof Boolean) {
-      this.fetchFromLeader = (boolean) fetchFromLeader;
-    }
-    Object skipCommitOnMasterVersionZero = initArgs.get(SKIP_COMMIT_ON_MASTER_VERSION_ZERO);
-    if (skipCommitOnMasterVersionZero instanceof Boolean) {
-      this.skipCommitOnMasterVersionZero = (boolean) skipCommitOnMasterVersionZero;
-    }
-    String masterUrl = (String) initArgs.get(MASTER_URL);
-    if (masterUrl == null && !this.fetchFromLeader)
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-              "'masterUrl' is required for a slave");
-    if (masterUrl != null && masterUrl.endsWith(ReplicationHandler.PATH)) {
-      // strip the trailing ReplicationHandler.PATH ("/replication") suffix
-      masterUrl = masterUrl.substring(0, masterUrl.length() - ReplicationHandler.PATH.length());
-      log.warn("'masterUrl' must be specified without the "+ReplicationHandler.PATH+" suffix");
-    }
-    this.masterUrl = masterUrl;
-
-    this.replicationHandler = handler;
-    String compress = (String) initArgs.get(COMPRESSION);
-    useInternalCompression = INTERNAL.equals(compress);
-    useExternalCompression = EXTERNAL.equals(compress);
-    connTimeout = getParameter(initArgs, HttpClientUtil.PROP_CONNECTION_TIMEOUT, 30000, null);
-    
-    // allow a master override for tests - you specify this in the /replication slave section of solrconfig,
-    // and some tests don't want to define this
-    soTimeout = Integer.getInteger("solr.indexfetcher.sotimeout", -1);
-    if (soTimeout == -1) {
-      soTimeout = getParameter(initArgs, HttpClientUtil.PROP_SO_TIMEOUT, 120000, null);
-    }
-
-    if (initArgs.getBooleanArg(TLOG_FILES) != null) {
-      downloadTlogFiles = initArgs.getBooleanArg(TLOG_FILES);
-    }
-
-    String httpBasicAuthUser = (String) initArgs.get(HttpClientUtil.PROP_BASIC_AUTH_USER);
-    String httpBasicAuthPassword = (String) initArgs.get(HttpClientUtil.PROP_BASIC_AUTH_PASS);
-    myHttpClient = createHttpClient(solrCore, httpBasicAuthUser, httpBasicAuthPassword, useExternalCompression);
-  }
-  
-  protected <T> T getParameter(NamedList initArgs, String configKey, T defaultValue, StringBuilder sb) {
-    T toReturn = defaultValue;
-    if (initArgs != null) {
-      T temp = (T) initArgs.get(configKey);
-      toReturn = (temp != null) ? temp : defaultValue;
-    }
-    if(sb!=null && toReturn != null) sb.append(configKey).append(" : ").append(toReturn).append(",");
-    return toReturn;
-  }
-
-  /**
-   * Gets the latest commit version and generation from the master
-   */
-  @SuppressWarnings("unchecked")
-  NamedList getLatestVersion() throws IOException {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(COMMAND, CMD_INDEX_VERSION);
-    params.set(CommonParams.WT, JAVABIN);
-    params.set(CommonParams.QT, ReplicationHandler.PATH);
-    QueryRequest req = new QueryRequest(params);
-
-    // TODO modify to use shardhandler
-    try (HttpSolrClient client = new Builder(masterUrl)
-        .withHttpClient(myHttpClient)
-        .withConnectionTimeout(connTimeout)
-        .withSocketTimeout(soTimeout)
-        .build()) {
-
-      return client.request(req);
-    } catch (SolrServerException e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, e.getMessage(), e);
-    }
-  }
-
-  /**
-   * Fetches the list of files in a given index commit point and updates internal list of files to download.
-   */
-  private void fetchFileList(long gen) throws IOException {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(COMMAND,  CMD_GET_FILE_LIST);
-    params.set(TLOG_FILES, downloadTlogFiles);
-    params.set(GENERATION, String.valueOf(gen));
-    params.set(CommonParams.WT, JAVABIN);
-    params.set(CommonParams.QT, ReplicationHandler.PATH);
-    QueryRequest req = new QueryRequest(params);
-
-    // TODO modify to use shardhandler
-    try (HttpSolrClient client = new HttpSolrClient.Builder(masterUrl)
-        .withHttpClient(myHttpClient)
-        .withConnectionTimeout(connTimeout)
-        .withSocketTimeout(soTimeout)
-        .build()) {
-      NamedList response = client.request(req);
-
-      List<Map<String, Object>> files = (List<Map<String,Object>>) response.get(CMD_GET_FILE_LIST);
-      if (files != null)
-        filesToDownload = Collections.synchronizedList(files);
-      else {
-        filesToDownload = Collections.emptyList();
-        log.error("No files to download for index generation: "+ gen);
-      }
-
-      files = (List<Map<String,Object>>) response.get(CONF_FILES);
-      if (files != null)
-        confFilesToDownload = Collections.synchronizedList(files);
-
-      files = (List<Map<String, Object>>) response.get(TLOG_FILES);
-      if (files != null) {
-        tlogFilesToDownload = Collections.synchronizedList(files);
-      }
-    } catch (SolrServerException e) {
-      throw new IOException(e);
-    }
-  }
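-
-  // Response sketch (file names and sizes hypothetical): the file lists come
-  // back under the same keys used in the request, each entry a map of file
-  // metadata.
-  //
-  //   CMD_GET_FILE_LIST -> [{name=_0.cfs, size=1234, checksum=...}, ...]
-  //   CONF_FILES        -> [{name=schema.xml, size=..., checksum=...}, ...]
-  //   TLOG_FILES        -> [{name=..., size=...}, ...]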
-
-  IndexFetchResult fetchLatestIndex(boolean forceReplication) throws IOException, InterruptedException {
-    return fetchLatestIndex(forceReplication, false);
-  }
-
-  /**
-   * This command downloads all the necessary files from master to install an index commit point. Only changed files are
-   * downloaded. It also downloads the conf files (if they are modified).
-   *
-   * @param forceReplication force a replication in all cases
-   * @param forceCoreReload force a core reload in all cases
-   * @return the result of the fetch; {@link IndexFetchResult#ALREADY_IN_SYNC} if the slave is already in sync
-   * @throws IOException if an exception occurs
-   */
-  IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreReload) throws IOException, InterruptedException {
-
-    boolean cleanupDone = false;
-    boolean successfulInstall = false;
-    markReplicationStart();
-    Directory tmpIndexDir = null;
-    String tmpIndexDirPath;
-    Directory indexDir = null;
-    String indexDirPath;
-    boolean deleteTmpIdxDir = true;
-    File tmpTlogDir = null;
-
-    if (!solrCore.getSolrCoreState().getLastReplicateIndexSuccess()) {
-      // if the last replication was not a success, we force a full replication.
-      // Once we are a bit more confident, we may want to try a partial replication
-      // if the error is connection related or similar, but we have to be careful.
-      forceReplication = true;
-      log.info("Last replication failed, so I'll force replication");
-    }
-
-    try {
-      if (fetchFromLeader) {
-        assert !solrCore.isClosed(): "Replication should be stopped before closing the core";
-        Replica replica = getLeaderReplica();
-        CloudDescriptor cd = solrCore.getCoreDescriptor().getCloudDescriptor();
-        if (cd.getCoreNodeName().equals(replica.getName())) {
-          return IndexFetchResult.EXPECTING_NON_LEADER;
-        }
-        if (replica.getState() != Replica.State.ACTIVE) {
-          log.info("Replica {} is leader but it's state is {}, skipping replication", replica.getName(), replica.getState());
-          return IndexFetchResult.LEADER_IS_NOT_ACTIVE;
-        }
-        if (!solrCore.getCoreContainer().getZkController().getClusterState().liveNodesContain(replica.getNodeName())) {
-          log.info("Replica {} is leader but it's not hosted on a live node, skipping replication", replica.getName());
-          return IndexFetchResult.LEADER_IS_NOT_ACTIVE;
-        }
-        if (!replica.getCoreUrl().equals(masterUrl)) {
-          masterUrl = replica.getCoreUrl();
-          log.info("Updated masterUrl to {}", masterUrl);
-          // TODO: Do we need to set forceReplication = true?
-        } else {
-          log.debug("masterUrl didn't change");
-        }
-      }
-      // get the current 'replicatable' index version from the master
-      NamedList response;
-      try {
-        response = getLatestVersion();
-      } catch (Exception e) {
-        final String errorMsg = e.toString();
-        if (!Strings.isNullOrEmpty(errorMsg) && errorMsg.contains(INTERRUPT_RESPONSE_MESSAGE)) {
-          log.warn("Master at: " + masterUrl + " is not available. Index fetch failed by interrupt. Exception: " + errorMsg);
-          return new IndexFetchResult(IndexFetchResult.FAILED_BY_INTERRUPT_MESSAGE, false, e);
-        } else {
-          log.warn("Master at: " + masterUrl + " is not available. Index fetch failed by exception: " + errorMsg);
-          return new IndexFetchResult(IndexFetchResult.FAILED_BY_EXCEPTION_MESSAGE, false, e);
-        }
-      }
-
-      long latestVersion = (Long) response.get(CMD_INDEX_VERSION);
-      long latestGeneration = (Long) response.get(GENERATION);
-
-      log.info("Master's generation: " + latestGeneration);
-      log.info("Master's version: " + latestVersion);
-
-      // TODO: make sure that getLatestCommit only returns commit points for the main index (i.e. no side-car indexes)
-      IndexCommit commit = solrCore.getDeletionPolicy().getLatestCommit();
-      if (commit == null) {
-        // Presumably the IndexWriter hasn't been opened yet, and hence the deletion policy hasn't been updated with commit points
-        RefCounted<SolrIndexSearcher> searcherRefCounted = null;
-        try {
-          searcherRefCounted = solrCore.getNewestSearcher(false);
-          if (searcherRefCounted == null) {
-            log.warn("No open searcher found - fetch aborted");
-            return IndexFetchResult.NO_INDEX_COMMIT_EXIST;
-          }
-          commit = searcherRefCounted.get().getIndexReader().getIndexCommit();
-        } finally {
-          if (searcherRefCounted != null)
-            searcherRefCounted.decref();
-        }
-      }
-
-      log.info("Slave's generation: " + commit.getGeneration());
-      log.info("Slave's version: " + IndexDeletionPolicyWrapper.getCommitTimestamp(commit));
-
-      if (latestVersion == 0L) {
-        if (commit.getGeneration() != 0) {
-          // since we won't get the files for an empty index,
-          // we just clear ours and commit
-          log.info("New index in Master. Deleting mine...");
-          RefCounted<IndexWriter> iw = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(solrCore);
-          try {
-            iw.get().deleteAll();
-          } finally {
-            iw.decref();
-          }
-          assert TestInjection.injectDelayBeforeSlaveCommitRefresh();
-          if (skipCommitOnMasterVersionZero) {
-            openNewSearcherAndUpdateCommitPoint();
-          } else {
-            SolrQueryRequest req = new LocalSolrQueryRequest(solrCore, new ModifiableSolrParams());
-            solrCore.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
-          }
-        }
-
-        //there is nothing to be replicated
-        successfulInstall = true;
-        log.debug("Nothing to replicate, master's version is 0");
-        return IndexFetchResult.MASTER_VERSION_ZERO;
-      }
-
-      // TODO: Should we be comparing timestamps (across machines) here?
-      if (!forceReplication && IndexDeletionPolicyWrapper.getCommitTimestamp(commit) == latestVersion) {
-        //master and slave are already in sync just return
-        log.info("Slave in sync with master.");
-        successfulInstall = true;
-        return IndexFetchResult.ALREADY_IN_SYNC;
-      }
-      log.info("Starting replication process");
-      // get the list of files first
-      fetchFileList(latestGeneration);
-      // this can happen if the commit point is deleted before we fetch the file list.
-      if (filesToDownload.isEmpty()) {
-        return IndexFetchResult.PEER_INDEX_COMMIT_DELETED;
-      }
-      log.info("Number of files in latest index in master: " + filesToDownload.size());
-      if (tlogFilesToDownload != null) {
-        log.info("Number of tlog files in master: " + tlogFilesToDownload.size());
-      }
-
-      // Create the sync service
-      fsyncService = ExecutorUtil.newMDCAwareSingleThreadExecutor(new DefaultSolrThreadFactory("fsyncService"));
-      // use a synchronized list because the list is read by other threads (to show details)
-      filesDownloaded = Collections.synchronizedList(new ArrayList<Map<String, Object>>());
-      // if the generation of the master is older than that of the slave, the two are not compatible
-      // for an incremental copy; a new index directory must be created and all the files copied
-      boolean isFullCopyNeeded = IndexDeletionPolicyWrapper
-          .getCommitTimestamp(commit) >= latestVersion
-          || commit.getGeneration() >= latestGeneration || forceReplication;
-
-      String timestamp = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date());
-      String tmpIdxDirName = "index." + timestamp;
-      tmpIndexDirPath = solrCore.getDataDir() + tmpIdxDirName;
-
-      tmpIndexDir = solrCore.getDirectoryFactory().get(tmpIndexDirPath, DirContext.DEFAULT, solrCore.getSolrConfig().indexConfig.lockType);
-
-      // tmp dir for tlog files
-      if (tlogFilesToDownload != null) {
-        tmpTlogDir = new File(solrCore.getUpdateHandler().getUpdateLog().getLogDir(), "tlog." + timestamp);
-      }
-
-      // current index dir...
-      indexDirPath = solrCore.getIndexDir();
-      indexDir = solrCore.getDirectoryFactory().get(indexDirPath, DirContext.DEFAULT, solrCore.getSolrConfig().indexConfig.lockType);
-
-      try {
-
-        // We will compare all the index files from the master vs the index files on disk to see if there is a mismatch
-        // in the metadata. If there is a mismatch for the same index file then we download the entire index
-        // (except when differential copy is applicable) again.
-        if (!isFullCopyNeeded && isIndexStale(indexDir)) {
-          isFullCopyNeeded = true;
-        }
-
-        if (!isFullCopyNeeded && !fetchFromLeader) {
-          // a searcher might be using some flushed but not committed segments
-          // because of soft commits (which open a searcher on IW's data)
-          // so we need to close the existing searcher on the last commit
-          // and wait until we are able to clean up all unused lucene files
-          if (solrCore.getCoreContainer().isZooKeeperAware()) {
-            solrCore.closeSearcher();
-          }
-
-          // rollback and reopen index writer and wait until all unused files
-          // are successfully deleted
-          solrCore.getUpdateHandler().newIndexWriter(true);
-          RefCounted<IndexWriter> writer = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(null);
-          try {
-            IndexWriter indexWriter = writer.get();
-            int c = 0;
-            indexWriter.deleteUnusedFiles();
-            while (hasUnusedFiles(indexDir, commit)) {
-              indexWriter.deleteUnusedFiles();
-              log.info("Sleeping for 1000ms to wait for unused lucene index files to be delete-able");
-              Thread.sleep(1000);
-              c++;
-              if (c >= 30)  {
-                log.warn("IndexFetcher unable to cleanup unused lucene index files so we must do a full copy instead");
-                isFullCopyNeeded = true;
-                break;
-              }
-            }
-            if (c > 0)  {
-              log.info("IndexFetcher slept for " + (c * 1000) + "ms for unused lucene index files to be delete-able");
-            }
-          } finally {
-            writer.decref();
-          }
-        }
-        boolean reloadCore = false;
-
-        try {
-          // we have to be careful and do this after we know isFullCopyNeeded won't be flipped
-          if (!isFullCopyNeeded) {
-            solrCore.getUpdateHandler().getSolrCoreState().closeIndexWriter(solrCore, true);
-          }
-
-          log.info("Starting download (fullCopy={}) to {}", isFullCopyNeeded, tmpIndexDir);
-          successfulInstall = false;
-
-          long bytesDownloaded = downloadIndexFiles(isFullCopyNeeded, indexDir,
-              tmpIndexDir, indexDirPath, tmpIndexDirPath, latestGeneration);
-          if (tlogFilesToDownload != null) {
-            bytesDownloaded += downloadTlogFiles(tmpTlogDir, latestGeneration);
-            reloadCore = true; // reload update log
-          }
-          final long timeTakenSeconds = getReplicationTimeElapsed();
-          final Long bytesDownloadedPerSecond = (timeTakenSeconds != 0 ? Long.valueOf(bytesDownloaded / timeTakenSeconds) : null);
-          log.info("Total time taken for download (fullCopy={},bytesDownloaded={}) : {} secs ({} bytes/sec) to {}",
-              isFullCopyNeeded, bytesDownloaded, timeTakenSeconds, bytesDownloadedPerSecond, tmpIndexDir);
-
-          Collection<Map<String,Object>> modifiedConfFiles = getModifiedConfFiles(confFilesToDownload);
-          if (!modifiedConfFiles.isEmpty()) {
-            reloadCore = true;
-            downloadConfFiles(confFilesToDownload, latestGeneration);
-            if (isFullCopyNeeded) {
-              successfulInstall = solrCore.modifyIndexProps(tmpIdxDirName);
-              if (successfulInstall) deleteTmpIdxDir = false;
-            } else {
-              successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
-            }
-            if (tlogFilesToDownload != null) {
-              // move tlog files and refresh ulog only if we successfully installed a new index
-              successfulInstall &= moveTlogFiles(tmpTlogDir);
-            }
-            if (successfulInstall) {
-              if (isFullCopyNeeded) {
-                // let the system know we are changing dir's and the old one
-                // may be closed
-                if (indexDir != null) {
-                  solrCore.getDirectoryFactory().doneWithDirectory(indexDir);
-                  // Cleanup all index files not associated with any *named* snapshot.
-                  solrCore.deleteNonSnapshotIndexFiles(indexDirPath);
-                }
-              }
-
-              log.info("Configuration files are modified, core will be reloaded");
-              logReplicationTimeAndConfFiles(modifiedConfFiles,
-                  successfulInstall);// write to a file time of replication and
-                                     // conf files.
-            }
-          } else {
-            terminateAndWaitFsyncService();
-            if (isFullCopyNeeded) {
-              successfulInstall = solrCore.modifyIndexProps(tmpIdxDirName);
-              if (successfulInstall) deleteTmpIdxDir = false;
-            } else {
-              successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
-            }
-            if (tlogFilesToDownload != null) {
-              // move tlog files and refresh ulog only if we successfully installed a new index
-              successfulInstall &= moveTlogFiles(tmpTlogDir);
-            }
-            if (successfulInstall) {
-              logReplicationTimeAndConfFiles(modifiedConfFiles,
-                  successfulInstall);
-            }
-          }
-        } finally {
-          if (!isFullCopyNeeded) {
-            solrCore.getUpdateHandler().getSolrCoreState().openIndexWriter(solrCore);
-          }
-        }
-
-        // we must reload the core after we open the IW back up
-        if (successfulInstall && (reloadCore || forceCoreReload)) {
-          log.info("Reloading SolrCore {}", solrCore.getName());
-          reloadCore();
-        }
-
-        if (successfulInstall) {
-          if (isFullCopyNeeded) {
-            // let the system know we are changing dir's and the old one
-            // may be closed
-            if (indexDir != null) {
-              log.info("removing old index directory " + indexDir);
-              solrCore.getDirectoryFactory().doneWithDirectory(indexDir);
-              solrCore.getDirectoryFactory().remove(indexDir);
-            }
-          }
-          if (isFullCopyNeeded) {
-            solrCore.getUpdateHandler().newIndexWriter(isFullCopyNeeded);
-          }
-
-          openNewSearcherAndUpdateCommitPoint();
-        }
-
-        if (!isFullCopyNeeded && !forceReplication && !successfulInstall) {
-          cleanup(solrCore, tmpIndexDir, indexDir, deleteTmpIdxDir, tmpTlogDir, successfulInstall);
-          cleanupDone = true;
-          // we try with a full copy of the index
-          log.warn(
-              "Replication attempt was not successful - trying a full index replication reloadCore={}",
-              reloadCore);
-          successfulInstall = fetchLatestIndex(true, reloadCore).getSuccessful();
-        }
-
-        markReplicationStop();
-        return successfulInstall ? IndexFetchResult.INDEX_FETCH_SUCCESS : IndexFetchResult.INDEX_FETCH_FAILURE;
-      } catch (ReplicationHandlerException e) {
-        log.error("User aborted Replication");
-        return new IndexFetchResult(IndexFetchResult.FAILED_BY_EXCEPTION_MESSAGE, false, e);
-      } catch (SolrException e) {
-        throw e;
-      } catch (InterruptedException e) {
-        throw new InterruptedException("Index fetch interrupted");
-      } catch (Exception e) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Index fetch failed : ", e);
-      }
-    } finally {
-      if (!cleanupDone) {
-        cleanup(solrCore, tmpIndexDir, indexDir, deleteTmpIdxDir, tmpTlogDir, successfulInstall);
-      }
-    }
-  }
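-
-  // Decision sketch for the full-copy check made above (the names below are
-  // stand-ins for the values compared in fetchLatestIndex, not real fields):
-  // a full copy is forced when the slave is not strictly behind the master,
-  // when replication is forced, or when the on-disk index turns out stale.
-  //
-  //   boolean isFullCopyNeeded = slaveCommitTimestamp >= masterVersion
-  //       || slaveGeneration >= masterGeneration
-  //       || forceReplication
-  //       || isIndexStale(indexDir);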
-
-  private Replica getLeaderReplica() throws InterruptedException {
-    ZkController zkController = solrCore.getCoreContainer().getZkController();
-    CloudDescriptor cd = solrCore.getCoreDescriptor().getCloudDescriptor();
-    Replica leaderReplica = zkController.getZkStateReader().getLeaderRetry(
-        cd.getCollectionName(), cd.getShardId());
-    return leaderReplica;
-  }
-
-  private void cleanup(final SolrCore core, Directory tmpIndexDir,
-      Directory indexDir, boolean deleteTmpIdxDir, File tmpTlogDir, boolean successfulInstall) throws IOException {
-    try {
-      if (!successfulInstall) {
-        try {
-          logReplicationTimeAndConfFiles(null, successfulInstall);
-        } catch (Exception e) {
-          // this can happen on shutdown, a fetch may be running in a thread after DirectoryFactory is closed
-          log.warn("Could not log failed replication details", e);
-        }
-      }
-
-      if (core.getCoreContainer().isZooKeeperAware()) {
-        // we only track replication success in SolrCloud mode
-        core.getUpdateHandler().getSolrCoreState().setLastReplicateIndexSuccess(successfulInstall);
-      }
-
-      filesToDownload = filesDownloaded = confFilesDownloaded = confFilesToDownload = tlogFilesToDownload = tlogFilesDownloaded = null;
-      markReplicationStop();
-      dirFileFetcher = null;
-      localFileFetcher = null;
-      if (fsyncService != null && !fsyncService.isShutdown()) fsyncService.shutdown();
-      fsyncService = null;
-      stop = false;
-      fsyncException = null;
-    } finally {
-      // order below is important
-      try {
-        if (tmpIndexDir != null && deleteTmpIdxDir) {
-          core.getDirectoryFactory().doneWithDirectory(tmpIndexDir);
-          core.getDirectoryFactory().remove(tmpIndexDir);
-        }
-      } catch (Exception e) {
-        SolrException.log(log, e);
-      } finally {
-        try {
-          if (tmpIndexDir != null) core.getDirectoryFactory().release(tmpIndexDir);
-        } catch (Exception e) {
-          SolrException.log(log, e);
-        }
-        try {
-          if (indexDir != null) {
-            core.getDirectoryFactory().release(indexDir);
-          }
-        } catch (Exception e) {
-          SolrException.log(log, e);
-        }
-        try {
-          if (tmpTlogDir != null) delTree(tmpTlogDir);
-        } catch (Exception e) {
-          SolrException.log(log, e);
-        }
-      }
-    }
-  }
-
-  private boolean hasUnusedFiles(Directory indexDir, IndexCommit commit) throws IOException {
-    String segmentsFileName = commit.getSegmentsFileName();
-    SegmentInfos infos = SegmentInfos.readCommit(indexDir, segmentsFileName);
-    Set<String> currentFiles = new HashSet<>(infos.files(true));
-    String[] allFiles = indexDir.listAll();
-    for (String file : allFiles) {
-      if (!file.equals(segmentsFileName) && !currentFiles.contains(file) && !file.endsWith(".lock")) {
-        log.info("Found unused file: " + file);
-        return true;
-      }
-    }
-    return false;
-  }
-
-  private volatile Exception fsyncException;
-
-  /**
-   * Terminate the fsync service and wait for all its tasks to complete. If it is already terminated,
-   * this is a no-op.
-   */
-  private void terminateAndWaitFsyncService() throws Exception {
-    if (fsyncService.isTerminated()) return;
-    fsyncService.shutdown();
-    // give a long wait, say 1 hour
-    fsyncService.awaitTermination(3600, TimeUnit.SECONDS);
-    // if any fsync failed, throw that exception back
-    Exception fsyncExceptionCopy = fsyncException;
-    if (fsyncExceptionCopy != null) throw fsyncExceptionCopy;
-  }
-
-  /**
-   * Helper method to record the last replication's details so that we can show them on the statistics page across
-   * restarts.
-   * @throws IOException on IO error
-   */
-  @SuppressForbidden(reason = "Need currentTimeMillis for debugging/stats")
-  private void logReplicationTimeAndConfFiles(Collection<Map<String, Object>> modifiedConfFiles, boolean successfulInstall) throws IOException {
-    List<String> confFiles = new ArrayList<>();
-    if (modifiedConfFiles != null && !modifiedConfFiles.isEmpty())
-      for (Map<String, Object> map1 : modifiedConfFiles)
-        confFiles.add((String) map1.get(NAME));
-
-    Properties props = replicationHandler.loadReplicationProperties();
-    long replicationTime = System.currentTimeMillis();
-    long replicationTimeTaken = getReplicationTimeElapsed();
-    Directory dir = null;
-    try {
-      dir = solrCore.getDirectoryFactory().get(solrCore.getDataDir(), DirContext.META_DATA, solrCore.getSolrConfig().indexConfig.lockType);
-
-      int indexCount = 1, confFilesCount = 1;
-      if (props.containsKey(TIMES_INDEX_REPLICATED)) {
-        indexCount = Integer.parseInt(props.getProperty(TIMES_INDEX_REPLICATED)) + 1;
-      }
-      StringBuilder sb = readToStringBuilder(replicationTime, props.getProperty(INDEX_REPLICATED_AT_LIST));
-      props.setProperty(INDEX_REPLICATED_AT_LIST, sb.toString());
-      props.setProperty(INDEX_REPLICATED_AT, String.valueOf(replicationTime));
-      props.setProperty(PREVIOUS_CYCLE_TIME_TAKEN, String.valueOf(replicationTimeTaken));
-      props.setProperty(TIMES_INDEX_REPLICATED, String.valueOf(indexCount));
-      if (modifiedConfFiles != null && !modifiedConfFiles.isEmpty()) {
-        props.setProperty(CONF_FILES_REPLICATED, confFiles.toString());
-        props.setProperty(CONF_FILES_REPLICATED_AT, String.valueOf(replicationTime));
-        if (props.containsKey(TIMES_CONFIG_REPLICATED)) {
-          confFilesCount = Integer.parseInt(props.getProperty(TIMES_CONFIG_REPLICATED)) + 1;
-        }
-        props.setProperty(TIMES_CONFIG_REPLICATED, String.valueOf(confFilesCount));
-      }
-
-      props.setProperty(LAST_CYCLE_BYTES_DOWNLOADED, String.valueOf(getTotalBytesDownloaded()));
-      if (!successfulInstall) {
-        int numFailures = 1;
-        if (props.containsKey(TIMES_FAILED)) {
-          numFailures = Integer.parseInt(props.getProperty(TIMES_FAILED)) + 1;
-        }
-        props.setProperty(TIMES_FAILED, String.valueOf(numFailures));
-        props.setProperty(REPLICATION_FAILED_AT, String.valueOf(replicationTime));
-        sb = readToStringBuilder(replicationTime, props.getProperty(REPLICATION_FAILED_AT_LIST));
-        props.setProperty(REPLICATION_FAILED_AT_LIST, sb.toString());
-      }
-      
-      
-      String tmpFileName = REPLICATION_PROPERTIES + "." + System.nanoTime();
-      final IndexOutput out = dir.createOutput(tmpFileName, DirectoryFactory.IOCONTEXT_NO_CACHE);
-      Writer outFile = new OutputStreamWriter(new PropertiesOutputStream(out), StandardCharsets.UTF_8);
-      try {
-        props.store(outFile, "Replication details");
-        dir.sync(Collections.singleton(tmpFileName));
-      } finally {
-        IOUtils.closeQuietly(outFile);
-      }
-      
-      solrCore.getDirectoryFactory().renameWithOverwrite(dir, tmpFileName, REPLICATION_PROPERTIES);
-    } catch (Exception e) {
-      log.warn("Exception while updating statistics", e);
-    } finally {
-      if (dir != null) {
-        solrCore.getDirectoryFactory().release(dir);
-      }
-    }
-  }
-
-  long getTotalBytesDownloaded() {
-    long bytesDownloaded = 0;
-    //get size from list of files to download
-    for (Map<String, Object> file : getFilesDownloaded()) {
-      bytesDownloaded += (Long) file.get(SIZE);
-    }
-
-    //get size from list of conf files to download
-    for (Map<String, Object> file : getConfFilesDownloaded()) {
-      bytesDownloaded += (Long) file.get(SIZE);
-    }
-
-    //get size from current file being downloaded
-    Map<String, Object> currentFile = getCurrentFile();
-    if (currentFile != null) {
-      if (currentFile.containsKey("bytesDownloaded")) {
-        bytesDownloaded += (Long) currentFile.get("bytesDownloaded");
-      }
-    }
-    return bytesDownloaded;
-  }
-
-  private StringBuilder readToStringBuilder(long replicationTime, String str) {
-    StringBuilder sb = new StringBuilder();
-    List<String> l = new ArrayList<>();
-    if (str != null && str.length() != 0) {
-      String[] ss = str.split(",");
-      Collections.addAll(l, ss);
-    }
-    sb.append(replicationTime);
-    if (!l.isEmpty()) {
-      for (int i = 0; i < l.size() && i < 9; i++) {
-        sb.append(",").append(l.get(i));
-      }
-    }
-    return sb;
-  }
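-
-  // Behavior sketch (timestamps hypothetical): the new timestamp is prepended
-  // and at most the nine most recent prior entries are kept.
-  //
-  //   readToStringBuilder(100, "90,80,70") -> "100,90,80,70"
-  //   with ten or more prior entries, only the first nine are retained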
-
-  private void openNewSearcherAndUpdateCommitPoint() throws IOException {
-    RefCounted<SolrIndexSearcher> searcher = null;
-    IndexCommit commitPoint;
-    // must get the latest solrCore object because the one we have might be closed because of a reload
-    // todo stop keeping solrCore around
-    SolrCore core = solrCore.getCoreContainer().getCore(solrCore.getName());
-    try {
-      Future[] waitSearcher = new Future[1];
-      searcher = core.getSearcher(true, true, waitSearcher, true);
-      if (waitSearcher[0] != null) {
-        try {
-          waitSearcher[0].get();
-        } catch (InterruptedException | ExecutionException e) {
-          SolrException.log(log, e);
-        }
-      }
-      commitPoint = searcher.get().getIndexReader().getIndexCommit();
-    } finally {
-      if (searcher != null) {
-        searcher.decref();
-      }
-      core.close();
-    }
-
-    // update the commit point in replication handler
-    replicationHandler.indexCommitPoint = commitPoint;
-
-  }
-
-  private void reloadCore() {
-    final CountDownLatch latch = new CountDownLatch(1);
-    new Thread(() -> {
-      try {
-        solrCore.getCoreContainer().reload(solrCore.getName());
-      } catch (Exception e) {
-        log.error("Could not reload core ", e);
-      } finally {
-        latch.countDown();
-      }
-    }).start();
-    try {
-      latch.await();
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      throw new RuntimeException("Interrupted while waiting for core reload to finish", e);
-    }
-  }
-
-  private void downloadConfFiles(List<Map<String, Object>> confFilesToDownload, long latestGeneration) throws Exception {
-    log.info("Starting download of configuration files from master: " + confFilesToDownload);
-    confFilesDownloaded = Collections.synchronizedList(new ArrayList<>());
-    File tmpconfDir = new File(solrCore.getResourceLoader().getConfigDir(), "conf." + getDateAsStr(new Date()));
-    try {
-      boolean status = tmpconfDir.mkdirs();
-      if (!status) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-                "Failed to create temporary config folder: " + tmpconfDir.getName());
-      }
-      for (Map<String, Object> file : confFilesToDownload) {
-        String saveAs = (String) (file.get(ALIAS) == null ? file.get(NAME) : file.get(ALIAS));
-        localFileFetcher = new LocalFsFileFetcher(tmpconfDir, file, saveAs, CONF_FILE_SHORT, latestGeneration);
-        currentFile = file;
-        localFileFetcher.fetchFile();
-        confFilesDownloaded.add(new HashMap<>(file));
-      }
-      // this is called before copying the files to the original conf dir,
-      // so that an exception here does not corrupt the original files.
-      terminateAndWaitFsyncService();
-      copyTmpConfFiles2Conf(tmpconfDir);
-    } finally {
-      delTree(tmpconfDir);
-    }
-  }
-
-  /**
-   * Download all the tlog files to the temp tlog directory.
-   */
-  private long downloadTlogFiles(File tmpTlogDir, long latestGeneration) throws Exception {
-    log.info("Starting download of tlog files from master: " + tlogFilesToDownload);
-    tlogFilesDownloaded = Collections.synchronizedList(new ArrayList<>());
-    long bytesDownloaded = 0;
-
-    boolean status = tmpTlogDir.mkdirs();
-    if (!status) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Failed to create temporary tlog folder: " + tmpTlogDir.getName());
-    }
-    for (Map<String, Object> file : tlogFilesToDownload) {
-      String saveAs = (String) (file.get(ALIAS) == null ? file.get(NAME) : file.get(ALIAS));
-      localFileFetcher = new LocalFsFileFetcher(tmpTlogDir, file, saveAs, TLOG_FILE, latestGeneration);
-      currentFile = file;
-      localFileFetcher.fetchFile();
-      bytesDownloaded += localFileFetcher.getBytesDownloaded();
-      tlogFilesDownloaded.add(new HashMap<>(file));
-    }
-    return bytesDownloaded;
-  }
-
-  /**
-   * Download the index files. If a new index is needed, download all the files.
-   *
-   * @param downloadCompleteIndex whether a complete fresh index copy is needed
-   * @param indexDir              the index dir to be merged into
-   * @param tmpIndexDir           the directory to which files are downloaded
-   * @param indexDirPath          the path of indexDir
-   * @param tmpIndexDirPath       the path of tmpIndexDir
-   * @param latestGeneration      the latest index generation on the master
-   *
-   * @return number of bytes downloaded
-   */
-  private long downloadIndexFiles(boolean downloadCompleteIndex, Directory indexDir, Directory tmpIndexDir,
-                                  String indexDirPath, String tmpIndexDirPath, long latestGeneration)
-      throws Exception {
-    if (log.isDebugEnabled()) {
-      log.debug("Download files to dir: " + Arrays.asList(indexDir.listAll()));
-    }
-    long bytesDownloaded = 0;
-    long bytesSkippedCopying = 0;
-    boolean doDifferentialCopy = (indexDir instanceof FSDirectory ||
-        (indexDir instanceof FilterDirectory && FilterDirectory.unwrap(indexDir) instanceof FSDirectory))
-        && (tmpIndexDir instanceof FSDirectory ||
-        (tmpIndexDir instanceof FilterDirectory && FilterDirectory.unwrap(tmpIndexDir) instanceof FSDirectory));
-
-    for (Map<String,Object> file : filesToDownload) {
-      String filename = (String) file.get(NAME);
-      long size = (Long) file.get(SIZE);
-      CompareResult compareResult = compareFile(indexDir, filename, size, (Long) file.get(CHECKSUM));
-      boolean alwaysDownload = filesToAlwaysDownloadIfNoChecksums(filename, size, compareResult);
-      log.debug("Downloading file={} size={} checksum={} alwaysDownload={}", filename, size, file.get(CHECKSUM), alwaysDownload);
-      if (!compareResult.equal || downloadCompleteIndex || alwaysDownload) {
-        File localFile = new File(indexDirPath, filename);
-        if (downloadCompleteIndex && doDifferentialCopy && compareResult.equal && compareResult.checkSummed
-            && localFile.exists()) {
-          log.info("Don't need to download this file. Local file's path is: {}, checksum is: {}",
-              localFile.getAbsolutePath(), file.get(CHECKSUM));
-          // A hard link here should survive the eventual directory move, and should be more space efficient as
-          // compared to a file copy. TODO: Maybe we could do a move safely here?
-          Files.createLink(new File(tmpIndexDirPath, filename).toPath(), localFile.toPath());
-          bytesSkippedCopying += localFile.length();
-        } else {
-          dirFileFetcher = new DirectoryFileFetcher(tmpIndexDir, file,
-              (String) file.get(NAME), FILE, latestGeneration);
-          currentFile = file;
-          dirFileFetcher.fetchFile();
-          bytesDownloaded += dirFileFetcher.getBytesDownloaded();
-        }
-        filesDownloaded.add(new HashMap<>(file));
-      } else {
-        log.debug("Skipping download for {} because it already exists", file.get(NAME));
-      }
-    }
-    log.info("Bytes downloaded: {}, Bytes skipped downloading: {}", bytesDownloaded, bytesSkippedCopying);
-    return bytesDownloaded;
-  }
-  
-  static boolean filesToAlwaysDownloadIfNoChecksums(String filename,
-      long size, CompareResult compareResult) {
-    // without checksums to compare, we always download .si, .liv, segments_N,
-    // and any very small files
-    return !compareResult.checkSummed && (filename.endsWith(".si") || filename.endsWith(".liv")
-        || filename.startsWith("segments_") || size < _100K);
-  }
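-
-  // Examples of the rule above (file names hypothetical): with no checksum to
-  // compare, "_1.si", "_1.liv", "segments_5", or any file smaller than the
-  // _100K threshold is always re-downloaded; a checksummed file that compared
-  // equal is not.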
-
-  protected static class CompareResult {
-    boolean equal = false;
-    boolean checkSummed = false;
-  }
-
-  protected static CompareResult compareFile(Directory indexDir, String filename, Long backupIndexFileLen, Long backupIndexFileChecksum) {
-    CompareResult compareResult = new CompareResult();
-    try {
-      try (final IndexInput indexInput = indexDir.openInput(filename, IOContext.READONCE)) {
-        long indexFileLen = indexInput.length();
-        long indexFileChecksum = 0;
-        
-        if (backupIndexFileChecksum != null) {
-          try {
-            indexFileChecksum = CodecUtil.retrieveChecksum(indexInput);
-            compareResult.checkSummed = true;
-          } catch (Exception e) {
-            log.warn("Could not retrieve checksum from file.", e);
-          }
-        }
-
-        if (!compareResult.checkSummed) {
-          // we don't have checksums to compare
-
-          if (indexFileLen == backupIndexFileLen) {
-            compareResult.equal = true;
-            return compareResult;
-          } else {
-            log.info(
-                "File {} did not match. expected length is {} and actual length is {}", filename, backupIndexFileLen, indexFileLen);
-            compareResult.equal = false;
-            return compareResult;
-          }
-        }
-
-        // we have checksums to compare
-
-        if (indexFileLen == backupIndexFileLen && indexFileChecksum == backupIndexFileChecksum) {
-          compareResult.equal = true;
-          return compareResult;
-        } else {
-          log.warn("File {} did not match. expected checksum is {} and actual is checksum {}. " +
-              "expected length is {} and actual length is {}", filename, backupIndexFileChecksum, indexFileChecksum,
-              backupIndexFileLen, indexFileLen);
-          compareResult.equal = false;
-          return compareResult;
-        }
-      }
-    } catch (NoSuchFileException | FileNotFoundException e) {
-      compareResult.equal = false;
-      return compareResult;
-    } catch (IOException e) {
-      log.error("Could not read file " + filename + ". Downloading it again", e);
-      compareResult.equal = false;
-      return compareResult;
-    }
-  }
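-
-  // Usage sketch mirroring isIndexStale() below (file name, length and
-  // checksum hypothetical):
-  //
-  //   CompareResult r = compareFile(indexDir, "_0.cfs", 1234L, 987654321L);
-  //   boolean mustDownload = !r.equal;  // re-download on any mismatch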
-
-  /** Returns true if the file exists (can be opened), false
-   *  if it cannot be opened, and (unlike Java's
-   *  File.exists) throws IOException if there's some
-   *  unexpected error. */
-  private static boolean slowFileExists(Directory dir, String fileName) throws IOException {
-    try {
-      dir.openInput(fileName, IOContext.DEFAULT).close();
-      return true;
-    } catch (NoSuchFileException | FileNotFoundException e) {
-      return false;
-    }
-  }
-
-  /**
-   * All the files which are common between master and slave must have the same size and the same checksum,
-   * else we assume they are not compatible (stale).
-   *
-   * @return true if the index is stale and we need to download a fresh copy, false otherwise.
-   * @throws IOException if a low level IO error occurs
-   */
-  private boolean isIndexStale(Directory dir) throws IOException {
-    for (Map<String, Object> file : filesToDownload) {
-      String filename = (String) file.get(NAME);
-      Long length = (Long) file.get(SIZE);
-      Long checksum = (Long) file.get(CHECKSUM);
-      if (slowFileExists(dir, filename)) {
-        if (checksum != null) {
-          if (!(compareFile(dir, filename, length, checksum).equal)) {
-            // file exists and size or checksum is different, therefore we must download it again
-            return true;
-          }
-        } else {
-          if (length != dir.fileLength(filename)) {
-            log.warn("File {} did not match. expected length is {} and actual length is {}",
-                filename, length, dir.fileLength(filename));
-            return true;
-          }
-        }
-      }
-    }
-    return false;
-  }
-
-  /**
-   * Move a single file from the temp index dir to the index dir via the {@link DirectoryFactory}.
-   * If the move fails, the replication attempt is considered a failure.
-   */
-  private boolean moveAFile(Directory tmpIdxDir, Directory indexDir, String fname) {
-    log.debug("Moving file: {}", fname);
-    boolean success = false;
-    try {
-      if (slowFileExists(indexDir, fname)) {
-        log.warn("Cannot complete replication attempt because file already exists:" + fname);
-        
-        // we fail - we downloaded the files we need, if we can't move one in, we can't
-        // count on the correct index
-        return false;
-      }
-    } catch (IOException e) {
-      SolrException.log(log, "could not check if a file exists", e);
-      return false;
-    }
-    try {
-      solrCore.getDirectoryFactory().move(tmpIdxDir, indexDir, fname, DirectoryFactory.IOCONTEXT_NO_CACHE);
-      success = true;
-    } catch (IOException e) {
-      SolrException.log(log, "Could not move file", e);
-    }
-    return success;
-  }
-
-  /**
-   * Copy all index files from the temp index dir to the actual index. The segments_N file is copied last.
-   */
-  private boolean moveIndexFiles(Directory tmpIdxDir, Directory indexDir) {
-    if (log.isDebugEnabled()) {
-      try {
-        log.info("From dir files:" + Arrays.asList(tmpIdxDir.listAll()));
-        log.info("To dir files:" + Arrays.asList(indexDir.listAll()));
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      }
-    }
-    String segmentsFile = null;
-    for (Map<String, Object> f : filesDownloaded) {
-      String fname = (String) f.get(NAME);
-      // the segments file must be moved last; otherwise, if a failure
-      // occurs in between, the index ends up corrupted
-      if (fname.startsWith("segments_")) {
-        segmentsFile = fname;
-        continue;
-      }
-      if (!moveAFile(tmpIdxDir, indexDir, fname)) return false;
-    }
-    //copy the segments file last
-    if (segmentsFile != null) {
-      if (!moveAFile(tmpIdxDir, indexDir, segmentsFile)) return false;
-    }
-    return true;
-  }
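-
-  // Ordering sketch (file names hypothetical): given downloaded files
-  // ["_0.cfs", "segments_3", "_0.si"], the loop above moves "_0.cfs" and
-  // "_0.si" first and "segments_3" last, so a crash mid-move never publishes
-  // a commit point that references missing files.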
-
-  /**
-   * <p>
-   *   Copy all the tlog files from the temp tlog dir to the actual tlog dir, and reset
-   *   the {@link UpdateLog}. The copy will try to preserve the original tlog directory
-   *   if the copy fails.
-   * </p>
-   * <p>
-   *   This assumes that the tlog files transferred from the leader are in sync with the
-   *   index files transferred from the leader. The reset of the update log relies on the version
-   *   of the latest operations found in the tlog files. If the tlogs are ahead of the latest commit
-   *   point, it will not copy all the needed buffered updates for the replay and it will miss
-   *   some operations.
-   * </p>
-   */
-  private boolean moveTlogFiles(File tmpTlogDir) {
-    UpdateLog ulog = solrCore.getUpdateHandler().getUpdateLog();
-
-    VersionInfo vinfo = ulog.getVersionInfo();
-    vinfo.blockUpdates(); // block updates until the new update log is initialised
-    try {
-      // reset the update log before copying the new tlog directory
-      CdcrUpdateLog.BufferedUpdates bufferedUpdates = ((CdcrUpdateLog) ulog).resetForRecovery();
-      // try to move the temp tlog files to the tlog directory
-      if (!copyTmpTlogFiles2Tlog(tmpTlogDir)) return false;
-      // reinitialise the update log and copy the buffered updates
-      if (bufferedUpdates.tlog != null) {
-        // map file path to its new backup location
-        File parentDir = FileSystems.getDefault().getPath(solrCore.getUpdateHandler().getUpdateLog().getLogDir()).getParent().toFile();
-        File backupTlogDir = new File(parentDir, tmpTlogDir.getName());
-        bufferedUpdates.tlog = new File(backupTlogDir, bufferedUpdates.tlog.getName());
-      }
-      // init the update log with the new set of tlog files, and copy the buffered updates
-      ((CdcrUpdateLog) ulog).initForRecovery(bufferedUpdates.tlog, bufferedUpdates.offset);
-    }
-    catch (Exception e) {
-      log.error("Unable to copy tlog files", e);
-      return false;
-    }
-    finally {
-      vinfo.unblockUpdates();
-    }
-    return true;
-  }
-
-  /**
-   * Recursively collect all regular files under the given directory.
-   */
-  private List<File> makeTmpConfDirFileList(File dir, List<File> fileList) {
-    File[] files = dir.listFiles();
-    if (files == null) return fileList; // not a directory, or an IO error occurred
-    for (File file : files) {
-      if (file.isFile()) {
-        fileList.add(file);
-      } else if (file.isDirectory()) {
-        fileList = makeTmpConfDirFileList(file, fileList);
-      }
-    }
-    return fileList;
-  }
-
-  /**
-   * The conf files are copied from the tmp dir to the conf dir. A backup of the old file is maintained.
-   */
-  private void copyTmpConfFiles2Conf(File tmpconfDir) {
-    boolean status = false;
-    File confDir = new File(solrCore.getResourceLoader().getConfigDir());
-    for (File file : makeTmpConfDirFileList(tmpconfDir, new ArrayList<>())) {
-      File oldFile = new File(confDir, file.getPath().substring(tmpconfDir.getPath().length(), file.getPath().length()));
-      if (!oldFile.getParentFile().exists()) {
-        status = oldFile.getParentFile().mkdirs();
-        if (!status) {
-          throw new SolrException(ErrorCode.SERVER_ERROR,
-                  "Unable to mkdirs: " + oldFile.getParentFile());
-        }
-      }
-      if (oldFile.exists()) {
-        File backupFile = new File(oldFile.getPath() + "." + getDateAsStr(new Date(oldFile.lastModified())));
-        if (!backupFile.getParentFile().exists()) {
-          status = backupFile.getParentFile().mkdirs();
-          if (!status) {
-            throw new SolrException(ErrorCode.SERVER_ERROR,
-                    "Unable to mkdirs: " + backupFile.getParentFile());
-          }
-        }
-        status = oldFile.renameTo(backupFile);
-        if (!status) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-                  "Unable to rename: " + oldFile + " to: " + backupFile);
-        }
-      }
-      status = file.renameTo(oldFile);
-      if (!status) {
-        throw new SolrException(ErrorCode.SERVER_ERROR,
-                "Unable to rename: " + file + " to: " + oldFile);
-      }
-    }
-  }
-
-  /**
-   * The tlog files are moved from the tmp dir to the tlog dir as an atomic filesystem operation.
-   * A backup of the old directory is maintained. If the directory move fails, it will try to revert back the original
-   * tlog directory.
-   */
-  private boolean copyTmpTlogFiles2Tlog(File tmpTlogDir) {
-    Path tlogDir = FileSystems.getDefault().getPath(solrCore.getUpdateHandler().getUpdateLog().getLogDir());
-    Path backupTlogDir = FileSystems.getDefault().getPath(tlogDir.getParent().toAbsolutePath().toString(), tmpTlogDir.getName());
-
-    try {
-      Files.move(tlogDir, backupTlogDir, StandardCopyOption.ATOMIC_MOVE);
-    } catch (IOException e) {
-      SolrException.log(log, "Unable to rename: " + tlogDir + " to: " + backupTlogDir, e);
-      return false;
-    }
-
-    Path src = FileSystems.getDefault().getPath(backupTlogDir.toAbsolutePath().toString(), tmpTlogDir.getName());
-    try {
-      Files.move(src, tlogDir, StandardCopyOption.ATOMIC_MOVE);
-    } catch (IOException e) {
-      SolrException.log(log, "Unable to rename: " + src + " to: " + tlogDir, e);
-
-      // In case of error, try to revert back the original tlog directory
-      try {
-        Files.move(backupTlogDir, tlogDir, StandardCopyOption.ATOMIC_MOVE);
-      } catch (IOException e2) {
-        // bad, we were not able to revert back the original tlog directory
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "Unable to rename: " + backupTlogDir + " to: " + tlogDir);
-      }
-
-      return false;
-    }
-
-    return true;
-  }
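-
-  // Path sketch (timestamp hypothetical): the temp tlog dir is created inside
-  // the live tlog dir (see fetchLatestIndex), so the backup move carries the
-  // downloaded files along, and the install move then lifts them into place.
-  //
-  //   data/tlog/tlog.20181023                        downloaded temp files
-  //   data/tlog -> data/tlog.20181023                backup move
-  //   data/tlog.20181023/tlog.20181023 -> data/tlog  install move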
-
-  private String getDateAsStr(Date d) {
-    return new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(d);
-  }
-
-  private final Map<String, FileInfo> confFileInfoCache = new HashMap<>();
-
-  /**
-   * The local conf files are compared with the conf files on the master. If they are the same (by checksum), they are not copied.
-   *
-   * @param confFilesToDownload The list of files obtained from master
-   *
-   * @return a list of configuration files which have changed on the master and need to be downloaded.
-   */
-  private Collection<Map<String, Object>> getModifiedConfFiles(List<Map<String, Object>> confFilesToDownload) {
-    if (confFilesToDownload == null || confFilesToDownload.isEmpty())
-      return Collections.EMPTY_LIST;
-    //build a map with alias/name as the key
-    Map<String, Map<String, Object>> nameVsFile = new HashMap<>();
-    NamedList names = new NamedList();
-    for (Map<String, Object> map : confFilesToDownload) {
-      //if alias is present that is the name the file may have in the slave
-      String name = (String) (map.get(ALIAS) == null ? map.get(NAME) : map.get(ALIAS));
-      nameVsFile.put(name, map);
-      names.add(name, null);
-    }
-    //get the details of the local conf files with the same alias/name
-    List<Map<String, Object>> localFilesInfo = replicationHandler.getConfFileInfoFromCache(names, confFileInfoCache);
-    // compare their size/checksum to see if they need to be downloaded
-    for (Map<String, Object> fileInfo : localFilesInfo) {
-      String name = (String) fileInfo.get(NAME);
-      Map<String, Object> m = nameVsFile.get(name);
-      if (m == null) continue; // the file is not even present locally (so must be downloaded)
-      if (m.get(CHECKSUM).equals(fileInfo.get(CHECKSUM))) {
-        nameVsFile.remove(name); //checksums are same so the file need not be downloaded
-      }
-    }
-    return nameVsFile.isEmpty() ? Collections.EMPTY_LIST : nameVsFile.values();
-  }
-
-  /**
-   * This simulates File.delete exception-wise, since that method has some strange behavior.
-   * The only difference is that it returns null on success, rethrows SecurityException, and
-   * otherwise returns the Throwable preventing deletion (instead of false), for additional information.
-   */
-  static Throwable delete(File file) {
-    try {
-      Files.delete(file.toPath());
-      return null;
-    } catch (SecurityException e) {
-      throw e;
-    } catch (Throwable other) {
-      return other;
-    }
-  }
-
-  static boolean delTree(File dir) {
-    try {
-      org.apache.lucene.util.IOUtils.rm(dir.toPath());
-      return true;
-    } catch (IOException e) {
-      log.warn("Unable to delete directory : " + dir, e);
-      return false;
-    }
-  }
-
-  /**
-   * Stops the ongoing fetch
-   */
-  void abortFetch() {
-    stop = true;
-  }
-
-  @SuppressForbidden(reason = "Need currentTimeMillis for debugging/stats")
-  private void markReplicationStart() {
-    replicationTimer = new RTimer();
-    replicationStartTimeStamp = new Date();
-  }
-
-  private void markReplicationStop() {
-    replicationStartTimeStamp = null;
-    replicationTimer = null;
-  }
-
-  Date getReplicationStartTimeStamp() {
-    return replicationStartTimeStamp;
-  }
-
-  long getReplicationTimeElapsed() {
-    long timeElapsed = 0;
-    if (replicationStartTimeStamp != null)
-      timeElapsed = TimeUnit.SECONDS.convert((long) replicationTimer.getTime(), TimeUnit.MILLISECONDS);
-    return timeElapsed;
-  }
-
-  List<Map<String, Object>> getTlogFilesToDownload() {
-    //make a copy first because it can be null later
-    List<Map<String, Object>> tmp = tlogFilesToDownload;
-    // create a new instance, or else the iterator may fail
-    return tmp == null ? Collections.EMPTY_LIST : new ArrayList<>(tmp);
-  }
-
-  List<Map<String, Object>> getTlogFilesDownloaded() {
-    //make a copy first because it can be null later
-    List<Map<String, Object>> tmp = tlogFilesDownloaded;
-    // NOTE: it's safe to make a copy of a SynchronizedCollection(ArrayList)
-    return tmp == null ? Collections.EMPTY_LIST : new ArrayList<>(tmp);
-  }
-
-  List<Map<String, Object>> getConfFilesToDownload() {
-    //make a copy first because it can be null later
-    List<Map<String, Object>> tmp = confFilesToDownload;
-    // create a new instance, or else the iterator may fail
-    return tmp == null ? Collections.EMPTY_LIST : new ArrayList<>(tmp);
-  }
-
-  List<Map<String, Object>> getConfFilesDownloaded() {
-    //make a copy first because it can be null later
-    List<Map<String, Object>> tmp = confFilesDownloaded;
-    // NOTE: it's safe to make a copy of a SynchronizedCollection(ArrayList)
-    return tmp == null ? Collections.EMPTY_LIST : new ArrayList<>(tmp);
-  }
-
-  List<Map<String, Object>> getFilesToDownload() {
-    //make a copy first because it can be null later
-    List<Map<String, Object>> tmp = filesToDownload;
-    return tmp == null ? Collections.EMPTY_LIST : new ArrayList<>(tmp);
-  }
-
-  List<Map<String, Object>> getFilesDownloaded() {
-    List<Map<String, Object>> tmp = filesDownloaded;
-    return tmp == null ? Collections.EMPTY_LIST : new ArrayList<>(tmp);
-  }
-
-  // TODO: currently does not reflect conf files
-  Map<String, Object> getCurrentFile() {
-    Map<String, Object> tmp = currentFile;
-    DirectoryFileFetcher tmpFileFetcher = dirFileFetcher;
-    if (tmp == null)
-      return null;
-    tmp = new HashMap<>(tmp);
-    if (tmpFileFetcher != null)
-      tmp.put("bytesDownloaded", tmpFileFetcher.getBytesDownloaded());
-    return tmp;
-  }
-
-  private static class ReplicationHandlerException extends InterruptedException {
-    public ReplicationHandlerException(String message) {
-      super(message);
-    }
-  }
-
-  private interface FileInterface {
-    public void sync() throws IOException;
-    public void write(byte[] buf, int packetSize) throws IOException;
-    public void close() throws Exception;
-    public void delete() throws Exception;
-  }
-
-  /**
-   * This class acts as a client for ReplicationHandler.FileStream. It understands the wt=filestream protocol.
-   *
-   * @see org.apache.solr.handler.ReplicationHandler.DirectoryFileStream
-   */
-  private class FileFetcher {
-    private final FileInterface file;
-    private boolean includeChecksum = true;
-    private final String fileName;
-    private final String saveAs;
-    private final String solrParamOutput;
-    private final Long indexGen;
-
-    private final long size;
-    private long bytesDownloaded = 0;
-    private byte[] buf;
-    private final Checksum checksum;
-    private int errorCount = 0;
-    private boolean aborted = false;
-
-    FileFetcher(FileInterface file, Map<String, Object> fileDetails, String saveAs,
-                String solrParamOutput, long latestGen) throws IOException {
-      this.file = file;
-      this.fileName = (String) fileDetails.get(NAME);
-      this.size = (Long) fileDetails.get(SIZE);
-      buf = new byte[(int)Math.min(this.size, ReplicationHandler.PACKET_SZ)];
-      this.solrParamOutput = solrParamOutput;
-      this.saveAs = saveAs;
-      indexGen = latestGen;
-      if (includeChecksum) {
-        checksum = new Adler32();
-      } else {
-        checksum = null;
-      }
-    }
-
-    public long getBytesDownloaded() {
-      return bytesDownloaded;
-    }
-
-    /**
-     * The main method which downloads the file, with one retry on failure.
-     */
-    public void fetchFile() throws Exception {
-      bytesDownloaded = 0;
-      try {
-        fetch();
-      } catch(Exception e) {
-        if (!aborted) {
-          SolrException.log(IndexFetcher.log, "Error fetching file, doing one retry...", e);
-          // one retry
-          fetch();
-        } else {
-          throw e;
-        }
-      }
-    }
-    
-    private void fetch() throws Exception {
-      try {
-        while (true) {
-          final FastInputStream is = getStream();
-          int result;
-          try {
-            //fetch packets one by one in a single request
-            result = fetchPackets(is);
-            if (result == 0 || result == NO_CONTENT) {
-              return;
-            }
-            // if there is an error, retry, continuing from the point where the transfer broke
-          } finally {
-            IOUtils.closeQuietly(is);
-          }
-        }
-      } finally {
-        cleanup();
-        // if cleanup succeeds, the file was downloaded fully; do an fsync
-        fsyncService.submit(() -> {
-          try {
-            file.sync();
-          } catch (IOException e) {
-            fsyncException = e;
-          }
-        });
-      }
-    }
-
-    private int fetchPackets(FastInputStream fis) throws Exception {
-      byte[] intbytes = new byte[4];
-      byte[] longbytes = new byte[8];
-      try {
-        while (true) {
-          if (stop) {
-            stop = false;
-            aborted = true;
-            throw new ReplicationHandlerException("User aborted replication");
-          }
-          long checkSumServer = -1;
-          fis.readFully(intbytes);
-          //read the size of the packet
-          int packetSize = readInt(intbytes);
-          if (packetSize <= 0) {
-            log.warn("No content received for file: {}", fileName);
-            return NO_CONTENT;
-          }
-          //TODO consider recoding the remaining logic to not use/need buf[]; instead use the internal buffer of fis
-          if (buf.length < packetSize) {
-            //This shouldn't happen since sender should use PACKET_SZ and we init the buf based on that too
-            buf = new byte[packetSize];
-          }
-          if (checksum != null) {
-            //read the checksum
-            fis.readFully(longbytes);
-            checkSumServer = readLong(longbytes);
-          }
-          //then read the packet of bytes
-          fis.readFully(buf, 0, packetSize);
-          //compare the checksum as sent from the master
-          if (includeChecksum) {
-            checksum.reset();
-            checksum.update(buf, 0, packetSize);
-            long checkSumClient = checksum.getValue();
-            if (checkSumClient != checkSumServer) {
-              log.error("Checksum not matched between client and server for file: {}", fileName);
-              // if the checksum is wrong, it is a problem; return for a retry
-              return 1;
-            }
-          }
-          //if everything is fine, write down the packet to the file
-          file.write(buf, packetSize);
-          bytesDownloaded += packetSize;
-          log.debug("Fetched and wrote {} bytes of file: {}", bytesDownloaded, fileName);
-          if (bytesDownloaded >= size)
-            return 0;
-          //errorCount is always set to zero after a successful packet
-          errorCount = 0;
-        }
-      } catch (ReplicationHandlerException e) {
-        throw e;
-      } catch (Exception e) {
-        log.warn("Error in fetching file: {} (downloaded {} of {} bytes)",
-            fileName, bytesDownloaded, size, e);
-        //for any failure, increment the error count
-        errorCount++;
-        //if it fails for the same packet for MAX_RETRIES fail and come out
-        if (errorCount > MAX_RETRIES) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-              "Failed to fetch file: " + fileName +
-                  " (downloaded " + bytesDownloaded + " of " + size + " bytes" +
-                  ", error count: " + errorCount + " > " + MAX_RETRIES + ")", e);
-        }
-        return ERR;
-      }
-    }
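-
-    // Wire-format sketch, as decoded above: each packet on the stream is
-    //
-    //   [4-byte big-endian length][8-byte Adler32 checksum, if requested][payload]
-    //
-    // and a length <= 0 signals that no (further) content is available.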
-
-    /**
-     * The web container flushes the data only after it fills the buffer size, so all data has to be read
-     * via readFully(), otherwise it fails. Read everything as bytes and then extract an integer out of it.
-     */
-    private int readInt(byte[] b) {
-      return (((b[0] & 0xff) << 24) | ((b[1] & 0xff) << 16)
-          | ((b[2] & 0xff) << 8) | (b[3] & 0xff));
-
-    }
-
-    /**
-     * Same as above but to read longs from a byte array
-     */
-    private long readLong(byte[] b) {
-      return (((long) (b[0] & 0xff)) << 56) | (((long) (b[1] & 0xff)) << 48)
-          | (((long) (b[2] & 0xff)) << 40) | (((long) (b[3] & 0xff)) << 32)
-          | (((long) (b[4] & 0xff)) << 24) | ((b[5] & 0xff) << 16)
-          | ((b[6] & 0xff) << 8) | ((b[7] & 0xff));
-
-    }
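-
-    // Both decoders above read big-endian (network order) values; assuming
-    // java.nio were used here instead, they would be equivalent to:
-    //
-    //   int i  = java.nio.ByteBuffer.wrap(b).getInt();
-    //   long l = java.nio.ByteBuffer.wrap(b).getLong();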
-
-    /**
-     * cleanup everything
-     */
-    private void cleanup() {
-      try {
-        file.close();
-      } catch (Exception e) {
-        // best effort: log the close failure and continue with cleanup
-        log.error("Error closing file: {}", this.saveAs, e);
-      }
-      if (bytesDownloaded != size) {
-        //if the download is not complete then
-        //delete the file being downloaded
-        try {
-          file.delete();
-        } catch (Exception e) {
-          log.error("Error deleting file: {}", this.saveAs, e);
-        }
-        //if the failure is due to a user abort, return normally; otherwise throw an exception
-        if (!aborted)
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-              "Unable to download " + fileName + " completely. Downloaded "
-                  + bytesDownloaded + "!=" + size);
-      }
-    }
-
-    /**
-     * Open a new stream using HttpClient
-     */
-    private FastInputStream getStream() throws IOException {
-
-      ModifiableSolrParams params = new ModifiableSolrParams();
-
-      //the command is filecontent
-      params.set(COMMAND, CMD_GET_FILE);
-      params.set(GENERATION, Long.toString(indexGen));
-      params.set(CommonParams.QT, ReplicationHandler.PATH);
-      //the file to download; this is also used to reserve the download
-      params.set(solrParamOutput, fileName);
-      if (useInternalCompression) {
-        params.set(COMPRESSION, "true");
-      }
-      //use checksum
-      if (this.includeChecksum) {
-        params.set(CHECKSUM, true);
-      }
-      //wt=filestream; this is a custom protocol
-      params.set(CommonParams.WT, FILE_STREAM);
-      // If there was a failure, this is a retry; offset=<bytesDownloaded> ensures that
-      // the server resumes from that offset
-      if (bytesDownloaded > 0) {
-        params.set(OFFSET, Long.toString(bytesDownloaded));
-      }
-
-
-      NamedList response;
-      InputStream is = null;
-
-      // TODO use shardhandler
-      try (HttpSolrClient client = new Builder(masterUrl)
-          .withHttpClient(myHttpClient)
-          .withResponseParser(null)
-          .withConnectionTimeout(connTimeout)
-          .withSocketTimeout(soTimeout)
-          .build()) {
-        QueryRequest req = new QueryRequest(params);
-        response = client.request(req);
-        is = (InputStream) response.get("stream");
-        if(useInternalCompression) {
-          is = new InflaterInputStream(is);
-        }
-        return new FastInputStream(is);
-      } catch (Exception e) {
-        //close stream on error
-        org.apache.commons.io.IOUtils.closeQuietly(is);
-        throw new IOException("Could not download file '" + fileName + "'", e);
-      }
-    }
-  }
-
-  private static class DirectoryFile implements FileInterface {
-    private final String saveAs;
-    private Directory copy2Dir;
-    private IndexOutput outStream;
-
-    DirectoryFile(Directory tmpIndexDir, String saveAs) throws IOException {
-      this.saveAs = saveAs;
-      this.copy2Dir = tmpIndexDir;
-      outStream = copy2Dir.createOutput(this.saveAs, DirectoryFactory.IOCONTEXT_NO_CACHE);
-    }
-
-    public void sync() throws IOException {
-      copy2Dir.sync(Collections.singleton(saveAs));
-    }
-
-    public void write(byte[] buf, int packetSize) throws IOException {
-      outStream.writeBytes(buf, 0, packetSize);
-    }
-
-    public void close() throws Exception {
-      outStream.close();
-    }
-
-    public void delete() throws Exception {
-      copy2Dir.deleteFile(saveAs);
-    }
-  }
-
-  private class DirectoryFileFetcher extends FileFetcher {
-    DirectoryFileFetcher(Directory tmpIndexDir, Map<String, Object> fileDetails, String saveAs,
-                         String solrParamOutput, long latestGen) throws IOException {
-      super(new DirectoryFile(tmpIndexDir, saveAs), fileDetails, saveAs, solrParamOutput, latestGen);
-    }
-  }
-
-  private static class LocalFsFile implements FileInterface {
-    private File copy2Dir;
-
-    FileChannel fileChannel;
-    private FileOutputStream fileOutputStream;
-    File file;
-
-    LocalFsFile(File dir, String saveAs) throws IOException {
-      this.copy2Dir = dir;
-
-      this.file = new File(copy2Dir, saveAs);
-
-      File parentDir = this.file.getParentFile();
-      if( ! parentDir.exists() ){
-        if ( ! parentDir.mkdirs() ) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-              "Failed to create (sub)directory for file: " + saveAs);
-        }
-      }
-
-      this.fileOutputStream = new FileOutputStream(file);
-      this.fileChannel = this.fileOutputStream.getChannel();
-    }
-
-    public void sync() throws IOException {
-      FileUtils.sync(file);
-    }
-
-    public void write(byte[] buf, int packetSize) throws IOException {
-      fileChannel.write(ByteBuffer.wrap(buf, 0, packetSize));
-    }
-
-    public void close() throws Exception {
-      //close the FileOutputStream (which also closes the Channel)
-      fileOutputStream.close();
-    }
-
-    public void delete() throws Exception {
-      Files.delete(file.toPath());
-    }
-  }
-
-  private class LocalFsFileFetcher extends FileFetcher {
-    LocalFsFileFetcher(File dir, Map<String, Object> fileDetails, String saveAs,
-                       String solrParamOutput, long latestGen) throws IOException {
-      super(new LocalFsFile(dir, saveAs), fileDetails, saveAs, solrParamOutput, latestGen);
-    }
-  }
-
-  NamedList getDetails() throws IOException, SolrServerException {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(COMMAND, CMD_DETAILS);
-    params.set("slave", false);
-    params.set(CommonParams.QT, ReplicationHandler.PATH);
-
-    // TODO use shardhandler
-    try (HttpSolrClient client = new HttpSolrClient.Builder(masterUrl)
-        .withHttpClient(myHttpClient)
-        .withConnectionTimeout(connTimeout)
-        .withSocketTimeout(soTimeout)
-        .build()) {
-      QueryRequest request = new QueryRequest(params);
-      return client.request(request);
-    }
-  }
-
-  public void destroy() {
-    abortFetch();
-    HttpClientUtil.close(myHttpClient);
-  }
-
-  String getMasterUrl() {
-    return masterUrl;
-  }
-
-  private static final int MAX_RETRIES = 5;
-
-  private static final int NO_CONTENT = 1;
-
-  private static final int ERR = 2;
-
-  public static final String REPLICATION_PROPERTIES = "replication.properties";
-
-  static final String INDEX_REPLICATED_AT = "indexReplicatedAt";
-
-  static final String TIMES_INDEX_REPLICATED = "timesIndexReplicated";
-
-  static final String CONF_FILES_REPLICATED = "confFilesReplicated";
-
-  static final String CONF_FILES_REPLICATED_AT = "confFilesReplicatedAt";
-
-  static final String TIMES_CONFIG_REPLICATED = "timesConfigReplicated";
-
-  static final String LAST_CYCLE_BYTES_DOWNLOADED = "lastCycleBytesDownloaded";
-
-  static final String TIMES_FAILED = "timesFailed";
-
-  static final String REPLICATION_FAILED_AT = "replicationFailedAt";
-
-  static final String PREVIOUS_CYCLE_TIME_TAKEN = "previousCycleTimeInSeconds";
-
-  static final String INDEX_REPLICATED_AT_LIST = "indexReplicatedAtList";
-
-  static final String REPLICATION_FAILED_AT_LIST = "replicationFailedAtList";
-}
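For reference, the CMD_GET_FILE packets consumed above are framed as a 4-byte big-endian length, an optional 8-byte big-endian checksum, and then the payload, which is why readInt(byte[]) and readLong(byte[]) assemble values byte by byte. A minimal sketch of a reader for that framing, assuming a DataInput-style stream and an Adler-32 checksum (the PacketReader class itself is hypothetical, not part of Solr):

    import java.io.DataInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.util.zip.Adler32;

    // Hypothetical illustration of the filestream packet framing.
    class PacketReader {
      private final DataInputStream in;
      private final boolean includeChecksum;
      private final Adler32 checksum = new Adler32();

      PacketReader(InputStream in, boolean includeChecksum) {
        // DataInputStream.readInt()/readLong() are big-endian, matching readInt(byte[]) above
        this.in = new DataInputStream(in);
        this.includeChecksum = includeChecksum;
      }

      /** Returns the next payload, or null when a non-positive size signals no content. */
      byte[] nextPacket() throws IOException {
        int size = in.readInt();                                // packet size
        if (size <= 0) return null;                             // no content
        long expected = includeChecksum ? in.readLong() : 0L;   // checksum sent by the server
        byte[] payload = new byte[size];
        in.readFully(payload);                                  // must read fully; partial reads break the framing
        if (includeChecksum) {
          checksum.reset();
          checksum.update(payload, 0, size);
          if (checksum.getValue() != expected) {
            throw new IOException("checksum mismatch; retry from the current offset");
          }
        }
        return payload;
      }
    }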


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/ResponseBuilder.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ResponseBuilder.java b/solr/core/src/java/org/apache/solr/handler/component/ResponseBuilder.java
deleted file mode 100644
index 1f9e2d5..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/ResponseBuilder.java
+++ /dev/null
@@ -1,495 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.grouping.SearchGroup;
-import org.apache.lucene.search.grouping.TopGroups;
-import org.apache.lucene.util.BytesRef;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.util.RTimer;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.request.SolrRequestInfo;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.search.CursorMark;
-import org.apache.solr.search.DocListAndSet;
-import org.apache.solr.search.QParser;
-import org.apache.solr.search.QueryCommand;
-import org.apache.solr.search.QueryResult;
-import org.apache.solr.search.SortSpec;
-import org.apache.solr.search.RankQuery;
-import org.apache.solr.search.grouping.GroupingSpecification;
-import org.apache.solr.search.grouping.distributed.command.QueryCommandResult;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * This class is experimental and will be changing in the future.
- *
- *
- * @since solr 1.3
- */
-public class ResponseBuilder
-{
-  public SolrQueryRequest req;
-  public SolrQueryResponse rsp;
-  public boolean doHighlights;
-  public boolean doFacets;
-  public boolean doExpand;
-  public boolean doStats;
-  public boolean doTerms;
-  public boolean doAnalytics;
-  public MergeStrategy mergeFieldHandler;
-
-  private boolean needDocList = false;
-  private boolean needDocSet = false;
-  private int fieldFlags = 0;
-  //private boolean debug = false;
-  private boolean debugTimings, debugQuery, debugResults, debugTrack;
-
-  private QParser qparser = null;
-  private String queryString = null;
-  private Query query = null;
-  private List<Query> filters = null;
-  private SortSpec sortSpec = null;
-  private GroupingSpecification groupingSpec;
-  private CursorMark cursorMark;
-  private CursorMark nextCursorMark;
-
-  private List<MergeStrategy> mergeStrategies;
-  private RankQuery rankQuery;
-
-
-  private DocListAndSet results = null;
-  private NamedList<Object> debugInfo = null;
-  private RTimer timer = null;
-
-  private Query highlightQuery = null;
-
-  public List<SearchComponent> components;
-
-  SolrRequestInfo requestInfo;
-
-  public ResponseBuilder(SolrQueryRequest req, SolrQueryResponse rsp, List<SearchComponent> components)
-  {
-    this.req = req;
-    this.rsp = rsp;
-    this.components = components;
-    this.requestInfo = SolrRequestInfo.getRequestInfo();
-  }
-
-  //////////////////////////////////////////////////////////
-  //////////////////////////////////////////////////////////
-  //// Distributed Search section
-  //////////////////////////////////////////////////////////
-  //////////////////////////////////////////////////////////
-
-  public static final String FIELD_SORT_VALUES = "fsv";
-  public static final String SHARDS = "shards";
-  public static final String IDS = "ids";
-
-  /**
-   * public static final String NUMDOCS = "nd";
-   * public static final String DOCFREQS = "tdf";
-   * public static final String TERMS = "terms";
-   * public static final String EXTRACT_QUERY_TERMS = "eqt";
-   * public static final String LOCAL_SHARD = "local";
-   * public static final String DOC_QUERY = "dq";
-   * *
-   */
-
-  public static int STAGE_START = 0;
-  public static int STAGE_PARSE_QUERY = 1000;
-  public static int STAGE_TOP_GROUPS = 1500;
-  public static int STAGE_EXECUTE_QUERY = 2000;
-  public static int STAGE_GET_FIELDS = 3000;
-  public static int STAGE_DONE = Integer.MAX_VALUE;
-
-  public int stage;  // What stage is this current request at?
-
-  //The address of the Shard
-  boolean isDistrib; // is this a distributed search?
-  public String[] shards;
-  public String[] slices; // the optional logical ids of the shards
-  public int shards_rows = -1;
-  public int shards_start = -1;
-  public List<ShardRequest> outgoing;  // requests to be sent
-  public List<ShardRequest> finished;  // requests that have received responses from all shards
-  public String shortCircuitedURL;
-
-  /**
-   * Returns true if this is a distributed search request.
-   */
-  public boolean isDistributed() {
-    return this.isDistrib;
-  }
-
-  public int getShardNum(String shard) {
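-    // identity check first is a cheap fast path; equals() handles distinct but equal strings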
-    for (int i = 0; i < shards.length; i++) {
-      if (shards[i] == shard || shards[i].equals(shard)) return i;
-    }
-    return -1;
-  }
-
-  public void addRequest(SearchComponent me, ShardRequest sreq) {
-    outgoing.add(sreq);
-    if ((sreq.purpose & ShardRequest.PURPOSE_PRIVATE) == 0) {
-      // if this isn't a private request, let other components modify it.
-      for (SearchComponent component : components) {
-        if (component != me) {
-          component.modifyRequest(this, me, sreq);
-        }
-      }
-    }
-  }
-
-  public GlobalCollectionStat globalCollectionStat;
-
-  public Map<Object, ShardDoc> resultIds;
-  // Maps uniqueKeyValue to ShardDoc, which may be used to
-  // determine order of the doc or uniqueKey in the final
-  // returned sequence.
-  // Only valid after STAGE_EXECUTE_QUERY has completed.
-
-  public boolean onePassDistributedQuery;
-
-  public FacetComponent.FacetInfo _facetInfo;
-  /* private... components that don't own these shouldn't use them */
-  SolrDocumentList _responseDocs;
-  StatsInfo _statsInfo;
-  TermsComponent.TermsHelper _termsHelper;
-  SimpleOrderedMap<List<NamedList<Object>>> _pivots;
-  Object _analyticsRequestManager;
-  boolean _isOlapAnalytics;
-
-  // Context fields for grouping
-  public final Map<String, Collection<SearchGroup<BytesRef>>> mergedSearchGroups = new HashMap<>();
-  public final Map<String, Integer> mergedGroupCounts = new HashMap<>();
-  public final Map<String, Map<SearchGroup<BytesRef>, Set<String>>> searchGroupToShards = new HashMap<>();
-  public final Map<String, TopGroups<BytesRef>> mergedTopGroups = new HashMap<>();
-  public final Map<String, QueryCommandResult> mergedQueryCommandResults = new HashMap<>();
-  public final Map<Object, SolrDocument> retrievedDocuments = new HashMap<>();
-  public int totalHitCount; // Hit count used when distributed grouping is performed.
-  // Used for timeAllowed parameter. First phase elapsed time is subtracted from the time allowed for the second phase.
-  public int firstPhaseElapsedTime;
-
-  /**
-   * Utility function to add debugging info.  This will make sure a valid
-   * debugInfo exists before adding to it.
-   */
-  public void addDebugInfo( String name, Object val )
-  {
-    if( debugInfo == null ) {
-      debugInfo = new SimpleOrderedMap<>();
-    }
-    debugInfo.add( name, val );
-  }
-
-  public void addDebug(Object val, String... path) {
-    if( debugInfo == null ) {
-      debugInfo = new SimpleOrderedMap<>();
-    }
-
-    NamedList<Object> target = debugInfo;
-    for (int i=0; i<path.length-1; i++) {
-      String elem = path[i];
-      NamedList<Object> newTarget = (NamedList<Object>)target.get(elem); // look up in the current level, not the root
-      if (newTarget == null) {
-        newTarget = new SimpleOrderedMap<>();
-        target.add(elem, newTarget);
-      }
-      target = newTarget;
-    }
-
-    target.add(path[path.length-1], val);
-  }
-
-  //-------------------------------------------------------------------------
-  //-------------------------------------------------------------------------
-
-  public boolean isDebug() {
-    return debugQuery || debugTimings || debugResults || debugTrack;
-  }
-
-  /**
-   *
-   * @return true if all debugging options are on
-   */
-  public boolean isDebugAll(){
-    return debugQuery && debugTimings && debugResults && debugTrack;
-  }
-
-  public void setDebug(boolean dbg){
-    debugQuery = dbg;
-    debugTimings = dbg;
-    debugResults = dbg;
-    debugTrack = dbg;
-  }
-
-  public void addMergeStrategy(MergeStrategy mergeStrategy) {
-    if(mergeStrategies == null) {
-      mergeStrategies = new ArrayList<>();
-    }
-
-    mergeStrategies.add(mergeStrategy);
-  }
-
-  public List<MergeStrategy> getMergeStrategies() {
-    return this.mergeStrategies;
-  }
-
-  public RankQuery getRankQuery() {
-    return rankQuery;
-  }
-
-  public void setRankQuery(RankQuery rankQuery) {
-    this.rankQuery = rankQuery;
-  }
-
-  public void setResponseDocs(SolrDocumentList _responseDocs) {
-    this._responseDocs = _responseDocs;
-  }
-  
-  public SolrDocumentList getResponseDocs() {
-    return this._responseDocs;
-  }
-
-  public boolean isDebugTrack() {
-    return debugTrack;
-  }
-
-  public void setDebugTrack(boolean debugTrack) {
-    this.debugTrack = debugTrack;
-  }
-
-  public boolean isDebugTimings() {
-    return debugTimings;
-  }
-
-  public void setDebugTimings(boolean debugTimings) {
-    this.debugTimings = debugTimings;
-  }
-
-  public boolean isDebugQuery() {
-    return debugQuery;
-  }
-
-  public void setDebugQuery(boolean debugQuery) {
-    this.debugQuery = debugQuery;
-  }
-
-  public boolean isDebugResults() {
-    return debugResults;
-  }
-
-  public void setDebugResults(boolean debugResults) {
-    this.debugResults = debugResults;
-  }
-
-  public NamedList<Object> getDebugInfo() {
-    return debugInfo;
-  }
-
-  public void setDebugInfo(NamedList<Object> debugInfo) {
-    this.debugInfo = debugInfo;
-  }
-
-  public int getFieldFlags() {
-    return fieldFlags;
-  }
-
-  public void setFieldFlags(int fieldFlags) {
-    this.fieldFlags = fieldFlags;
-  }
-
-  public List<Query> getFilters() {
-    return filters;
-  }
-
-  public void setFilters(List<Query> filters) {
-    this.filters = filters;
-  }
-
-  public Query getHighlightQuery() {
-    return highlightQuery;
-  }
-
-  public void setHighlightQuery(Query highlightQuery) {
-    this.highlightQuery = highlightQuery;
-  }
-
-  public boolean isNeedDocList() {
-    return needDocList;
-  }
-
-  public void setNeedDocList(boolean needDocList) {
-    this.needDocList = needDocList;
-  }
-
-  public boolean isNeedDocSet() {
-    return needDocSet;
-  }
-
-  public void setNeedDocSet(boolean needDocSet) {
-    this.needDocSet = needDocSet;
-  }
-
-  public QParser getQparser() {
-    return qparser;
-  }
-
-  public void setQparser(QParser qparser) {
-    this.qparser = qparser;
-  }
-
-  public String getQueryString() {
-    return queryString;
-  }
-
-  public void setQueryString(String qstr) {
-    this.queryString = qstr;
-  }
-
-  public Query getQuery() {
-    return query;
-  }
-
-  public void setQuery(Query query) {
-    this.query = query;
-  }
-
-  public DocListAndSet getResults() {
-    return results;
-  }
-
-  public void setResults(DocListAndSet results) {
-    this.results = results;
-  }
-
-  public SortSpec getSortSpec() {
-    return sortSpec;
-  }
-
-  public void setSortSpec(SortSpec sortSpec) {
-    this.sortSpec = sortSpec;
-  }
-
-  public GroupingSpecification getGroupingSpec() {
-    return groupingSpec;
-  }
-
-  public void setGroupingSpec(GroupingSpecification groupingSpec) {
-    this.groupingSpec = groupingSpec;
-  }
-
-  public boolean grouping() {
-    return groupingSpec != null;
-  }
-
-  public RTimer getTimer() {
-    return timer;
-  }
-
-  public void setTimer(RTimer timer) {
-    this.timer = timer;
-  }
-
-
-  public static class GlobalCollectionStat {
-    public final long numDocs;
-
-    public final Map<String, Long> dfMap;
-
-    public GlobalCollectionStat(int numDocs, Map<String, Long> dfMap) {
-      this.numDocs = numDocs;
-      this.dfMap = dfMap;
-    }
-  }
-
-  /**
-   * Creates a SolrIndexSearcher.QueryCommand from this
-   * ResponseBuilder.  TimeAllowed is left unset.
-   */
-  public QueryCommand createQueryCommand() {
-    QueryCommand cmd = new QueryCommand();
-    cmd.setQuery(wrap(getQuery()))
-            .setFilterList(getFilters())
-            .setSort(getSortSpec().getSort())
-            .setOffset(getSortSpec().getOffset())
-            .setLen(getSortSpec().getCount())
-            .setFlags(getFieldFlags())
-            .setNeedDocSet(isNeedDocSet())
-            .setCursorMark(getCursorMark());
-    return cmd;
-  }
-
-  /** Calls {@link RankQuery#wrap(Query)} if there's a rank query, otherwise just returns the query. */
-  public Query wrap(Query q) {
-    if(this.rankQuery != null) {
-      return this.rankQuery.wrap(q);
-    } else {
-      return q;
-    }
-  }
-
-  /**
-   * Sets results from a SolrIndexSearcher.QueryResult.
-   */
-  public void setResult(QueryResult result) {
-    setResults(result.getDocListAndSet());
-    if (result.isPartialResults()) {
-      rsp.getResponseHeader().add(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY, Boolean.TRUE);
-    }
-    final Boolean segmentTerminatedEarly = result.getSegmentTerminatedEarly();
-    if (segmentTerminatedEarly != null) {
-      rsp.getResponseHeader().add(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY, segmentTerminatedEarly);
-    }
-    if (null != cursorMark) {
-      assert null != result.getNextCursorMark() : "using cursor but no next cursor set";
-      this.setNextCursorMark(result.getNextCursorMark());
-    }
-  }
-  
-  public long getNumberDocumentsFound() {
-    if (_responseDocs == null) {
-      return 0;
-    }
-    return _responseDocs.getNumFound();
-  }
-
-  public CursorMark getCursorMark() {
-    return cursorMark;
-  }
-  public void setCursorMark(CursorMark cursorMark) {
-    this.cursorMark = cursorMark;
-  }
-
-  public CursorMark getNextCursorMark() {
-    return nextCursorMark;
-  }
-  public void setNextCursorMark(CursorMark nextCursorMark) {
-    this.nextCursorMark = nextCursorMark;
-  }
-}
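As a sketch of how a component typically drives a local search through this class: build a QueryCommand from the accumulated state, run it, and hand the result back so the partial-results flag and next cursor mark propagate. The searcher call below assumes SolrIndexSearcher's search(QueryResult, QueryCommand) entry point; the surrounding method is hypothetical:

    // Hypothetical component code, not part of ResponseBuilder itself.
    public void process(ResponseBuilder rb) throws IOException {
      QueryCommand cmd = rb.createQueryCommand();   // query, filters, sort, offset, len, flags, cursor
      QueryResult result = new QueryResult();
      rb.req.getSearcher().search(result, cmd);     // assumed SolrIndexSearcher entry point
      rb.setResult(result);                         // also records partialResults and the next cursor mark
      if (rb.isDebugQuery()) {
        rb.addDebug(cmd.getQuery().toString(), "queryDebug", "parsedquery");
      }
    }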

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/ResponseLogComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ResponseLogComponent.java b/solr/core/src/java/org/apache/solr/handler/component/ResponseLogComponent.java
deleted file mode 100644
index 9bbfb81..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/ResponseLogComponent.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.Set;
-
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.response.ResultContext;
-import org.apache.solr.schema.IndexSchema;
-import org.apache.solr.search.DocIterator;
-import org.apache.solr.search.DocList;
-import org.apache.solr.search.SolrIndexSearcher;
-
-/**
- * Adds to the log file the document IDs that are sent in the query response.
- * If document scores are available in the response (by adding the pseudo-
- * column 'score' to the field list) then each document ID will be followed
- * by its score, as in:
- * <pre>
- * "... hits=55 responseLog=22:0.71231794,44:0.61231794 status=0 ..."
- * </pre>
- * 
- * Add it to a requestHandler in solrconfig.xml like this:
- * <pre class="prettyprint">
- * &lt;searchComponent name="responselog" class="solr.ResponseLogComponent"/&gt;
- * 
- * &lt;requestHandler name="/select" class="solr.SearchHandler"&gt;
- *   &lt;lst name="defaults"&gt;
- *   
- *     ...
- *     
- *   &lt;/lst&gt;
- *   &lt;arr name="components"&gt;
- *     &lt;str&gt;responselog&lt;/str&gt;
- *   &lt;/arr&gt;
- * &lt;/requestHandler&gt;</pre>
- *  
-   *  It can then be enabled at query time by supplying the <pre>responseLog=true</pre>
-   *  query parameter.
- */
-public class ResponseLogComponent extends SearchComponent {
-
-  public static final String COMPONENT_NAME = "responseLog";
-
-  @Override
-  public void prepare(ResponseBuilder rb) throws IOException {}
-
-  @Override
-  public void process(ResponseBuilder rb) throws IOException {
-    SolrParams params = rb.req.getParams();
-    if (!params.getBool(COMPONENT_NAME, false)) return;
-    
-    SolrIndexSearcher searcher = rb.req.getSearcher();
-    IndexSchema schema = searcher.getSchema();
-    if (schema.getUniqueKeyField() == null) return;
-
-    ResultContext rc = (ResultContext) rb.rsp.getResponse();
-
-    DocList docs = rc.getDocList();
-    if (docs.hasScores()) {
-      processScores(rb, docs, schema, searcher);
-    } else {
-      processIds(rb, docs, schema, searcher);
-    }
-  }
-
-  protected void processIds(ResponseBuilder rb, DocList dl, IndexSchema schema,
-      SolrIndexSearcher searcher) throws IOException {
-    
-    StringBuilder sb = new StringBuilder();
-
-    Set<String> fields = Collections.singleton(schema.getUniqueKeyField().getName());
-    for(DocIterator iter = dl.iterator(); iter.hasNext();) {
-
-      sb.append(schema.printableUniqueKey(searcher.doc(iter.nextDoc(), fields)))
-        .append(',');
-    }
-    if (sb.length() > 0) {
-      rb.rsp.addToLog("responseLog", sb.substring(0, sb.length() - 1));
-    }  
-  }
-  
-  protected void processScores(ResponseBuilder rb, DocList dl, IndexSchema schema,
-      SolrIndexSearcher searcher) throws IOException {
-    
-    StringBuilder sb = new StringBuilder();
-    Set<String> fields = Collections.singleton(schema.getUniqueKeyField().getName());
-    for(DocIterator iter = dl.iterator(); iter.hasNext();) {
-      sb.append(schema.printableUniqueKey(searcher.doc(iter.nextDoc(), fields)))
-        .append(':')
-        .append(iter.score())
-        .append(',');
-    }
-    if (sb.length() > 0) {
-      rb.rsp.addToLog("responseLog", sb.substring(0, sb.length() - 1));
-    }  
-  }
-  
-  @Override
-  public String getDescription() {
-    return "A component that inserts the retrieved documents (and optionally scores) into the response log entry";
-  }
-}
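Once wired into a handler as shown in the javadoc above, the component is toggled per request. An illustrative request and the kind of log line it produces (ids, scores, and timings invented):

    http://localhost:8983/solr/select?q=solr&fl=id,score&responseLog=true

    ... hits=2 responseLog=doc1:1.3862944,doc2:0.6931472 status=0 QTime=3 ...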

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/SearchComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/SearchComponent.java b/solr/core/src/java/org/apache/solr/handler/component/SearchComponent.java
deleted file mode 100644
index d923306..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/SearchComponent.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-import com.codahale.metrics.MetricRegistry;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.SolrInfoBean;
-import org.apache.solr.search.facet.FacetModule;
-import org.apache.solr.util.plugin.NamedListInitializedPlugin;
-
-/**
- * TODO!
- * 
- *
- * @since solr 1.3
- */
-public abstract class SearchComponent implements SolrInfoBean, NamedListInitializedPlugin
-{
-  /**
-   * The name given to this component in the solrconfig.xml file
-   */
-  private String name = this.getClass().getName();
-
-  protected Set<String> metricNames = ConcurrentHashMap.newKeySet();
-  protected MetricRegistry registry;
-
-  /**
-   * Prepare the response.  Guaranteed to be called before any SearchComponent {@link #process(org.apache.solr.handler.component.ResponseBuilder)} method.
-   * Called for every incoming request.
-   *
-   * The place to do initialization that is request dependent.
-   * @param rb The {@link org.apache.solr.handler.component.ResponseBuilder}
-   * @throws IOException If there is a low-level I/O error.
-   */
-  public abstract void prepare(ResponseBuilder rb) throws IOException;
-
-  /**
-   * Process the request for this component 
-   * @param rb The {@link ResponseBuilder}
-   * @throws IOException If there is a low-level I/O error.
-   */
-  public abstract void process(ResponseBuilder rb) throws IOException;
-
-  /**
-   * Process for a distributed search.
-   * @return the next stage for this component
-   */
-  public int distributedProcess(ResponseBuilder rb) throws IOException {
-    return ResponseBuilder.STAGE_DONE;
-  }
-
-  /** Called after another component adds a request */
-  public void modifyRequest(ResponseBuilder rb, SearchComponent who, ShardRequest sreq) {
-  }
-
-  /** Called after all responses for a single request have been received */
-  public void handleResponses(ResponseBuilder rb, ShardRequest sreq) {
-  }
-
-  /** Called after all responses have been received for this stage.
-   * Useful when different requests are sent to each shard.
-   */
-  public void finishStage(ResponseBuilder rb) {
-  }
-  
-  /**
-   * Sets the name of the SearchComponent. The name of the component is usually
-   * the name defined for it in the configuration.
-   */
-  public void setName(String name) {
-    this.name = name;
-  }
-
-
-  //////////////////////// NamedListInitializedPlugin methods //////////////////////
-  @Override
-  public void init( NamedList args )
-  {
-    // By default do nothing
-  }
-
-  //////////////////////// SolrInfoMBeans methods //////////////////////
-
-  @Override
-  public String getName() {
-    return name;
-  }
-
-  @Override
-  public abstract String getDescription();
-
-  @Override
-  public Category getCategory() {
-    return Category.OTHER;
-  }
-
-  @Override
-  public Set<String> getMetricNames() {
-    return metricNames;
-  }
-
-  @Override
-  public MetricRegistry getMetricRegistry() {
-    return registry;
-  }
-
-  public static final Map<String, Class<? extends SearchComponent>> standard_components;
-
-
-  static {
-    HashMap<String, Class<? extends SearchComponent>> map = new HashMap<>();
-    map.put(HighlightComponent.COMPONENT_NAME, HighlightComponent.class);
-    map.put(QueryComponent.COMPONENT_NAME, QueryComponent.class);
-    map.put(FacetComponent.COMPONENT_NAME, FacetComponent.class);
-    map.put(FacetModule.COMPONENT_NAME, FacetModule.class);
-    map.put(MoreLikeThisComponent.COMPONENT_NAME, MoreLikeThisComponent.class);
-    map.put(StatsComponent.COMPONENT_NAME, StatsComponent.class);
-    map.put(DebugComponent.COMPONENT_NAME, DebugComponent.class);
-    map.put(RealTimeGetComponent.COMPONENT_NAME, RealTimeGetComponent.class);
-    map.put(ExpandComponent.COMPONENT_NAME, ExpandComponent.class);
-    map.put(TermsComponent.COMPONENT_NAME, TermsComponent.class);
-
-    standard_components = Collections.unmodifiableMap(map);
-  }
-
-}
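A concrete subclass only has to implement prepare(), process(), and getDescription(); the distributed hooks all have no-op defaults. A minimal hypothetical sketch (the component name and behavior are invented for illustration):

    // Hypothetical component that reports how many requests it has processed.
    public class RequestCounterComponent extends SearchComponent {
      private final java.util.concurrent.atomic.AtomicLong count =
          new java.util.concurrent.atomic.AtomicLong();

      @Override
      public void prepare(ResponseBuilder rb) throws IOException {
        // request-dependent setup would go here; nothing needed for this sketch
      }

      @Override
      public void process(ResponseBuilder rb) throws IOException {
        rb.rsp.add("requestCount", count.incrementAndGet());  // appears in the response
      }

      @Override
      public String getDescription() {
        return "Counts requests handled by this component";
      }
    }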

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java b/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java
deleted file mode 100644
index d4c680c..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java
+++ /dev/null
@@ -1,496 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.lucene.index.ExitableDirectoryReader;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.core.CloseHook;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.PluginInfo;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.search.SolrQueryTimeoutImpl;
-import org.apache.solr.search.facet.FacetModule;
-import org.apache.solr.security.AuthorizationContext;
-import org.apache.solr.security.PermissionNameProvider;
-import org.apache.solr.util.RTimerTree;
-import org.apache.solr.util.SolrPluginUtils;
-import org.apache.solr.util.plugin.PluginInfoInitialized;
-import org.apache.solr.util.plugin.SolrCoreAware;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.DISTRIB;
-import static org.apache.solr.common.params.CommonParams.PATH;
-
-
-/**
- *
- * See SOLR-281.
- *
- */
-public class SearchHandler extends RequestHandlerBase implements SolrCoreAware , PluginInfoInitialized, PermissionNameProvider {
-  static final String INIT_COMPONENTS = "components";
-  static final String INIT_FIRST_COMPONENTS = "first-components";
-  static final String INIT_LAST_COMPONENTS = "last-components";
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  protected volatile List<SearchComponent> components;
-  private ShardHandlerFactory shardHandlerFactory ;
-  private PluginInfo shfInfo;
-  private SolrCore core;
-
-  protected List<String> getDefaultComponents()
-  {
-    ArrayList<String> names = new ArrayList<>(8);
-    names.add( QueryComponent.COMPONENT_NAME );
-    names.add( FacetComponent.COMPONENT_NAME );
-    names.add( FacetModule.COMPONENT_NAME );
-    names.add( MoreLikeThisComponent.COMPONENT_NAME );
-    names.add( HighlightComponent.COMPONENT_NAME );
-    names.add( StatsComponent.COMPONENT_NAME );
-    names.add( DebugComponent.COMPONENT_NAME );
-    names.add( ExpandComponent.COMPONENT_NAME);
-    names.add( TermsComponent.COMPONENT_NAME);
-
-    return names;
-  }
-
-  @Override
-  public void init(PluginInfo info) {
-    init(info.initArgs);
-    for (PluginInfo child : info.children) {
-      if("shardHandlerFactory".equals(child.type)){
-        this.shfInfo = child;
-        break;
-      }
-    }
-  }
-
-  @Override
-  public PermissionNameProvider.Name getPermissionName(AuthorizationContext ctx) {
-    return PermissionNameProvider.Name.READ_PERM;
-  }
-
-  /**
-   * Initialize the components based on name.  Note, if using <code>INIT_FIRST_COMPONENTS</code> or <code>INIT_LAST_COMPONENTS</code>,
-   * then the {@link DebugComponent} will always occur last.  If this is not desired, then one must explicitly declare all components using
-   * the <code>INIT_COMPONENTS</code> syntax.
-   */
-  @Override
-  @SuppressWarnings("unchecked")
-  public void inform(SolrCore core)
-  {
-    this.core = core;
-    Set<String> missing = new HashSet<>();
-    List<String> c = (List<String>) initArgs.get(INIT_COMPONENTS);
-    missing.addAll(core.getSearchComponents().checkContains(c));
-    List<String> first = (List<String>) initArgs.get(INIT_FIRST_COMPONENTS);
-    missing.addAll(core.getSearchComponents().checkContains(first));
-    List<String> last = (List<String>) initArgs.get(INIT_LAST_COMPONENTS);
-    missing.addAll(core.getSearchComponents().checkContains(last));
-    if (!missing.isEmpty()) throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-        "Missing SearchComponents named : " + missing);
-    if (c != null && (first != null || last != null)) throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-        "First/Last components only valid if you do not declare 'components'");
-
-    if (shfInfo == null) {
-      shardHandlerFactory = core.getCoreContainer().getShardHandlerFactory();
-    } else {
-      shardHandlerFactory = core.createInitInstance(shfInfo, ShardHandlerFactory.class, null, null);
-      core.addCloseHook(new CloseHook() {
-        @Override
-        public void preClose(SolrCore core) {
-          shardHandlerFactory.close();
-        }
-
-        @Override
-        public void postClose(SolrCore core) {
-        }
-      });
-    }
-
-  }
-
-  private void initComponents() {
-    Object declaredComponents = initArgs.get(INIT_COMPONENTS);
-    List<String> first = (List<String>) initArgs.get(INIT_FIRST_COMPONENTS);
-    List<String> last  = (List<String>) initArgs.get(INIT_LAST_COMPONENTS);
-
-    List<String> list = null;
-    boolean makeDebugLast = true;
-    if( declaredComponents == null ) {
-      // Use the default component list
-      list = getDefaultComponents();
-
-      if( first != null ) {
-        List<String> clist = first;
-        clist.addAll( list );
-        list = clist;
-      }
-
-      if( last != null ) {
-        list.addAll( last );
-      }
-    }
-    else {
-      list = (List<String>)declaredComponents;
-      if( first != null || last != null ) {
-        throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,
-            "First/Last components only valid if you do not declare 'components'");
-      }
-      makeDebugLast = false;
-    }
-
-    // Build the component list
-    List<SearchComponent> components = new ArrayList<>(list.size());
-    DebugComponent dbgCmp = null;
-    for(String c : list){
-      SearchComponent comp = core.getSearchComponent( c );
-      if (comp instanceof DebugComponent && makeDebugLast){
-        dbgCmp = (DebugComponent) comp;
-      } else {
-        components.add(comp);
-        log.debug("Adding component: {}", comp);
-      }
-    }
-    if (makeDebugLast && dbgCmp != null){
-      components.add(dbgCmp);
-      log.debug("Adding debug component: {}", dbgCmp);
-    }
-    this.components = components;
-  }
-
-  public List<SearchComponent> getComponents() {
-    List<SearchComponent> result = components;  // volatile read
-    if (result == null) {
-      synchronized (this) {
-        if (components == null) {
-          initComponents();
-        }
-        result = components;
-      }
-    }
-    return result;
-  }
-
-  private ShardHandler getAndPrepShardHandler(SolrQueryRequest req, ResponseBuilder rb) {
-    ShardHandler shardHandler = null;
-
-    CoreContainer cc = req.getCore().getCoreContainer();
-    boolean isZkAware = cc.isZooKeeperAware();
-    rb.isDistrib = req.getParams().getBool(DISTRIB, isZkAware);
-    if (!rb.isDistrib) {
-      // for back compat, a shards param with URLs like localhost:8983/solr will mean that this
-      // search is distributed.
-      final String shards = req.getParams().get(ShardParams.SHARDS);
-      rb.isDistrib = ((shards != null) && (shards.indexOf('/') > 0));
-    }
-    
-    if (rb.isDistrib) {
-      shardHandler = shardHandlerFactory.getShardHandler();
-      shardHandler.prepDistributed(rb);
-      if (!rb.isDistrib) {
-        shardHandler = null; // request is not distributed after all and so the shard handler is not needed
-      }
-    }
-
-    if (isZkAware) {
-      String shardsTolerant = req.getParams().get(ShardParams.SHARDS_TOLERANT);
-      boolean requireZkConnected = shardsTolerant != null && shardsTolerant.equals(ShardParams.REQUIRE_ZK_CONNECTED);
-      ZkController zkController = cc.getZkController();
-      boolean zkConnected = zkController != null && ! zkController.getZkClient().getConnectionManager().isLikelyExpired();
-      if (requireZkConnected && !zkConnected) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "ZooKeeper is not connected");
-      } else {
-        NamedList<Object> headers = rb.rsp.getResponseHeader();
-        if (headers != null) {
-          headers.add("zkConnected", zkConnected);
-        }
-      }
-    }
-
-    return shardHandler;
-  }
-  
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception
-  {
-    List<SearchComponent> components  = getComponents();
-    ResponseBuilder rb = new ResponseBuilder(req, rsp, components);
-    if (rb.requestInfo != null) {
-      rb.requestInfo.setResponseBuilder(rb);
-    }
-
-    boolean dbg = req.getParams().getBool(CommonParams.DEBUG_QUERY, false);
-    rb.setDebug(dbg);
-    if (!dbg) { //if it's true, we are doing everything anyway.
-      SolrPluginUtils.getDebugInterests(req.getParams().getParams(CommonParams.DEBUG), rb);
-    }
-
-    final RTimerTree timer = rb.isDebug() ? req.getRequestTimer() : null;
-
-    final ShardHandler shardHandler1 = getAndPrepShardHandler(req, rb); // creates a ShardHandler object only if it's needed
-    
-    if (timer == null) {
-      // non-debugging prepare phase
-      for( SearchComponent c : components ) {
-        c.prepare(rb);
-      }
-    } else {
-      // debugging prepare phase
-      RTimerTree subt = timer.sub( "prepare" );
-      for( SearchComponent c : components ) {
-        rb.setTimer( subt.sub( c.getName() ) );
-        c.prepare(rb);
-        rb.getTimer().stop();
-      }
-      subt.stop();
-    }
-
-    if (!rb.isDistrib) {
-      // a normal non-distributed request
-
-      long timeAllowed = req.getParams().getLong(CommonParams.TIME_ALLOWED, -1L);
-      if (timeAllowed > 0L) {
-        SolrQueryTimeoutImpl.set(timeAllowed);
-      }
-      try {
-        // The semantics of debugging vs not debugging are different enough that
-        // it makes sense to have two control loops
-        if(!rb.isDebug()) {
-          // Process
-          for( SearchComponent c : components ) {
-            c.process(rb);
-          }
-        }
-        else {
-          // Process
-          RTimerTree subt = timer.sub( "process" );
-          for( SearchComponent c : components ) {
-            rb.setTimer( subt.sub( c.getName() ) );
-            c.process(rb);
-            rb.getTimer().stop();
-          }
-          subt.stop();
-
-          // add the timing info
-          if (rb.isDebugTimings()) {
-            rb.addDebugInfo("timing", timer.asNamedList() );
-          }
-        }
-      } catch (ExitableDirectoryReader.ExitingReaderException ex) {
-        log.warn( "Query: " + req.getParamString() + "; " + ex.getMessage());
-        SolrDocumentList r = (SolrDocumentList) rb.rsp.getResponse();
-        if(r == null)
-          r = new SolrDocumentList();
-        r.setNumFound(0);
-        rb.rsp.addResponse(r);
-        if(rb.isDebug()) {
-          NamedList debug = new NamedList();
-          debug.add("explain", new NamedList());
-          rb.rsp.add("debug", debug);
-        }
-        rb.rsp.getResponseHeader().add(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY, Boolean.TRUE);
-      } finally {
-        SolrQueryTimeoutImpl.reset();
-      }
-    } else {
-      // a distributed request
-
-      if (rb.outgoing == null) {
-        rb.outgoing = new LinkedList<>();
-      }
-      rb.finished = new ArrayList<>();
-
-      int nextStage = 0;
-      do {
-        rb.stage = nextStage;
-        nextStage = ResponseBuilder.STAGE_DONE;
-
-        // call all components
-        for( SearchComponent c : components ) {
-          // the next stage is the minimum of what all components report
-          nextStage = Math.min(nextStage, c.distributedProcess(rb));
-        }
-
-
-        // check the outgoing queue and send requests
-        while (rb.outgoing.size() > 0) {
-
-          // submit all current request tasks at once
-          while (rb.outgoing.size() > 0) {
-            ShardRequest sreq = rb.outgoing.remove(0);
-            sreq.actualShards = sreq.shards;
-            if (sreq.actualShards==ShardRequest.ALL_SHARDS) {
-              sreq.actualShards = rb.shards;
-            }
-            sreq.responses = new ArrayList<>(sreq.actualShards.length); // presume we'll get a response from each shard we send to
-
-            // TODO: map from shard to address[]
-            for (String shard : sreq.actualShards) {
-              ModifiableSolrParams params = new ModifiableSolrParams(sreq.params);
-              params.remove(ShardParams.SHARDS);      // not a top-level request
-              params.set(DISTRIB, "false");               // not a top-level request
-              params.remove("indent");
-              params.remove(CommonParams.HEADER_ECHO_PARAMS);
-              params.set(ShardParams.IS_SHARD, true);  // a sub (shard) request
-              params.set(ShardParams.SHARDS_PURPOSE, sreq.purpose);
-              params.set(ShardParams.SHARD_URL, shard); // so the shard knows what was asked
-              if (rb.requestInfo != null) {
-                // we could try and detect when this is needed, but it could be tricky
-                params.set("NOW", Long.toString(rb.requestInfo.getNOW().getTime()));
-              }
-              String shardQt = params.get(ShardParams.SHARDS_QT);
-              if (shardQt != null) {
-                params.set(CommonParams.QT, shardQt);
-              } else {
-                // for distributed queries that don't include shards.qt, use the original path
-                // as the default, but operators need to update their luceneMatchVersion to enable
-                // this behavior, since it did not work this way prior to 5.1
-                String reqPath = (String) req.getContext().get(PATH);
-                if (!"/select".equals(reqPath)) {
-                  params.set(CommonParams.QT, reqPath);
-                } // else if path is /select, then the qt gets passed thru if set
-              }
-              shardHandler1.submit(sreq, shard, params);
-            }
-          }
-
-
-          // now wait for replies, but if anyone puts more requests on
-          // the outgoing queue, send them out immediately (by exiting
-          // this loop)
-          boolean tolerant = ShardParams.getShardsTolerantAsBool(rb.req.getParams());
-          while (rb.outgoing.size() == 0) {
-            ShardResponse srsp = tolerant ? 
-                shardHandler1.takeCompletedIncludingErrors():
-                shardHandler1.takeCompletedOrError();
-            if (srsp == null) break;  // no more requests to wait for
-
-            // Was there an exception?  
-            if (srsp.getException() != null) {
-              // If things are not tolerant, abort everything and rethrow
-              if(!tolerant) {
-                shardHandler1.cancelAll();
-                if (srsp.getException() instanceof SolrException) {
-                  throw (SolrException)srsp.getException();
-                } else {
-                  throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, srsp.getException());
-                }
-              } else {
-                if(rsp.getResponseHeader().get(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY) == null) {
-                  rsp.getResponseHeader().add(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY, Boolean.TRUE);
-                }
-              }
-            }
-
-            rb.finished.add(srsp.getShardRequest());
-
-            // let the components see the responses to the request
-            for(SearchComponent c : components) {
-              c.handleResponses(rb, srsp.getShardRequest());
-            }
-          }
-        }
-
-        for(SearchComponent c : components) {
-          c.finishStage(rb);
-        }
-
-        // we are done when the next stage is MAX_VALUE
-      } while (nextStage != Integer.MAX_VALUE);
-    }
-    
-    // SOLR-5550: still provide shards.info if requested even for a short circuited distrib request
-    if(!rb.isDistrib && req.getParams().getBool(ShardParams.SHARDS_INFO, false) && rb.shortCircuitedURL != null) {  
-      NamedList<Object> shardInfo = new SimpleOrderedMap<Object>();
-      SimpleOrderedMap<Object> nl = new SimpleOrderedMap<Object>();        
-      if (rsp.getException() != null) {
-        Throwable cause = rsp.getException();
-        if (cause instanceof SolrServerException) {
-          cause = ((SolrServerException)cause).getRootCause();
-        } else {
-          if (cause.getCause() != null) {
-            cause = cause.getCause();
-          }          
-        }
-        nl.add("error", cause.toString() );
-        StringWriter trace = new StringWriter();
-        cause.printStackTrace(new PrintWriter(trace));
-        nl.add("trace", trace.toString() );
-      }
-      else {
-        nl.add("numFound", rb.getResults().docList.matches());
-        nl.add("maxScore", rb.getResults().docList.maxScore());
-      }
-      nl.add("shardAddress", rb.shortCircuitedURL);
-      nl.add("time", req.getRequestTimer().getTime()); // elapsed time of this request so far
-      
-      int pos = rb.shortCircuitedURL.indexOf("://");        
-      String shardInfoName = pos != -1 ? rb.shortCircuitedURL.substring(pos+3) : rb.shortCircuitedURL;
-      shardInfo.add(shardInfoName, nl);   
-      rsp.getValues().add(ShardParams.SHARDS_INFO,shardInfo);            
-    }
-  }
-
-  //////////////////////// SolrInfoMBeans methods //////////////////////
-
-  @Override
-  public String getDescription() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("Search using components: ");
-    if( components != null ) {
-      for(SearchComponent c : components){
-        sb.append(c.getName());
-        sb.append(",");
-      }
-    }
-    return sb.toString();
-  }
-
-  @Override
-  public Boolean registerV2() {
-    return Boolean.TRUE;
-  }
-}
-
-
-// TODO: generalize how a comm component can fit into search component framework
-// TODO: statics should be per-core singletons
-
-
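The distributed loop above calls distributedProcess() on every component each pass and advances to the minimum stage any of them returns, so a component participates by returning the stage it still needs and queueing shard requests when its stage arrives. A hypothetical sketch of that contract (the purpose flag and params copy are illustrative choices):

    // Hypothetical distributed-aware component logic.
    @Override
    public int distributedProcess(ResponseBuilder rb) throws IOException {
      if (rb.stage < ResponseBuilder.STAGE_EXECUTE_QUERY) {
        return ResponseBuilder.STAGE_EXECUTE_QUERY;      // ask to be called again at that stage
      }
      if (rb.stage == ResponseBuilder.STAGE_EXECUTE_QUERY) {
        ShardRequest sreq = new ShardRequest();
        sreq.purpose = ShardRequest.PURPOSE_GET_TOP_IDS; // assumed purpose flag
        sreq.shards = ShardRequest.ALL_SHARDS;           // expanded to rb.shards by the loop above
        sreq.params = new ModifiableSolrParams(rb.req.getParams());
        rb.addRequest(this, sreq);                       // other components may modify it unless PURPOSE_PRIVATE
        return ResponseBuilder.STAGE_GET_FIELDS;
      }
      return ResponseBuilder.STAGE_DONE;
    }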

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/ShardDoc.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ShardDoc.java b/solr/core/src/java/org/apache/solr/handler/component/ShardDoc.java
deleted file mode 100644
index 2935aa1..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/ShardDoc.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import org.apache.lucene.search.FieldDoc;
-import org.apache.solr.common.util.NamedList;
-
-public class ShardDoc extends FieldDoc {
-  public String shard;
-  public String shardAddress;  // TODO
-  
-  public int orderInShard;
-    // the position of this doc within the shard... this can be used
-    // to short-circuit comparisons if the shard is equal, and can
-    // also be used to break ties within the same shard.
-
-  public Object id;
-    // this is currently the uniqueKeyField but
-    // may be replaced with internal docid in a future release.
-
-  public NamedList sortFieldValues;
-  // sort field values for *all* docs in a particular shard.
-  // this doc's values are in position orderInShard
-
-  // TODO: store the SolrDocument here?
-  // Store the order in the merged list for lookup when getting stored fields?
-  // (other components need this ordering to store data in order, like highlighting)
-  // but we shouldn't expose uniqueKey (have a map by it) until the stored-field
-  // retrieval stage.
-
-  public int positionInResponse;
-  // the ordinal position in the merged response arraylist  
-
-  public ShardDoc(float score, Object[] fields, Object uniqueId, String shard) {
-      super(-1, score, fields);
-      this.id = uniqueId;
-      this.shard = shard;
-  }
-
-  public ShardDoc() {
-    super(-1, Float.NaN);
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ShardDoc shardDoc = (ShardDoc) o;
-
-    if (id != null ? !id.equals(shardDoc.id) : shardDoc.id != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    return id != null ? id.hashCode() : 0;
-  }
-
-  @Override
-  public String toString(){
-    return "id="+id
-            +" ,score="+score
-            +" ,shard="+shard
-            +" ,orderInShard="+orderInShard
-            +" ,positionInResponse="+positionInResponse
-            +" ,sortFieldValues="+sortFieldValues;
-  }
-}
\ No newline at end of file
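Note that equals() and hashCode() above are defined on id alone, so two ShardDocs with the same unique key compare equal even when they carry different scores from different shards; this is what lets the resultIds map in ResponseBuilder deduplicate documents across shards. A small illustrative check (values invented):

    ShardDoc a = new ShardDoc(1.2f, new Object[0], "doc42", "shard1");
    ShardDoc b = new ShardDoc(0.3f, new Object[0], "doc42", "shard2");
    assert a.equals(b) && a.hashCode() == b.hashCode();  // same unique key, same doc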

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/ShardFieldSortedHitQueue.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ShardFieldSortedHitQueue.java b/solr/core/src/java/org/apache/solr/handler/component/ShardFieldSortedHitQueue.java
deleted file mode 100644
index ef0e624..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/ShardFieldSortedHitQueue.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Comparator;
-import java.util.List;
-
-import org.apache.lucene.search.FieldComparator;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.SortField;
-import org.apache.lucene.util.PriorityQueue;
-import org.apache.solr.common.SolrException;
-
-import static org.apache.solr.common.SolrException.ErrorCode.SERVER_ERROR;
-
-// used by distributed search to merge results.
-public class ShardFieldSortedHitQueue extends PriorityQueue<ShardDoc> {
-
-  /** Stores a comparator corresponding to each field being sorted by */
-  protected Comparator<ShardDoc>[] comparators;
-
-  /** Stores the sort criteria being used. */
-  protected SortField[] fields;
-
-  /** The order of these fieldNames should correspond to the order of sort field values retrieved from the shard */
-  protected List<String> fieldNames = new ArrayList<>();
-
-  public ShardFieldSortedHitQueue(SortField[] fields, int size, IndexSearcher searcher) {
-    super(size);
-    final int n = fields.length;
-    //noinspection unchecked
-    comparators = new Comparator[n];
-    this.fields = new SortField[n];
-    for (int i = 0; i < n; ++i) {
-
-      // keep track of the named fields
-      SortField.Type type = fields[i].getType();
-      if (type!=SortField.Type.SCORE && type!=SortField.Type.DOC) {
-        fieldNames.add(fields[i].getField());
-      }
-
-      String fieldname = fields[i].getField();
-      comparators[i] = getCachedComparator(fields[i], searcher);
-
-      if (fields[i].getType() == SortField.Type.STRING) {
-        this.fields[i] = new SortField(fieldname, SortField.Type.STRING,
-            fields[i].getReverse());
-      } else {
-        this.fields[i] = new SortField(fieldname, fields[i].getType(),
-            fields[i].getReverse());
-      }
-
-      //System.out.println("%%%%%%%%%%%%%%%%%% got "+fields[i].getType() +"   for "+ fieldname +"  fields[i].getReverse(): "+fields[i].getReverse());
-    }
-  }
-
-  @Override
-  protected boolean lessThan(ShardDoc docA, ShardDoc docB) {
-    // If these docs are from the same shard, then the relative order
-    // is how they appeared in the response from that shard.    
-    if (docA.shard == docB.shard) {
-      // if docA has a smaller position, it should be "larger" so it
-      // comes before docB.
-      // This will handle sorting by docid within the same shard
-
-      // comment this out to test comparators.
-      return !(docA.orderInShard < docB.orderInShard);
-    }
-
-
-    // run comparators
-    final int n = comparators.length;
-    int c = 0;
-    for (int i = 0; i < n && c == 0; i++) {
-      c = (fields[i].getReverse()) ? comparators[i].compare(docB, docA)
-          : comparators[i].compare(docA, docB);
-    }
-
-    // break ties by comparing shards (similar to using docid):
-    // smaller docids beat larger ids, so reverse the natural ordering
-    if (c == 0) {
-      c = -docA.shard.compareTo(docB.shard);
-    }
-
-    return c < 0;
-  }
-
-  Comparator<ShardDoc> getCachedComparator(SortField sortField, IndexSearcher searcher) {
-    SortField.Type type = sortField.getType();
-    if (type == SortField.Type.SCORE) {
-      return (o1, o2) -> {
-        final float f1 = o1.score;
-        final float f2 = o2.score;
-        if (f1 < f2)
-          return -1;
-        if (f1 > f2)
-          return 1;
-        return 0;
-      };
-    } else if (type == SortField.Type.REWRITEABLE) {
-      try {
-        sortField = sortField.rewrite(searcher);
-      } catch (IOException e) {
-        throw new SolrException(SERVER_ERROR, "Exception rewriting sort field " + sortField, e);
-      }
-    }
-    return comparatorFieldComparator(sortField);
-  }
-
-  abstract class ShardComparator implements Comparator<ShardDoc> {
-    final SortField sortField;
-    final String fieldName;
-    final int fieldNum;
-
-    public ShardComparator(SortField sortField) {
-      this.sortField = sortField;
-      this.fieldName = sortField.getField();
-      int fieldNum = 0;
-      for (int i=0; i<fieldNames.size(); i++) {
-        if (fieldNames.get(i).equals(fieldName)) {
-          fieldNum = i;
-          break;
-        }
-      }
-      this.fieldNum = fieldNum;
-    }
-
-    Object sortVal(ShardDoc shardDoc) {
-      assert(shardDoc.sortFieldValues.getName(fieldNum).equals(fieldName));
-      List lst = (List)shardDoc.sortFieldValues.getVal(fieldNum);
-      return lst.get(shardDoc.orderInShard);
-    }
-  }
-
-  Comparator<ShardDoc> comparatorFieldComparator(SortField sortField) {
-    final FieldComparator fieldComparator = sortField.getComparator(0, 0);
-    return new ShardComparator(sortField) {
-      // Since the PriorityQueue keeps the biggest elements by default,
-      // we need to reverse the field compare ordering so that the
-      // smallest elements are kept instead of the largest... hence
-      // the negative sign.
-      @Override
-      public int compare(final ShardDoc o1, final ShardDoc o2) {
-        //noinspection unchecked
-        return -fieldComparator.compareValues(sortVal(o1), sortVal(o2));
-      }
-    };
-  }
-}
\ No newline at end of file
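
The sign flip in comparatorFieldComparator is the subtle part; here is an
analogous, self-contained illustration using java.util.PriorityQueue (not
Lucene's) of how a reversed comparator keeps the smallest elements:

    import java.util.Comparator;
    import java.util.PriorityQueue;

    // A bounded max-heap keeps the N smallest values: the head is the
    // current worst (largest) element and is evicted first, mirroring the
    // negation in comparatorFieldComparator above.
    PriorityQueue<Integer> topN = new PriorityQueue<>(Comparator.reverseOrder());
    for (int v : new int[]{5, 1, 9, 3}) {
      topN.add(v);
      if (topN.size() > 2) topN.poll();  // drop the largest of the three
    }
    // topN now contains 1 and 3, the two smallest inputs.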

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/ShardHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ShardHandler.java b/solr/core/src/java/org/apache/solr/handler/component/ShardHandler.java
deleted file mode 100644
index 4c89806..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/ShardHandler.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-import org.apache.solr.common.params.ModifiableSolrParams;
-
-public abstract class ShardHandler {
-  public abstract void prepDistributed(ResponseBuilder rb);
-  public abstract void submit(ShardRequest sreq, String shard, ModifiableSolrParams params);
-  public abstract ShardResponse takeCompletedIncludingErrors();
-  public abstract ShardResponse takeCompletedOrError();
-  public abstract void cancelAll();
-  public abstract ShardHandlerFactory getShardHandlerFactory();
-}
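
A hedged sketch of the submit/collect cycle this contract implies; the
factory variable, shard URL, and error handling are illustrative assumptions,
not taken from the diff:

    ShardHandler handler = shardHandlerFactory.getShardHandler();
    ShardRequest sreq = new ShardRequest();
    sreq.purpose = ShardRequest.PURPOSE_GET_TOP_IDS;
    sreq.params = new ModifiableSolrParams();
    handler.submit(sreq, "http://host:8983/solr/collection1", sreq.params);

    // Drain responses; takeCompletedOrError() is assumed to hand back the
    // first failure so the caller can cancel the outstanding requests.
    ShardResponse rsp = handler.takeCompletedOrError();
    if (rsp != null && rsp.getException() != null) {
      handler.cancelAll();
    }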

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/ShardHandlerFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ShardHandlerFactory.java b/solr/core/src/java/org/apache/solr/handler/component/ShardHandlerFactory.java
deleted file mode 100644
index 49b7679..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/ShardHandlerFactory.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-import com.google.common.collect.ImmutableMap;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.core.PluginInfo;
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.solr.util.plugin.PluginInfoInitialized;
-
-import java.util.Collections;
-import java.util.Locale;
-
-public abstract class ShardHandlerFactory {
-
-  public abstract ShardHandler getShardHandler();
-
-  public abstract void close();
-
-  /**
-   * Create a new ShardHandlerFactory instance
-   * @param info    a PluginInfo object defining which type to create.  If null,
-   *                the default {@link HttpShardHandlerFactory} will be used
-   * @param loader  a SolrResourceLoader used to find the ShardHandlerFactory classes
-   * @return a new, initialized ShardHandlerFactory instance
-   */
-  public static ShardHandlerFactory newInstance(PluginInfo info, SolrResourceLoader loader) {
-    if (info == null)
-      info = DEFAULT_SHARDHANDLER_INFO;
-
-    try {
-      ShardHandlerFactory shf = loader.findClass(info.className, ShardHandlerFactory.class).newInstance();
-      if (PluginInfoInitialized.class.isAssignableFrom(shf.getClass()))
-        PluginInfoInitialized.class.cast(shf).init(info);
-      return shf;
-    }
-    catch (Exception e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          String.format(Locale.ROOT, "Error instantiating shardHandlerFactory class [%s]: %s",
-                        info.className, e.getMessage()), e);
-    }
-
-  }
-
-  public static final PluginInfo DEFAULT_SHARDHANDLER_INFO =
-      new PluginInfo("shardHandlerFactory", ImmutableMap.of("class", HttpShardHandlerFactory.class.getName()),
-          null, Collections.<PluginInfo>emptyList());
-}
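
An illustrative use of newInstance: passing a null PluginInfo falls back to
DEFAULT_SHARDHANDLER_INFO and therefore HttpShardHandlerFactory ('loader' is
an assumed SolrResourceLoader):

    ShardHandlerFactory factory = ShardHandlerFactory.newInstance(null, loader);
    ShardHandler handler = factory.getShardHandler();
    // ... fan requests out to shards via 'handler' ...
    factory.close();  // release the factory's resources on shutdown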

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/ShardRequest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ShardRequest.java b/solr/core/src/java/org/apache/solr/handler/component/ShardRequest.java
deleted file mode 100644
index f7c05d2..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/ShardRequest.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import org.apache.solr.common.params.ModifiableSolrParams;
-
-import java.util.ArrayList;
-import java.util.List;
-
-
-// todo... when finalized make accessors
-public class ShardRequest {
-  public final static String[] ALL_SHARDS = null;
-
-  public final static int PURPOSE_PRIVATE         = 0x01;
-  public final static int PURPOSE_GET_TERM_DFS    = 0x02;
-  public final static int PURPOSE_GET_TOP_IDS     = 0x04;
-  public final static int PURPOSE_REFINE_TOP_IDS  = 0x08;
-  public final static int PURPOSE_GET_FACETS      = 0x10;
-  public final static int PURPOSE_REFINE_FACETS   = 0x20;
-  public final static int PURPOSE_GET_FIELDS      = 0x40;
-  public final static int PURPOSE_GET_HIGHLIGHTS  = 0x80;
-  public final static int PURPOSE_GET_DEBUG       = 0x100;
-  public final static int PURPOSE_GET_STATS       = 0x200;
-  public final static int PURPOSE_GET_TERMS       = 0x400;
-  public final static int PURPOSE_GET_TOP_GROUPS  = 0x800;
-  public final static int PURPOSE_GET_MLT_RESULTS = 0x1000;
-  public final static int PURPOSE_REFINE_PIVOT_FACETS = 0x2000;
-  public final static int PURPOSE_SET_TERM_STATS  = 0x4000;
-  public final static int PURPOSE_GET_TERM_STATS  = 0x8000;
-
-  public int purpose;  // the purpose of this request
-
-  public String[] shards;  // the shards this request should be sent to, null for all
-
-  public ModifiableSolrParams params;
-
-
-  /** list of responses... filled out by framework */
-  public List<ShardResponse> responses = new ArrayList<>();
-
-  /** actual shards to send the request to, filled out by framework */
-  public String[] actualShards;
-
-  /** may be null */
-  public String nodeName;
-
-  // TODO: one could store a list of numbers to correlate where returned docs
-  // go in the top-level response rather than looking up by id...
-  // this would work well if we ever transitioned to using internal ids and
-  // didn't require a uniqueId
-
-  @Override
-  public String toString() {
-    return "ShardRequest:{params=" + params
-            + ", purpose=" + Integer.toHexString(purpose)
-            + ", nResponses =" + responses.size()
-            + "}";
-  }
-}
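
Because the purpose constants are distinct powers of two, they compose with
bitwise OR and are tested with AND; a small standalone example:

    ShardRequest sreq = new ShardRequest();
    sreq.purpose = ShardRequest.PURPOSE_GET_TOP_IDS | ShardRequest.PURPOSE_GET_FACETS;

    boolean wantsFacets = (sreq.purpose & ShardRequest.PURPOSE_GET_FACETS) != 0;  // true
    boolean wantsStats  = (sreq.purpose & ShardRequest.PURPOSE_GET_STATS)  != 0;  // false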

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/ShardResponse.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ShardResponse.java b/solr/core/src/java/org/apache/solr/handler/component/ShardResponse.java
deleted file mode 100644
index 5da721c..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/ShardResponse.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.common.SolrException;
-
-public final class ShardResponse {
-  private ShardRequest req;
-  private String shard;
-  private String nodeName;
-  private String shardAddress;  // the specific shard that this response was received from
-  private int rspCode;
-  private Throwable exception;
-  private SolrResponse rsp;
-
-  @Override
-  public String toString() {
-    return "ShardResponse:{shard="+shard+",shardAddress="+shardAddress
-            +"\n\trequest=" + req
-            +"\n\tresponse=" + rsp
-            + (exception==null ? "" : "\n\texception="+ SolrException.toStr(exception))
-            +"\n}";
-  }
-
-  public Throwable getException()
-  {
-    return exception;
-  }
-
-  public ShardRequest getShardRequest()
-  {
-    return req;
-  }
-
-  public SolrResponse getSolrResponse()
-  {
-    return rsp;
-  }
-
-  public String getShard()
-  {
-    return shard;
-  }
-
-  public String getNodeName()
-  {
-    return nodeName;
-  }
-  
-  public void setShardRequest(ShardRequest req)
-  {
-    this.req = req;
-  }
-
-  public void setSolrResponse(SolrResponse rsp)
-  {
-    this.rsp = rsp;
-  }
-
-  void setShard(String shard)
-  {
-    this.shard = shard;
-  }
-
-  void setException(Throwable exception)
-  {
-    this.exception = exception;
-  }
-
-  void setResponseCode(int rspCode)
-  {
-    this.rspCode = rspCode;
-  }
-  
-  void setNodeName(String nodeName) 
-  {
-    this.nodeName = nodeName;
-  }
-
-  /** The shard address that returned this response. Example: "http://localhost:8983/solr" */
-  public String getShardAddress() { return this.shardAddress; }
-
-  void setShardAddress(String addr) { this.shardAddress = addr; }
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/ShufflingReplicaListTransformer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ShufflingReplicaListTransformer.java b/solr/core/src/java/org/apache/solr/handler/component/ShufflingReplicaListTransformer.java
deleted file mode 100644
index 428e348..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/ShufflingReplicaListTransformer.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.util.Collections;
-import java.util.List;
-import java.util.Random;
-
-class ShufflingReplicaListTransformer implements ReplicaListTransformer {
-
-  private final Random r;
-
-  public ShufflingReplicaListTransformer(Random r)
-  {
-    this.r = r;
-  }
-
-  public void transform(List<?> choices)
-  {
-    if (choices.size() > 1) {
-      Collections.shuffle(choices, r);
-    }
-  }
-
-}
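
A short usage sketch (the URLs are placeholders); note that transform leaves
a single-element list untouched, since there is nothing to shuffle:

    List<String> urls = new ArrayList<>(Arrays.asList(
        "http://hostA:8983/solr", "http://hostB:8983/solr", "http://hostC:8983/solr"));
    // The fixed seed only makes this sketch deterministic; real callers pass
    // an unseeded Random so load spreads across replicas.
    new ShufflingReplicaListTransformer(new Random(42)).transform(urls);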

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/SortedDateStatsValues.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/SortedDateStatsValues.java b/solr/core/src/java/org/apache/solr/handler/component/SortedDateStatsValues.java
deleted file mode 100644
index 0df45c7..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/SortedDateStatsValues.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.component;
-
-import java.io.IOException;
-import java.util.Date;
-import java.util.Map;
-
-import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.SortedNumericDocValues;
-import org.apache.lucene.util.BytesRef;
-import org.apache.solr.common.util.NamedList;
-
-public class SortedDateStatsValues implements StatsValues {
-
-  private final DateStatsValues dsv;
-  private final String fieldName;
-  private SortedNumericDocValues sndv;
-
-
-  public SortedDateStatsValues(DateStatsValues dsv, StatsField field) {
-    this.dsv = dsv;
-    this.fieldName = field.getSchemaField().getName();
-  }
-
-  @Override
-  public void accumulate(NamedList stv) {
-    dsv.accumulate(stv);
-  }
-
-  @Override
-  public void accumulate(int docId) throws IOException {
-    if (!sndv.advanceExact(docId)) {
-      missing();
-    } else {
-      for (int i = 0; i < sndv.docValueCount(); i++) {
-        dsv.accumulate(new Date(sndv.nextValue()), 1);
-      }
-    }
-
-  }
-
-  @Override
-  public void accumulate(BytesRef value, int count) {
-    dsv.accumulate(value, count);
-  }
-
-  @Override
-  public void missing() {
-    dsv.missing();
-  }
-
-  @Override
-  public void addMissing(int count) {
-    dsv.addMissing(count);
-  }
-
-  @Override
-  public void addFacet(String facetName, Map<String,StatsValues> facetValues) {
-    dsv.addFacet(facetName, facetValues);
-  }
-
-  @Override
-  public NamedList<?> getStatsValues() {
-    return dsv.getStatsValues();
-  }
-
-  @Override
-  public void setNextReader(LeafReaderContext ctx) throws IOException {
-    sndv = DocValues.getSortedNumeric(ctx.reader(), fieldName);
-    assert sndv != null;
-  }
-}
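
The accumulate(int) method above follows the standard iterator contract for
SortedNumericDocValues; restated in isolation as a hedged sketch:

    // advanceExact positions the iterator on docId and reports whether the
    // document has any values; docValueCount/nextValue then walk that
    // document's values in order, each an epoch-millis long in this class.
    if (sndv.advanceExact(docId)) {
      for (int i = 0; i < sndv.docValueCount(); i++) {
        Date value = new Date(sndv.nextValue());
        // ... fold 'value' into the wrapped DateStatsValues ...
      }
    } else {
      // no stored value for this document: record it as missing
    }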

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/SortedNumericStatsValues.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/SortedNumericStatsValues.java b/solr/core/src/java/org/apache/solr/handler/component/SortedNumericStatsValues.java
deleted file mode 100644
index 007fb28..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/SortedNumericStatsValues.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.SortedNumericDocValues;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.NumericUtils;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.schema.NumberType;
-
-public class SortedNumericStatsValues implements StatsValues {
-  
-  private final NumericStatsValues nsv;
-  private final String fieldName;
-  private final NumberType numberType;
-  private SortedNumericDocValues sndv;
-  
-  
-  public SortedNumericStatsValues(NumericStatsValues nsv, StatsField field) {
-    this.nsv = nsv;
-    this.fieldName = field.getSchemaField().getName();
-    this.numberType = field.getSchemaField().getType().getNumberType();
-  }
-
-  @Override
-  public void accumulate(NamedList stv) {
-    nsv.accumulate(stv);
-  }
-  
-  @Override
-  public void accumulate(int docId) throws IOException {
-    if (!sndv.advanceExact(docId)) {
-      missing();
-    } else {
-      for (int i = 0; i < sndv.docValueCount(); i++) {
-        nsv.accumulate(toCorrectType(sndv.nextValue()), 1);
-      }
-    }
-    
-  }
-
-  private Number toCorrectType(long value) {
-    switch (numberType) {
-      case INTEGER:
-      case LONG:
-        return value;
-      case FLOAT:
-        return NumericUtils.sortableIntToFloat((int)value);
-      case DOUBLE:
-        return NumericUtils.sortableLongToDouble(value);
-      default:
-        throw new AssertionError("Unsupported number type");
-    }
-  }
-
-  @Override
-  public void accumulate(BytesRef value, int count) {
-    nsv.accumulate(value, count);
-  }
-
-  @Override
-  public void missing() {
-    nsv.missing();
-  }
-
-  @Override
-  public void addMissing(int count) {
-    nsv.addMissing(count);
-  }
-
-  @Override
-  public void addFacet(String facetName, Map<String,StatsValues> facetValues) {
-    nsv.addFacet(facetName, facetValues);
-  }
-
-  @Override
-  public NamedList<?> getStatsValues() {
-    return nsv.getStatsValues();
-  }
-
-  @Override
-  public void setNextReader(LeafReaderContext ctx) throws IOException {
-    sndv = DocValues.getSortedNumeric(ctx.reader(), fieldName);
-    assert sndv != null;
-  }
-
-}
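
The toCorrectType step exists because float and double doc values are stored
as sortable integer bits rather than raw IEEE bits; a standalone round trip
using org.apache.lucene.util.NumericUtils:

    double d = 3.25;
    long sortable = NumericUtils.doubleToSortableLong(d);    // encode for storage
    assert NumericUtils.sortableLongToDouble(sortable) == d; // lossless decode

    float f = 1.5f;
    int sortableInt = NumericUtils.floatToSortableInt(f);
    assert NumericUtils.sortableIntToFloat(sortableInt) == f;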

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/SpatialHeatmapFacets.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/SpatialHeatmapFacets.java b/solr/core/src/java/org/apache/solr/handler/component/SpatialHeatmapFacets.java
deleted file mode 100644
index 8814953..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/SpatialHeatmapFacets.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.Map;
-
-import org.apache.solr.common.params.FacetParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.search.DocSet;
-import org.apache.solr.search.facet.FacetHeatmap;
-import org.apache.solr.search.facet.FacetMerger;
-import org.apache.solr.search.facet.FacetRequest;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/** A 2D spatial faceting summary of a rectangular region. Used by {@link org.apache.solr.handler.component.FacetComponent}
- * and {@link org.apache.solr.request.SimpleFacets}.
- * @see FacetHeatmap
- */
-public class SpatialHeatmapFacets {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  //underneath facet_counts we put this here:
-  public static final String RESPONSE_KEY = "facet_heatmaps";
-
-  /** Called by {@link org.apache.solr.request.SimpleFacets} to compute heatmap facets. */
-  public static NamedList<Object> getHeatmapForField(String fieldKey, String fieldName, ResponseBuilder rb, SolrParams params, DocSet docSet) throws IOException {
-    final FacetRequest facetRequest = createHeatmapRequest(fieldKey, fieldName, rb, params);
-    return (NamedList) facetRequest.process(rb.req, docSet);
-  }
-
-  private static FacetRequest createHeatmapRequest(String fieldKey, String fieldName, ResponseBuilder rb, SolrParams params) {
-    Map<String, Object> jsonFacet = new HashMap<>();
-    jsonFacet.put("type", "heatmap");
-    jsonFacet.put("field", fieldName);
-    // jsonFacet has typed values, unlike SolrParams, which is all strings
-    jsonFacet.put(FacetHeatmap.GEOM_PARAM, params.getFieldParam(fieldKey, FacetParams.FACET_HEATMAP_GEOM));
-    jsonFacet.put(FacetHeatmap.LEVEL_PARAM, params.getFieldInt(fieldKey, FacetParams.FACET_HEATMAP_LEVEL));
-    jsonFacet.put(FacetHeatmap.DIST_ERR_PCT_PARAM, params.getFieldDouble(fieldKey, FacetParams.FACET_HEATMAP_DIST_ERR_PCT));
-    jsonFacet.put(FacetHeatmap.DIST_ERR_PARAM, params.getFieldDouble(fieldKey, FacetParams.FACET_HEATMAP_DIST_ERR));
-    jsonFacet.put(FacetHeatmap.MAX_CELLS_PARAM, params.getFieldInt(fieldKey, FacetParams.FACET_HEATMAP_MAX_CELLS));
-    jsonFacet.put(FacetHeatmap.FORMAT_PARAM, params.getFieldParam(fieldKey, FacetParams.FACET_HEATMAP_FORMAT));
-
-    return FacetRequest.parseOneFacetReq(rb.req, jsonFacet);
-  }
-
-  //
-  // Distributed Support
-  //
-
-  /** Parses request to "HeatmapFacet" instances. */
-  public static LinkedHashMap<String,HeatmapFacet> distribParse(SolrParams params, ResponseBuilder rb) {
-    final LinkedHashMap<String, HeatmapFacet> heatmapFacets = new LinkedHashMap<>();
-    final String[] heatmapFields = params.getParams(FacetParams.FACET_HEATMAP);
-    if (heatmapFields != null) {
-      for (String heatmapField : heatmapFields) {
-        HeatmapFacet facet = new HeatmapFacet(rb, heatmapField);
-        heatmapFacets.put(facet.getKey(), facet);
-      }
-    }
-    return heatmapFacets;
-  }
-
-  /** Called by FacetComponent's impl of
-   * {@link org.apache.solr.handler.component.SearchComponent#modifyRequest(ResponseBuilder, SearchComponent, ShardRequest)}. */
-  public static void distribModifyRequest(ShardRequest sreq, LinkedHashMap<String, HeatmapFacet> heatmapFacets) {
-    // Set the format to PNG because it's compressed and it's the only format we have code to read at the moment.
-    // We re-write the facet.heatmap list with PNG format in local-params where it has highest precedence.
-
-    //Remove existing heatmap field param vals; we will rewrite
-    sreq.params.remove(FacetParams.FACET_HEATMAP);
-    for (HeatmapFacet facet : heatmapFacets.values()) {
-      //add heatmap field param
-      ModifiableSolrParams newLocalParams = new ModifiableSolrParams();
-      if (facet.localParams != null) {
-        newLocalParams.add(facet.localParams);
-      }
-      // Set format to PNG; it's the only one we parse
-      newLocalParams.set(FacetParams.FACET_HEATMAP_FORMAT, FacetHeatmap.FORMAT_PNG);
-      sreq.params.add(FacetParams.FACET_HEATMAP,
-          newLocalParams.toLocalParamsString() + facet.facetOn);
-    }
-  }
-
-  /** Called by FacetComponent.countFacets which is in turn called by FC's impl of
-   * {@link org.apache.solr.handler.component.SearchComponent#handleResponses(ResponseBuilder, ShardRequest)}. */
-  @SuppressWarnings("unchecked")
-  public static void distribHandleResponse(LinkedHashMap<String, HeatmapFacet> heatmapFacets, NamedList srsp_facet_counts) {
-    NamedList<NamedList<Object>> facet_heatmaps = (NamedList<NamedList<Object>>) srsp_facet_counts.get(RESPONSE_KEY);
-    if (facet_heatmaps == null) {
-      return;
-    }
-    // (should the caller handle the above logic?  Arguably yes.)
-    for (Map.Entry<String, NamedList<Object>> entry : facet_heatmaps) {
-      String fieldKey = entry.getKey();
-      NamedList<Object> shardNamedList = entry.getValue();
-      final HeatmapFacet facet = heatmapFacets.get(fieldKey);
-      if (facet == null) {
-        log.error("received heatmap for field/key {} that we weren't expecting", fieldKey);
-        continue;
-      }
-      facet.jsonFacetMerger.merge(shardNamedList, null);//merge context not needed (null)
-    }
-  }
-
-
-  /** Called by FacetComponent's impl of
-   * {@link org.apache.solr.handler.component.SearchComponent#finishStage(ResponseBuilder)}. */
-  public static NamedList distribFinish(LinkedHashMap<String, HeatmapFacet> heatmapInfos, ResponseBuilder rb) {
-    NamedList<NamedList<Object>> result = new SimpleOrderedMap<>();
-    for (Map.Entry<String, HeatmapFacet> entry : heatmapInfos.entrySet()) {
-      final HeatmapFacet facet = entry.getValue();
-      result.add(entry.getKey(), (NamedList<Object>) facet.jsonFacetMerger.getMergedResult());
-    }
-    return result;
-  }
-
-  /** Goes in {@link org.apache.solr.handler.component.FacetComponent.FacetInfo#heatmapFacets}, created by
-   * {@link #distribParse(org.apache.solr.common.params.SolrParams, ResponseBuilder)}. */
-  public static class HeatmapFacet extends FacetComponent.FacetBase {
-    //note: 'public' follows suit with FacetBase & existing subclasses... though should it really be public?
-
-    public FacetMerger jsonFacetMerger;
-
-    public HeatmapFacet(ResponseBuilder rb, String facetStr) {
-      super(rb, FacetParams.FACET_HEATMAP, facetStr);
-      //note: logic in super (FacetBase) is partially redundant with SimpleFacet.parseParams :-(
-      final SolrParams params = SolrParams.wrapDefaults(localParams, rb.req.getParams());
-      final FacetRequest heatmapRequest = createHeatmapRequest(getKey(), facetOn, rb, params);
-      jsonFacetMerger = heatmapRequest.createFacetMerger(null);
-    }
-  }
-
-  // Note: originally there was a lot more code here but it migrated to the JSON Facet API as "FacetHeatmap"
-
-}
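
What the PNG rewrite in distribModifyRequest produces, conceptually;
'geoField' is a placeholder field name:

    ModifiableSolrParams local = new ModifiableSolrParams();
    local.set(FacetParams.FACET_HEATMAP_FORMAT, FacetHeatmap.FORMAT_PNG);
    // toLocalParamsString() renders the {!...} prefix, so each shard sees a
    // value like: {!facet.heatmap.format=png}geoField
    String rewritten = local.toLocalParamsString() + "geoField";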


[47/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/Overseer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/Overseer.java b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
deleted file mode 100644
index 74781d7..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/Overseer.java
+++ /dev/null
@@ -1,840 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import com.codahale.metrics.Timer;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.impl.ClusterStateProvider;
-import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
-import org.apache.solr.cloud.autoscaling.OverseerTriggerThread;
-import org.apache.solr.cloud.overseer.ClusterStateMutator;
-import org.apache.solr.cloud.overseer.CollectionMutator;
-import org.apache.solr.cloud.overseer.NodeMutator;
-import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.cloud.overseer.ReplicaMutator;
-import org.apache.solr.cloud.overseer.SliceMutator;
-import org.apache.solr.cloud.overseer.ZkStateWriter;
-import org.apache.solr.cloud.overseer.ZkWriteCommand;
-import org.apache.solr.common.SolrCloseable;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.util.IOUtils;
-import org.apache.solr.common.util.ObjectReleaseTracker;
-import org.apache.solr.common.util.Pair;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.CloudConfig;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.handler.admin.CollectionsHandler;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.logging.MDCLoggingContext;
-import org.apache.solr.update.UpdateShardHandler;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.ID;
-
-/**
- * Cluster leader. Responsible for processing state updates, node assignments, creating/deleting
- * collections, shards, replicas and setting various properties.
- */
-public class Overseer implements SolrCloseable {
-  public static final String QUEUE_OPERATION = "operation";
-
-  // System properties are used in tests to make them run fast
-  public static final int STATE_UPDATE_DELAY = ZkStateReader.STATE_UPDATE_DELAY;
-  public static final int STATE_UPDATE_BATCH_SIZE = Integer.getInteger("solr.OverseerStateUpdateBatchSize", 10000);
-  public static final int STATE_UPDATE_MAX_QUEUE = 20000;
-
-  public static final int NUM_RESPONSES_TO_STORE = 10000;
-  public static final String OVERSEER_ELECT = "/overseer_elect";
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  enum LeaderStatus {DONT_KNOW, NO, YES}
-
-  private class ClusterStateUpdater implements Runnable, Closeable {
-
-    private final ZkStateReader reader;
-    private final SolrZkClient zkClient;
-    private final String myId;
-    //queue where everybody can throw tasks
-    private final ZkDistributedQueue stateUpdateQueue;
-    //TODO remove in 9.0, we do not push messages into this queue anymore
-    //Internal queue where the overseer stores events that have not yet been published into cloudstate
-    //If the Overseer dies while draining the main queue, a new overseer will start from this queue
-    private final ZkDistributedQueue workQueue;
-    // Internal map which holds the information about running tasks.
-    private final DistributedMap runningMap;
-    // Internal map which holds the information about successfully completed tasks.
-    private final DistributedMap completedMap;
-    // Internal map which holds the information about failed tasks.
-    private final DistributedMap failureMap;
-
-    private final Stats zkStats;
-
-    private boolean isClosed = false;
-
-    public ClusterStateUpdater(final ZkStateReader reader, final String myId, Stats zkStats) {
-      this.zkClient = reader.getZkClient();
-      this.zkStats = zkStats;
-      this.stateUpdateQueue = getStateUpdateQueue(zkClient, zkStats);
-      this.workQueue = getInternalWorkQueue(zkClient, zkStats);
-      this.failureMap = getFailureMap(zkClient);
-      this.runningMap = getRunningMap(zkClient);
-      this.completedMap = getCompletedMap(zkClient);
-      this.myId = myId;
-      this.reader = reader;
-    }
-
-    public Stats getStateUpdateQueueStats() {
-      return stateUpdateQueue.getZkStats();
-    }
-
-    public Stats getWorkQueueStats()  {
-      return workQueue.getZkStats();
-    }
-
-    @Override
-    public void run() {
-      MDCLoggingContext.setNode(zkController.getNodeName());
-
-      LeaderStatus isLeader = amILeader();
-      while (isLeader == LeaderStatus.DONT_KNOW) {
-        log.debug("am_i_leader unclear {}", isLeader);
-        isLeader = amILeader();  // not a no, not a yes, try asking again
-      }
-
-      log.info("Starting to work on the main queue : {}", LeaderElector.getNodeName(myId));
-      try {
-        ZkStateWriter zkStateWriter = null;
-        ClusterState clusterState = null;
-        boolean refreshClusterState = true; // let's refresh in the first iteration
-        // we write updates in batches, but if an exception is thrown while writing the new clusterstate,
-        // we cannot tell which message is the bad one, so we re-process the nodes one by one
-        int fallbackQueueSize = Integer.MAX_VALUE;
-        ZkDistributedQueue fallbackQueue = workQueue;
-        while (!this.isClosed) {
-          isLeader = amILeader();
-          if (LeaderStatus.NO == isLeader) {
-            break;
-          }
-          else if (LeaderStatus.YES != isLeader) {
-            log.debug("am_i_leader unclear {}", isLeader);
-            continue; // not a no, not a yes, try asking again
-          }
-
-          //TODO consider removing 'refreshClusterState' and simply checking whether clusterState is null
-          if (refreshClusterState) {
-            try {
-              reader.forciblyRefreshAllClusterStateSlow();
-              clusterState = reader.getClusterState();
-              zkStateWriter = new ZkStateWriter(reader, stats);
-              refreshClusterState = false;
-
-              // if there were any errors while processing
-              // the state queue, items would have been left in the
-              // work queue so let's process those first
-              byte[] data = fallbackQueue.peek();
-              while (fallbackQueueSize > 0 && data != null)  {
-                final ZkNodeProps message = ZkNodeProps.load(data);
-                log.debug("processMessage: fallbackQueueSize: {}, message = {}", fallbackQueue.getZkStats().getQueueLength(), message);
-                // force flush to ZK after each message because there is no fallback if workQueue items
-                // are removed from workQueue but fail to be written to ZK
-                try {
-                  clusterState = processQueueItem(message, clusterState, zkStateWriter, false, null);
-                } catch (Exception e) {
-                  if (isBadMessage(e)) {
-                    log.warn("Exception when process message = {}, consider as bad message and poll out from the queue", message);
-                    fallbackQueue.poll();
-                  }
-                  throw e;
-                }
-                fallbackQueue.poll(); // poll-ing removes the element we got by peek-ing
-                data = fallbackQueue.peek();
-                fallbackQueueSize--;
-              }
-              // force flush at the end of the loop; if there are no pending updates this is a no-op call
-              clusterState = zkStateWriter.writePendingUpdates();
-              // the workQueue is empty now, use stateUpdateQueue as fallback queue
-              fallbackQueue = stateUpdateQueue;
-              fallbackQueueSize = 0;
-            } catch (KeeperException.SessionExpiredException e) {
-              log.warn("Solr cannot talk to ZK, exiting Overseer work queue loop", e);
-              return;
-            } catch (InterruptedException e) {
-              Thread.currentThread().interrupt();
-              return;
-            } catch (Exception e) {
-              log.error("Exception in Overseer when process message from work queue, retrying", e);
-              refreshClusterState = true;
-              continue;
-            }
-          }
-
-          LinkedList<Pair<String, byte[]>> queue = null;
-          try {
-            // We do not need to filter any nodes here because all processed nodes are removed once we flush clusterstate
-            queue = new LinkedList<>(stateUpdateQueue.peekElements(1000, 3000L, (x) -> true));
-          } catch (KeeperException.SessionExpiredException e) {
-            log.warn("Solr cannot talk to ZK, exiting Overseer main queue loop", e);
-            return;
-          } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-            return;
-          } catch (Exception e) {
-            log.error("Exception in Overseer main queue loop", e);
-          }
-          try {
-            Set<String> processedNodes = new HashSet<>();
-            while (queue != null && !queue.isEmpty()) {
-              for (Pair<String, byte[]> head : queue) {
-                byte[] data = head.second();
-                final ZkNodeProps message = ZkNodeProps.load(data);
-                log.debug("processMessage: queueSize: {}, message = {} current state version: {}", stateUpdateQueue.getZkStats().getQueueLength(), message, clusterState.getZkClusterStateVersion());
-
-                processedNodes.add(head.first());
-                fallbackQueueSize = processedNodes.size();
-                // The callback will always be called on this thread
-                clusterState = processQueueItem(message, clusterState, zkStateWriter, true, () -> {
-                  stateUpdateQueue.remove(processedNodes);
-                  processedNodes.clear();
-                });
-              }
-              if (isClosed) break;
-              // if an event arrives within the next 100ms, batch it together with the current batch
-              queue = new LinkedList<>(stateUpdateQueue.peekElements(1000, 100, node -> !processedNodes.contains(node)));
-            }
-            fallbackQueueSize = processedNodes.size();
-            // we should force write all pending updates because the next iteration might sleep until there
-            // are more items in the main queue
-            clusterState = zkStateWriter.writePendingUpdates();
-            // clean work queue
-            stateUpdateQueue.remove(processedNodes);
-            processedNodes.clear();
-          } catch (KeeperException.SessionExpiredException e) {
-            log.warn("Solr cannot talk to ZK, exiting Overseer main queue loop", e);
-            return;
-          } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-            return;
-          } catch (Exception e) {
-            log.error("Exception in Overseer main queue loop", e);
-            refreshClusterState = true; // it might have been a bad version error
-          }
-        }
-      } finally {
-        log.info("Overseer Loop exiting : {}", LeaderElector.getNodeName(myId));
-        //do this in a separate thread because any wait is interrupted in this main thread
-        new Thread(this::checkIfIamStillLeader, "OverseerExitThread").start();
-      }
-    }
-
-    // Return true whenever the exception thrown by ZkStateWriter corresponds
-    // to an invalid state or 'bad' message (in which case we should remove that message from the queue)
-    private boolean isBadMessage(Exception e) {
-      if (e instanceof KeeperException) {
-        KeeperException ke = (KeeperException) e;
-        return ke.code() == KeeperException.Code.NONODE || ke.code() == KeeperException.Code.NODEEXISTS;
-      }
-      return !(e instanceof InterruptedException);
-    }
-
-    private ClusterState processQueueItem(ZkNodeProps message, ClusterState clusterState, ZkStateWriter zkStateWriter, boolean enableBatching, ZkStateWriter.ZkWriteCallback callback) throws Exception {
-      final String operation = message.getStr(QUEUE_OPERATION);
-      if (operation == null) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Message missing " + QUEUE_OPERATION + ":" + message);
-      }
-      List<ZkWriteCommand> zkWriteCommands = null;
-      final Timer.Context timerContext = stats.time(operation);
-      try {
-        zkWriteCommands = processMessage(clusterState, message, operation);
-        stats.success(operation);
-      } catch (Exception e) {
-        // generally there is nothing we can do - in most cases, we have
-        // an issue that will fail again on retry, or we cannot communicate with
-        // ZooKeeper, in which case another Overseer should take over
-        // TODO: if ordering for the message is not important, we could
-        // track retries and put it back on the end of the queue
-        log.error("Overseer could not process the current clusterstate state update message, skipping the message: " + message, e);
-        stats.error(operation);
-      } finally {
-        timerContext.stop();
-      }
-      if (zkWriteCommands != null) {
-        clusterState = zkStateWriter.enqueueUpdate(clusterState, zkWriteCommands, callback);
-        if (!enableBatching)  {
-          clusterState = zkStateWriter.writePendingUpdates();
-        }
-      }
-      return clusterState;
-    }
-
-    private void checkIfIamStillLeader() {
-      if (zkController != null && (zkController.getCoreContainer().isShutDown() || zkController.isClosed())) {
-        return; // shutting down, no need to go further
-      }
-      org.apache.zookeeper.data.Stat stat = new org.apache.zookeeper.data.Stat();
-      final String path = OVERSEER_ELECT + "/leader";
-      byte[] data;
-      try {
-        data = zkClient.getData(path, null, stat, true);
-      } catch (Exception e) {
-        log.error("could not read the "+path+" data" ,e);
-        return;
-      }
-      try {
-        Map m = (Map) Utils.fromJSON(data);
-        String id = (String) m.get(ID);
-        if(overseerCollectionConfigSetProcessor.getId().equals(id)){
-          try {
-            log.warn("I (id={}) am exiting, but I'm still the leader",
-                overseerCollectionConfigSetProcessor.getId());
-            zkClient.delete(path,stat.getVersion(),true);
-          } catch (KeeperException.BadVersionException e) {
-            // no problem, ignore it: some other Overseer has already taken over
-          } catch (Exception e) {
-            log.error("Could not delete my leader node "+path, e);
-          }
-
-        } else{
-          log.info("somebody else (id={}) has already taken up the overseer position", id);
-        }
-      } finally {
-        // if I am not shutting down, then I need to rejoin the election
-        try {
-          if (zkController != null && !zkController.getCoreContainer().isShutDown()) {
-            zkController.rejoinOverseerElection(null, false);
-          }
-        } catch (Exception e) {
-          log.warn("Unable to rejoinElection ",e);
-        }
-      }
-    }
-
-    private List<ZkWriteCommand> processMessage(ClusterState clusterState,
-        final ZkNodeProps message, final String operation) {
-      CollectionParams.CollectionAction collectionAction = CollectionParams.CollectionAction.get(operation);
-      if (collectionAction != null) {
-        switch (collectionAction) {
-          case CREATE:
-            return Collections.singletonList(new ClusterStateMutator(getSolrCloudManager()).createCollection(clusterState, message));
-          case DELETE:
-            return Collections.singletonList(new ClusterStateMutator(getSolrCloudManager()).deleteCollection(clusterState, message));
-          case CREATESHARD:
-            return Collections.singletonList(new CollectionMutator(getSolrCloudManager()).createShard(clusterState, message));
-          case DELETESHARD:
-            return Collections.singletonList(new CollectionMutator(getSolrCloudManager()).deleteShard(clusterState, message));
-          case ADDREPLICA:
-            return Collections.singletonList(new SliceMutator(getSolrCloudManager()).addReplica(clusterState, message));
-          case ADDREPLICAPROP:
-            return Collections.singletonList(new ReplicaMutator(getSolrCloudManager()).addReplicaProperty(clusterState, message));
-          case DELETEREPLICAPROP:
-            return Collections.singletonList(new ReplicaMutator(getSolrCloudManager()).deleteReplicaProperty(clusterState, message));
-          case BALANCESHARDUNIQUE:
-            ExclusiveSliceProperty dProp = new ExclusiveSliceProperty(clusterState, message);
-            if (dProp.balanceProperty()) {
-              String collName = message.getStr(ZkStateReader.COLLECTION_PROP);
-              return Collections.singletonList(new ZkWriteCommand(collName, dProp.getDocCollection()));
-            }
-            break;
-          case MODIFYCOLLECTION:
-            CollectionsHandler.verifyRuleParams(zkController.getCoreContainer() ,message.getProperties());
-            return Collections.singletonList(new CollectionMutator(getSolrCloudManager()).modifyCollection(clusterState,message));
-          case MIGRATESTATEFORMAT:
-            return Collections.singletonList(new ClusterStateMutator(getSolrCloudManager()).migrateStateFormat(clusterState, message));
-          default:
-            throw new RuntimeException("unknown operation:" + operation
-                + " contents:" + message.getProperties());
-        }
-      } else {
-        OverseerAction overseerAction = OverseerAction.get(operation);
-        if (overseerAction == null) {
-          throw new RuntimeException("unknown operation:" + operation + " contents:" + message.getProperties());
-        }
-        switch (overseerAction) {
-          case STATE:
-            return Collections.singletonList(new ReplicaMutator(getSolrCloudManager()).setState(clusterState, message));
-          case LEADER:
-            return Collections.singletonList(new SliceMutator(getSolrCloudManager()).setShardLeader(clusterState, message));
-          case DELETECORE:
-            return Collections.singletonList(new SliceMutator(getSolrCloudManager()).removeReplica(clusterState, message));
-          case ADDROUTINGRULE:
-            return Collections.singletonList(new SliceMutator(getSolrCloudManager()).addRoutingRule(clusterState, message));
-          case REMOVEROUTINGRULE:
-            return Collections.singletonList(new SliceMutator(getSolrCloudManager()).removeRoutingRule(clusterState, message));
-          case UPDATESHARDSTATE:
-            return Collections.singletonList(new SliceMutator(getSolrCloudManager()).updateShardState(clusterState, message));
-          case QUIT:
-            if (myId.equals(message.get(ID))) {
-              log.info("Quit command received {} {}", message, LeaderElector.getNodeName(myId));
-              overseerCollectionConfigSetProcessor.close();
-              close();
-            } else {
-              log.warn("Overseer received wrong QUIT message {}", message);
-            }
-            break;
-          case DOWNNODE:
-            return new NodeMutator().downNode(clusterState, message);
-          default:
-            throw new RuntimeException("unknown operation:" + operation + " contents:" + message.getProperties());
-        }
-      }
-
-      return Collections.singletonList(ZkStateWriter.NO_OP);
-    }
-
-    private LeaderStatus amILeader() {
-      Timer.Context timerContext = stats.time("am_i_leader");
-      boolean success = true;
-      String propsId = null;
-      try {
-        ZkNodeProps props = ZkNodeProps.load(zkClient.getData(
-            OVERSEER_ELECT + "/leader", null, null, true));
-        propsId = props.getStr(ID);
-        if (myId.equals(propsId)) {
-          return LeaderStatus.YES;
-        }
-      } catch (KeeperException e) {
-        success = false;
-        if (e.code() == KeeperException.Code.CONNECTIONLOSS) {
-          log.error("", e);
-          return LeaderStatus.DONT_KNOW;
-        } else if (e.code() != KeeperException.Code.SESSIONEXPIRED) {
-          log.warn("", e);
-        } else {
-          log.debug("", e);
-        }
-      } catch (InterruptedException e) {
-        success = false;
-        Thread.currentThread().interrupt();
-      } finally {
-        timerContext.stop();
-        if (success)  {
-          stats.success("am_i_leader");
-        } else  {
-          stats.error("am_i_leader");
-        }
-      }
-      log.info("According to ZK I (id={}) am no longer a leader. propsId={}", myId, propsId);
-      return LeaderStatus.NO;
-    }
-
-    @Override
-    public void close() {
-      this.isClosed = true;
-    }
-
-  }
-
-  public static class OverseerThread extends Thread implements Closeable {
-
-    protected volatile boolean isClosed;
-    private Closeable thread;
-
-    public OverseerThread(ThreadGroup tg, Closeable thread) {
-      super(tg, (Runnable) thread);
-      this.thread = thread;
-    }
-
-    public OverseerThread(ThreadGroup ccTg, Closeable thread, String name) {
-      super(ccTg, (Runnable) thread, name);
-      this.thread = thread;
-    }
-
-    @Override
-    public void close() throws IOException {
-      thread.close();
-      this.isClosed = true;
-    }
-
-    public Closeable getThread() {
-      return thread;
-    }
-
-    public boolean isClosed() {
-      return this.isClosed;
-    }
-
-  }
-
-  private OverseerThread ccThread;
-
-  private OverseerThread updaterThread;
-
-  private OverseerThread triggerThread;
-
-  private final ZkStateReader reader;
-
-  private final ShardHandler shardHandler;
-
-  private final UpdateShardHandler updateShardHandler;
-
-  private final String adminPath;
-
-  private OverseerCollectionConfigSetProcessor overseerCollectionConfigSetProcessor;
-
-  private ZkController zkController;
-
-  private Stats stats;
-  private String id;
-  private boolean closed;
-  private CloudConfig config;
-
-  // overseer not responsible for closing reader
-  public Overseer(ShardHandler shardHandler,
-      UpdateShardHandler updateShardHandler, String adminPath,
-      final ZkStateReader reader, ZkController zkController, CloudConfig config)
-      throws KeeperException, InterruptedException {
-    this.reader = reader;
-    this.shardHandler = shardHandler;
-    this.updateShardHandler = updateShardHandler;
-    this.adminPath = adminPath;
-    this.zkController = zkController;
-    this.stats = new Stats();
-    this.config = config;
-  }
-
-  public synchronized void start(String id) {
-    MDCLoggingContext.setNode(zkController == null ?
-        null :
-        zkController.getNodeName());
-    this.id = id;
-    closed = false;
-    doClose();
-    stats = new Stats();
-    log.info("Overseer (id=" + id + ") starting");
-    createOverseerNode(reader.getZkClient());
-    //launch cluster state updater thread
-    ThreadGroup tg = new ThreadGroup("Overseer state updater.");
-    updaterThread = new OverseerThread(tg, new ClusterStateUpdater(reader, id, stats), "OverseerStateUpdate-" + id);
-    updaterThread.setDaemon(true);
-
-    ThreadGroup ccTg = new ThreadGroup("Overseer collection creation process.");
-
-    OverseerNodePrioritizer overseerPrioritizer = new OverseerNodePrioritizer(reader, adminPath, shardHandler.getShardHandlerFactory());
-    overseerCollectionConfigSetProcessor = new OverseerCollectionConfigSetProcessor(reader, id, shardHandler, adminPath, stats, Overseer.this, overseerPrioritizer);
-    ccThread = new OverseerThread(ccTg, overseerCollectionConfigSetProcessor, "OverseerCollectionConfigSetProcessor-" + id);
-    ccThread.setDaemon(true);
-
-    ThreadGroup triggerThreadGroup = new ThreadGroup("Overseer autoscaling triggers");
-    OverseerTriggerThread trigger = new OverseerTriggerThread(zkController.getCoreContainer().getResourceLoader(),
-        zkController.getSolrCloudManager(), config);
-    triggerThread = new OverseerThread(triggerThreadGroup, trigger, "OverseerAutoScalingTriggerThread-" + id);
-
-    updaterThread.start();
-    ccThread.start();
-    triggerThread.start();
-    if (this.id != null) {
-      assert ObjectReleaseTracker.track(this);
-    }
-  }
-
-  public Stats getStats() {
-    return stats;
-  }
-
-  ZkController getZkController() {
-    return zkController;
-  }
-
-  public CoreContainer getCoreContainer() {
-    return zkController.getCoreContainer();
-  }
-
-  public SolrCloudManager getSolrCloudManager() {
-    return zkController.getSolrCloudManager();
-  }
-
-  /**
-   * For tests.
-   * 
-   * @lucene.internal
-   * @return state updater thread
-   */
-  public synchronized OverseerThread getUpdaterThread() {
-    return updaterThread;
-  }
-
-  /**
-   * For tests.
-   * @lucene.internal
-   * @return trigger thread
-   */
-  public synchronized OverseerThread getTriggerThread() {
-    return triggerThread;
-  }
-  
-  public synchronized void close() {
-    if (closed) return;
-    if (this.id != null) {
-      log.info("Overseer (id=" + id + ") closing");
-    }
-    
-    doClose();
-    this.closed = true;
-    if (this.id != null) {
-      assert ObjectReleaseTracker.release(this);
-    }
-  }
-
-  @Override
-  public boolean isClosed() {
-    return closed;
-  }
-
-  private void doClose() {
-    
-    if (updaterThread != null) {
-      IOUtils.closeQuietly(updaterThread);
-      updaterThread.interrupt();
-    }
-    if (ccThread != null) {
-      IOUtils.closeQuietly(ccThread);
-      ccThread.interrupt();
-    }
-    if (triggerThread != null)  {
-      IOUtils.closeQuietly(triggerThread);
-      triggerThread.interrupt();
-    }
-    if (updaterThread != null) {
-      try {
-        updaterThread.join();
-      } catch (InterruptedException e) {
-        // ignore; the Overseer is shutting down anyway
-      }
-    }
-    if (ccThread != null) {
-      try {
-        ccThread.join();
-      } catch (InterruptedException e) {
-        // ignore; the Overseer is shutting down anyway
-      }
-    }
-    if (triggerThread != null) {
-      try {
-        triggerThread.join();
-      } catch (InterruptedException e) {
-        // ignore; the Overseer is shutting down anyway
-      }
-    }
-    updaterThread = null;
-    ccThread = null;
-    triggerThread = null;
-  }
-
-  /**
-   * Get queue that can be used to send messages to Overseer.
-   * <p>
-   * Any and all modifications to the cluster state must be sent to
-   * the overseer via this queue. The complete list of overseer actions
-   * supported by this queue are documented inside the {@link OverseerAction} enum.
-   * <p>
-   * Performance statistics on the returned queue
-   * are <em>not</em> tracked by the Overseer Stats API,
-   * see {@link org.apache.solr.common.params.CollectionParams.CollectionAction#OVERSEERSTATUS}.
-   * Therefore, this method should be used only by clients for writing to the overseer queue.
-   * <p>
-   * This method will create the /overseer znode in ZooKeeper if it does not exist already.
-   *
-   * @param zkClient the {@link SolrZkClient} to be used for reading/writing to the queue
-   * @return a {@link ZkDistributedQueue} object
-   */
-  public static ZkDistributedQueue getStateUpdateQueue(final SolrZkClient zkClient) {
-    return getStateUpdateQueue(zkClient, new Stats());
-  }
-
-  /**
-   * The overseer uses the returned queue to read any operations submitted by clients.
-   * This method should not be used directly by anyone other than the Overseer itself.
-   * This method will create the /overseer znode in ZooKeeper if it does not exist already.
-   *
-   * @param zkClient the {@link SolrZkClient} to be used for reading/writing to the queue
-   * @param zkStats  a {@link Stats} object which tracks statistics for all zookeeper operations performed by this queue
-   * @return a {@link ZkDistributedQueue} object
-   */
-  static ZkDistributedQueue getStateUpdateQueue(final SolrZkClient zkClient, Stats zkStats) {
-    createOverseerNode(zkClient);
-    return new ZkDistributedQueue(zkClient, "/overseer/queue", zkStats, STATE_UPDATE_MAX_QUEUE);
-  }
-
-  /**
-   * Internal overseer work queue. This should not be used outside of Overseer.
-   * <p>
-   * This queue is used to store overseer operations that have been removed from the
-   * state update queue but are being executed as part of a batch. Once
-   * the result of the batch is persisted to zookeeper, these items are removed from the
-   * work queue. If the overseer dies while processing a batch then a new overseer always
-   * operates from the work queue first and only then starts processing operations from the
-   * state update queue.
-   * This method will create the /overseer znode in ZooKeeper if it does not exist already.
-   *
-   * @param zkClient the {@link SolrZkClient} to be used for reading/writing to the queue
-   * @param zkStats  a {@link Stats} object which tracks statistics for all zookeeper operations performed by this queue
-   * @return a {@link ZkDistributedQueue} object
-   */
-  static ZkDistributedQueue getInternalWorkQueue(final SolrZkClient zkClient, Stats zkStats) {
-    createOverseerNode(zkClient);
-    return new ZkDistributedQueue(zkClient, "/overseer/queue-work", zkStats);
-  }
-
-  /* Internal map for currently running tasks, not to be used outside of the Overseer */
-  static DistributedMap getRunningMap(final SolrZkClient zkClient) {
-    createOverseerNode(zkClient);
-    return new DistributedMap(zkClient, "/overseer/collection-map-running");
-  }
-
-  /* Size-limited map for successfully completed tasks */
-  static DistributedMap getCompletedMap(final SolrZkClient zkClient) {
-    createOverseerNode(zkClient);
-    return new SizeLimitedDistributedMap(zkClient, "/overseer/collection-map-completed", NUM_RESPONSES_TO_STORE, (child) -> getAsyncIdsMap(zkClient).remove(child));
-  }
-
-  /* Map for failed tasks, not to be used outside of the Overseer */
-  static DistributedMap getFailureMap(final SolrZkClient zkClient) {
-    createOverseerNode(zkClient);
-    return new SizeLimitedDistributedMap(zkClient, "/overseer/collection-map-failure", NUM_RESPONSES_TO_STORE, (child) -> getAsyncIdsMap(zkClient).remove(child));
-  }
-  
-  /* Map of async IDs currently in use */
-  static DistributedMap getAsyncIdsMap(final SolrZkClient zkClient) {
-    createOverseerNode(zkClient);
-    return new DistributedMap(zkClient, "/overseer/async_ids");
-  }
-
-  /**
-   * Get queue that can be used to submit collection API tasks to the Overseer.
-   * <p>
-   * This queue is used internally by the {@link CollectionsHandler} to submit collection API
-   * tasks which are executed by the {@link OverseerCollectionMessageHandler}. The actions supported
-   * by this queue are listed in the {@link org.apache.solr.common.params.CollectionParams.CollectionAction}
-   * enum.
-   * <p>
-   * Performance statistics on the returned queue
-   * are <em>not</em> tracked by the Overseer Stats API,
-   * see {@link org.apache.solr.common.params.CollectionParams.CollectionAction#OVERSEERSTATUS}.
-   *
-   * @param zkClient the {@link SolrZkClient} to be used for reading/writing to the queue
-   * @return a {@link ZkDistributedQueue} object
-   */
-  static OverseerTaskQueue getCollectionQueue(final SolrZkClient zkClient) {
-    return getCollectionQueue(zkClient, new Stats());
-  }
-
-  /**
-   * Get queue that can be used to read collection API tasks submitted to the Overseer.
-   * <p>
-   * This queue is used internally by the {@link OverseerCollectionMessageHandler} to read collection API
-   * tasks submitted by the {@link CollectionsHandler}. The actions supported
-   * by this queue are listed in the {@link org.apache.solr.common.params.CollectionParams.CollectionAction}
-   * enum.
-   * <p>
-   * Performance statistics on the returned queue are tracked by the Overseer Stats API,
-   * see {@link org.apache.solr.common.params.CollectionParams.CollectionAction#OVERSEERSTATUS}.
-   *
-   * @param zkClient the {@link SolrZkClient} to be used for reading/writing to the queue
-   * @return a {@link ZkDistributedQueue} object
-   */
-  static OverseerTaskQueue getCollectionQueue(final SolrZkClient zkClient, Stats zkStats) {
-    createOverseerNode(zkClient);
-    return new OverseerTaskQueue(zkClient, "/overseer/collection-queue-work", zkStats);
-  }
-
-  /**
-   * Get queue that can be used to submit configset API tasks to the Overseer.
-   * <p>
-   * This queue is used internally by the {@link org.apache.solr.handler.admin.ConfigSetsHandler} to submit
-   * tasks which are executed by the {@link OverseerConfigSetMessageHandler}. The actions supported
-   * by this queue are listed in the {@link org.apache.solr.common.params.ConfigSetParams.ConfigSetAction}
-   * enum.
-   * <p>
-   * Performance statistics on the returned queue
-   * are <em>not</em> tracked by the Overseer Stats API,
-   * see {@link org.apache.solr.common.params.CollectionParams.CollectionAction#OVERSEERSTATUS}.
-   *
-   * @param zkClient the {@link SolrZkClient} to be used for reading/writing to the queue
-   * @return a {@link ZkDistributedQueue} object
-   */
-  static OverseerTaskQueue getConfigSetQueue(final SolrZkClient zkClient)  {
-    return getConfigSetQueue(zkClient, new Stats());
-  }
-
-  /**
-   * Get queue that can be used to read configset API tasks submitted to the Overseer.
-   * <p>
-   * This queue is used internally by the {@link OverseerConfigSetMessageHandler} to read configset API
-   * tasks submitted by the {@link org.apache.solr.handler.admin.ConfigSetsHandler}. The actions supported
-   * by this queue are listed in the {@link org.apache.solr.common.params.ConfigSetParams.ConfigSetAction}
-   * enum.
-   * <p>
-   * Performance statistics on the returned queue are tracked by the Overseer Stats API,
-   * see {@link org.apache.solr.common.params.CollectionParams.CollectionAction#OVERSEERSTATUS}.
-   * <p>
-   * For now, this internally returns the same queue as {@link #getCollectionQueue(SolrZkClient, Stats)}.
-   * It is the responsibility of the client to ensure that configset API actions are prefixed with
-   * {@link OverseerConfigSetMessageHandler#CONFIGSETS_ACTION_PREFIX} so that it is processed by
-   * {@link OverseerConfigSetMessageHandler}.
-   *
-   * @param zkClient the {@link SolrZkClient} to be used for reading/writing to the queue
-   * @return a {@link ZkDistributedQueue} object
-   */
-  static OverseerTaskQueue getConfigSetQueue(final SolrZkClient zkClient, Stats zkStats) {
-    // For now, we use the same queue as the collection queue, but ensure
-    // that the actions are prefixed with a unique string.
-    createOverseerNode(zkClient);
-    return getCollectionQueue(zkClient, zkStats);
-  }
-  
-
-  private static void createOverseerNode(final SolrZkClient zkClient) {
-    try {
-      zkClient.create("/overseer", new byte[0], CreateMode.PERSISTENT, true);
-    } catch (KeeperException.NodeExistsException e) {
-      //ok
-    } catch (InterruptedException e) {
-      log.error("Could not create Overseer node", e);
-      Thread.currentThread().interrupt();
-      throw new RuntimeException(e);
-    } catch (KeeperException e) {
-      log.error("Could not create Overseer node", e);
-      throw new RuntimeException(e);
-    }
-  }
-
-  public static boolean isLegacy(ZkStateReader stateReader) {
-    String legacyProperty = stateReader.getClusterProperty(ZkStateReader.LEGACY_CLOUD, "false");
-    return "true".equals(legacyProperty);
-  }
-
-  public static boolean isLegacy(ClusterStateProvider clusterStateProvider) {
-    String legacyProperty = clusterStateProvider.getClusterProperty(ZkStateReader.LEGACY_CLOUD, "false");
-    return "true".equals(legacyProperty);
-  }
-
-  public ZkStateReader getZkStateReader() {
-    return reader;
-  }
-
-}
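
For context, a minimal sketch of a client enqueuing a cluster-state change
through the state update queue documented above. The node name is an invented
example value, and the use of ZkStateReader.NODE_NAME_PROP as the message key
for DOWNNODE is an assumption for illustration; the rest uses the classes from
the deleted file as-is.

    import org.apache.solr.cloud.Overseer;
    import org.apache.solr.cloud.ZkDistributedQueue;
    import org.apache.solr.cloud.overseer.OverseerAction;
    import org.apache.solr.common.cloud.SolrZkClient;
    import org.apache.solr.common.cloud.ZkNodeProps;
    import org.apache.solr.common.cloud.ZkStateReader;
    import org.apache.solr.common.util.Utils;
    import org.apache.zookeeper.KeeperException;

    class StateUpdateExample {
      // Offer a DOWNNODE message onto the Overseer state update queue; the
      // Overseer's ClusterStateUpdater picks it up and applies NodeMutator.downNode.
      static void markNodeDown(SolrZkClient zkClient) throws KeeperException, InterruptedException {
        ZkDistributedQueue queue = Overseer.getStateUpdateQueue(zkClient);
        queue.offer(Utils.toJSON(new ZkNodeProps(
            Overseer.QUEUE_OPERATION, OverseerAction.DOWNNODE.toLower(),
            ZkStateReader.NODE_NAME_PROP, "127.0.0.1:8983_solr")));
      }
    }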

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionConfigSetProcessor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionConfigSetProcessor.java b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionConfigSetProcessor.java
deleted file mode 100644
index e8d85ce..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionConfigSetProcessor.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.handler.component.ShardHandlerFactory;
-
-import static org.apache.solr.cloud.OverseerConfigSetMessageHandler.CONFIGSETS_ACTION_PREFIX;
-
-/**
- * An {@link OverseerTaskProcessor} that handles:
- * 1) collection-related Overseer messages
- * 2) configset-related Overseer messages
- */
-public class OverseerCollectionConfigSetProcessor extends OverseerTaskProcessor {
-
-   public OverseerCollectionConfigSetProcessor(ZkStateReader zkStateReader, String myId,
-                                               final ShardHandler shardHandler,
-                                               String adminPath, Stats stats, Overseer overseer,
-                                               OverseerNodePrioritizer overseerNodePrioritizer) {
-    this(
-        zkStateReader,
-        myId,
-        shardHandler.getShardHandlerFactory(),
-        adminPath,
-        stats,
-        overseer,
-        overseerNodePrioritizer,
-        Overseer.getCollectionQueue(zkStateReader.getZkClient(), stats),
-        Overseer.getRunningMap(zkStateReader.getZkClient()),
-        Overseer.getCompletedMap(zkStateReader.getZkClient()),
-        Overseer.getFailureMap(zkStateReader.getZkClient())
-    );
-  }
-
-  protected OverseerCollectionConfigSetProcessor(ZkStateReader zkStateReader, String myId,
-                                        final ShardHandlerFactory shardHandlerFactory,
-                                        String adminPath,
-                                        Stats stats,
-                                        Overseer overseer,
-                                        OverseerNodePrioritizer overseerNodePrioritizer,
-                                        OverseerTaskQueue workQueue,
-                                        DistributedMap runningMap,
-                                        DistributedMap completedMap,
-                                        DistributedMap failureMap) {
-    super(
-        zkStateReader,
-        myId,
-        stats,
-        getOverseerMessageHandlerSelector(zkStateReader, myId, shardHandlerFactory,
-            adminPath, stats, overseer, overseerNodePrioritizer),
-        overseerNodePrioritizer,
-        workQueue,
-        runningMap,
-        completedMap,
-        failureMap);
-  }
-
-  private static OverseerMessageHandlerSelector getOverseerMessageHandlerSelector(
-      ZkStateReader zkStateReader,
-      String myId,
-      final ShardHandlerFactory shardHandlerFactory,
-      String adminPath,
-      Stats stats,
-      Overseer overseer,
-      OverseerNodePrioritizer overseerNodePrioritizer) {
-    final OverseerCollectionMessageHandler collMessageHandler = new OverseerCollectionMessageHandler(
-        zkStateReader, myId, shardHandlerFactory, adminPath, stats, overseer, overseerNodePrioritizer);
-    final OverseerConfigSetMessageHandler configMessageHandler = new OverseerConfigSetMessageHandler(
-        zkStateReader);
-    return new OverseerMessageHandlerSelector() {
-      @Override
-      public void close() throws IOException {
-        IOUtils.closeQuietly(collMessageHandler);
-      }
-
-      @Override
-      public OverseerMessageHandler selectOverseerMessageHandler(ZkNodeProps message) {
-        String operation = message.getStr(Overseer.QUEUE_OPERATION);
-        if (operation != null && operation.startsWith(CONFIGSETS_ACTION_PREFIX)) {
-          return configMessageHandler;
-        }
-        return collMessageHandler;
-      }
-    };
-  }
-}
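
A quick illustration of the routing rule implemented by the selector above.
This is a hedged, standalone sketch (class and method names are invented), not
the class's own API:

    import org.apache.solr.cloud.Overseer;
    import org.apache.solr.common.cloud.ZkNodeProps;

    class RoutingExample {
      // Mirrors the selector above: operations prefixed with "configsets:" go
      // to the ConfigSet handler, everything else to the collection handler.
      static boolean isConfigSetOp(ZkNodeProps message) {
        String operation = message.getStr(Overseer.QUEUE_OPERATION);
        return operation != null && operation.startsWith("configsets:");
      }
    }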

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/OverseerConfigSetMessageHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerConfigSetMessageHandler.java b/solr/core/src/java/org/apache/solr/cloud/OverseerConfigSetMessageHandler.java
deleted file mode 100644
index 6812971..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerConfigSetMessageHandler.java
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.lang.invoke.MethodHandles;
-import java.nio.charset.StandardCharsets;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkConfigManager;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.ConfigSetParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.core.ConfigSetProperties;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.noggit.JSONUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.common.params.ConfigSetParams.ConfigSetAction.CREATE;
-import static org.apache.solr.handler.admin.ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME;
-
-/**
- * A {@link OverseerMessageHandler} that handles ConfigSets API related
- * overseer messages.
- */
-public class OverseerConfigSetMessageHandler implements OverseerMessageHandler {
-
-  /**
-   * Prefix to specify an action should be handled by this handler.
-   */
-  public static final String CONFIGSETS_ACTION_PREFIX = "configsets:";
-
-  /**
-   * Name of the ConfigSet to copy from for CREATE
-   */
-  public static final String BASE_CONFIGSET = "baseConfigSet";
-
-  /**
-   * Prefix for properties that should be applied to the ConfigSet for CREATE
-   */
-  public static final String PROPERTY_PREFIX = "configSetProp";
-
-  private ZkStateReader zkStateReader;
-
-  // we essentially implement a read/write lock for the ConfigSet exclusivity as follows:
-  // WRITE: CREATE/DELETE on the ConfigSet under operation
-  // READ: for the Base ConfigSet being copied in CREATE.
-  // in this way, we prevent a Base ConfigSet from being deleted while it is being copied
-  // but don't prevent different ConfigSets from being created with the same Base ConfigSet
-  // at the same time.
-  private final Set<String> configSetWriteWip;
-  private final Set<String> configSetReadWip;
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public OverseerConfigSetMessageHandler(ZkStateReader zkStateReader) {
-    this.zkStateReader = zkStateReader;
-    this.configSetWriteWip = new HashSet<>();
-    this.configSetReadWip = new HashSet<>();
-  }
-
-  @Override
-  public SolrResponse processMessage(ZkNodeProps message, String operation) {
-    NamedList results = new NamedList();
-    try {
-      if (!operation.startsWith(CONFIGSETS_ACTION_PREFIX)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Operation does not contain proper prefix: " + operation
-                + " expected: " + CONFIGSETS_ACTION_PREFIX);
-      }
-      operation = operation.substring(CONFIGSETS_ACTION_PREFIX.length());
-      log.info("OverseerConfigSetMessageHandler.processMessage : " + operation + " , " + message.toString());
-
-      ConfigSetParams.ConfigSetAction action = ConfigSetParams.ConfigSetAction.get(operation);
-      if (action == null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown operation:" + operation);
-      }
-      switch (action) {
-        case CREATE:
-          createConfigSet(message);
-          break;
-        case DELETE:
-          deleteConfigSet(message);
-          break;
-        default:
-          throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown operation:"
-              + operation);
-      }
-    } catch (Exception e) {
-      String configSetName = message.getStr(NAME);
-
-      if (configSetName == null) {
-        SolrException.log(log, "Operation " + operation + " failed", e);
-      } else {
-        SolrException.log(log, "ConfigSet: " + configSetName + " operation: " + operation
-            + " failed", e);
-      }
-
-      results.add("Operation " + operation + " caused exception:", e);
-      SimpleOrderedMap nl = new SimpleOrderedMap();
-      nl.add("msg", e.getMessage());
-      nl.add("rspCode", e instanceof SolrException ? ((SolrException) e).code() : -1);
-      results.add("exception", nl);
-    }
-    return new OverseerSolrResponse(results);
-  }
-
-  @Override
-  public String getName() {
-    return "Overseer ConfigSet Message Handler";
-  }
-
-  @Override
-  public String getTimerName(String operation) {
-    return "configset_" + operation;
-  }
-
-  @Override
-  public Lock lockTask(ZkNodeProps message, OverseerTaskProcessor.TaskBatch taskBatch) {
-    String configSetName = getTaskKey(message);
-    if (canExecute(configSetName, message)) {
-      markExclusiveTask(configSetName, message);
-      return () -> unmarkExclusiveTask(configSetName, message);
-    }
-    return null;
-  }
-
-  @Override
-  public String getTaskKey(ZkNodeProps message) {
-    return message.getStr(NAME);
-  }
-
-
-  private void markExclusiveTask(String configSetName, ZkNodeProps message) {
-    String baseConfigSet = getBaseConfigSetIfCreate(message);
-    markExclusive(configSetName, baseConfigSet);
-  }
-
-  private void markExclusive(String configSetName, String baseConfigSetName) {
-    synchronized (configSetWriteWip) {
-      configSetWriteWip.add(configSetName);
-      if (baseConfigSetName != null) configSetReadWip.add(baseConfigSetName);
-    }
-  }
-
-  private void unmarkExclusiveTask(String configSetName, ZkNodeProps message) {
-    String baseConfigSet = getBaseConfigSetIfCreate(message);
-    unmarkExclusiveConfigSet(configSetName, baseConfigSet);
-  }
-
-  private void unmarkExclusiveConfigSet(String configSetName, String baseConfigSetName) {
-    synchronized (configSetWriteWip) {
-      configSetWriteWip.remove(configSetName);
-      if (baseConfigSetName != null) configSetReadWip.remove(baseConfigSetName);
-    }
-  }
-
-
-  private boolean canExecute(String configSetName, ZkNodeProps message) {
-    String baseConfigSetName = getBaseConfigSetIfCreate(message);
-
-    synchronized (configSetWriteWip) {
-      // need to acquire:
-      // 1) write lock on ConfigSet
-      // 2) read lock on Base ConfigSet
-      if (configSetWriteWip.contains(configSetName) || configSetReadWip.contains(configSetName)) {
-        return false;
-      }
-      if (baseConfigSetName != null && configSetWriteWip.contains(baseConfigSetName)) {
-        return false;
-      }
-    }
-
-    return true;
-  }
-
-
-  private String getBaseConfigSetIfCreate(ZkNodeProps message) {
-    String operation = message.getStr(Overseer.QUEUE_OPERATION);
-    if (operation != null) {
-      operation = operation.substring(CONFIGSETS_ACTION_PREFIX.length());
-      ConfigSetParams.ConfigSetAction action = ConfigSetParams.ConfigSetAction.get(operation);
-      if (action == CREATE) {
-        String baseConfigSetName = message.getStr(BASE_CONFIGSET);
-        if (baseConfigSetName == null || baseConfigSetName.length() == 0) {
-          baseConfigSetName = DEFAULT_CONFIGSET_NAME;
-        }
-        return baseConfigSetName;
-      }
-    }
-    return null;
-  }
-
-  private NamedList getConfigSetProperties(String path) throws IOException {
-    byte[] oldPropsData = null;
-    try {
-      oldPropsData = zkStateReader.getZkClient().getData(path, null, null, true);
-    } catch (KeeperException.NoNodeException e) {
-      log.info("no existing ConfigSet properties found");
-    } catch (KeeperException | InterruptedException e) {
-      throw new IOException("Error reading old properties",
-          SolrZkClient.checkInterrupted(e));
-    }
-
-    if (oldPropsData != null) {
-      InputStreamReader reader = new InputStreamReader(new ByteArrayInputStream(oldPropsData), StandardCharsets.UTF_8);
-      try {
-        return ConfigSetProperties.readFromInputStream(reader);
-      } finally {
-        reader.close();
-      }
-    }
-    return null;
-  }
-
-  private Map<String, Object> getNewProperties(ZkNodeProps message) {
-    Map<String, Object> properties = null;
-    for (Map.Entry<String, Object> entry : message.getProperties().entrySet()) {
-      if (entry.getKey().startsWith(PROPERTY_PREFIX + ".")) {
-        if (properties == null) {
-          properties = new HashMap<String, Object>();
-        }
-        properties.put(entry.getKey().substring((PROPERTY_PREFIX + ".").length()),
-            entry.getValue());
-      }
-    }
-    return properties;
-  }
-
-  private void mergeOldProperties(Map<String, Object> newProps, NamedList oldProps) {
-    Iterator<Map.Entry<String, Object>> it = oldProps.iterator();
-    while (it.hasNext()) {
-      Map.Entry<String, Object> oldEntry = it.next();
-      if (!newProps.containsKey(oldEntry.getKey())) {
-        newProps.put(oldEntry.getKey(), oldEntry.getValue());
-      }
-    }
-  }
-
-  private byte[] getPropertyData(Map<String, Object> newProps) {
-    if (newProps != null) {
-      String propertyDataStr = JSONUtil.toJSON(newProps);
-      if (propertyDataStr == null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Invalid property specification");
-      }
-      return propertyDataStr.getBytes(StandardCharsets.UTF_8);
-    }
-    return null;
-  }
-
-  private String getPropertyPath(String configName, String propertyPath) {
-    return ZkConfigManager.CONFIGS_ZKNODE + "/" + configName + "/" + propertyPath;
-  }
-
-  private void createConfigSet(ZkNodeProps message) throws IOException {
-    String configSetName = getTaskKey(message);
-    if (configSetName == null || configSetName.length() == 0) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "ConfigSet name not specified");
-    }
-
-    String baseConfigSetName = message.getStr(BASE_CONFIGSET, DEFAULT_CONFIGSET_NAME);
-
-    ZkConfigManager configManager = new ZkConfigManager(zkStateReader.getZkClient());
-    if (configManager.configExists(configSetName)) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "ConfigSet already exists: " + configSetName);
-    }
-
-    // is there a base config that already exists
-    if (!configManager.configExists(baseConfigSetName)) {
-      throw new SolrException(ErrorCode.BAD_REQUEST,
-          "Base ConfigSet does not exist: " + baseConfigSetName);
-    }
-
-    String propertyPath = ConfigSetProperties.DEFAULT_FILENAME;
-    Map<String, Object> props = getNewProperties(message);
-    if (props != null) {
-      // read the old config properties and do a merge, if necessary
-      NamedList oldProps = getConfigSetProperties(getPropertyPath(baseConfigSetName, propertyPath));
-      if (oldProps != null) {
-        mergeOldProperties(props, oldProps);
-      }
-    }
-    byte[] propertyData = getPropertyData(props);
-
-    Set<String> copiedToZkPaths = new HashSet<String>();
-    try {
-      configManager.copyConfigDir(baseConfigSetName, configSetName, copiedToZkPaths);
-      if (propertyData != null) {
-        try {
-          zkStateReader.getZkClient().makePath(
-              getPropertyPath(configSetName, propertyPath),
-              propertyData, CreateMode.PERSISTENT, null, false, true);
-        } catch (KeeperException | InterruptedException e) {
-          throw new IOException("Error writing new properties",
-              SolrZkClient.checkInterrupted(e));
-        }
-      }
-    } catch (Exception e) {
-      // Copying the config dir or writing the properties file may have failed.
-      // We should delete the ConfigSet because it may be invalid, assuming we
-      // actually wrote something; e.g. it could be the entire base ConfigSet
-      // with the old properties, including the immutable flag, which would
-      // make it impossible for the user to delete.
-      try {
-        if (configManager.configExists(configSetName) && copiedToZkPaths.size() > 0) {
-          deleteConfigSet(configSetName, true);
-        }
-      } catch (IOException ioe) {
-        log.error("Error while trying to delete partially created ConfigSet", ioe);
-      }
-      throw e;
-    }
-  }
-
-  private void deleteConfigSet(ZkNodeProps message) throws IOException {
-    String configSetName = getTaskKey(message);
-    if (configSetName == null || configSetName.length() == 0) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "ConfigSet name not specified");
-    }
-
-    deleteConfigSet(configSetName, false);
-  }
-
-  private void deleteConfigSet(String configSetName, boolean force) throws IOException {
-    ZkConfigManager configManager = new ZkConfigManager(zkStateReader.getZkClient());
-    if (!configManager.configExists(configSetName)) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "ConfigSet does not exist to delete: " + configSetName);
-    }
-
-    for (Map.Entry<String, DocCollection> entry : zkStateReader.getClusterState().getCollectionsMap().entrySet()) {
-      if (configSetName.equals(zkStateReader.readConfigName(entry.getKey())))
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Can not delete ConfigSet as it is currently being used by collection [" + entry.getKey() + "]");
-    }
-
-    String propertyPath = ConfigSetProperties.DEFAULT_FILENAME;
-    NamedList properties = getConfigSetProperties(getPropertyPath(configSetName, propertyPath));
-    if (properties != null) {
-      Object immutable = properties.get(ConfigSetProperties.IMMUTABLE_CONFIGSET_ARG);
-      boolean isImmutableConfigSet = immutable != null ? Boolean.parseBoolean(immutable.toString()) : false;
-      if (!force && isImmutableConfigSet) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Requested delete of immutable ConfigSet: " + configSetName);
-      }
-    }
-    configManager.deleteConfigDir(configSetName);
-  }
-}
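
The WIP-set scheme described in the comments above can be distilled into a
self-contained sketch (names and structure simplified as an assumption; this
is not the class's own API): a CREATE/DELETE takes a "write" mark on the
target ConfigSet, and CREATE additionally takes a "read" mark on its base.

    import java.util.HashSet;
    import java.util.Set;

    class ConfigSetWipLock {
      private final Set<String> writeWip = new HashSet<>();
      private final Set<String> readWip = new HashSet<>();

      // Returns false if the target is being written or read, or if the base
      // is being written; otherwise marks both sets and returns true. Two
      // creates from the same base may run concurrently, since a base only
      // ever receives "read" marks.
      synchronized boolean tryLock(String target, String base) {
        if (writeWip.contains(target) || readWip.contains(target)) return false;
        if (base != null && writeWip.contains(base)) return false;
        writeWip.add(target);
        if (base != null) readWip.add(base);
        return true;
      }

      synchronized void unlock(String target, String base) {
        writeWip.remove(target);
        if (base != null) readWip.remove(base);
      }
    }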

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/OverseerMessageHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerMessageHandler.java b/solr/core/src/java/org/apache/solr/cloud/OverseerMessageHandler.java
deleted file mode 100644
index c4027cc..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerMessageHandler.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.common.cloud.ZkNodeProps;
-
-/**
- * Interface for processing messages received by an {@link OverseerTaskProcessor}
- */
-public interface OverseerMessageHandler {
-
-  /**
-   * @param message the message to process
-   * @param operation the operation to process
-   *
-   * @return response
-   */
-  SolrResponse processMessage(ZkNodeProps message, String operation);
-
-  /**
-   * @return the name of the OverseerMessageHandler
-   */
-  String getName();
-
-  /**
-   * @param operation the operation to be timed
-   *
-   * @return the name of the timer to use for the operation
-   */
-  String getTimerName(String operation);
-
-  interface Lock {
-    void unlock();
-  }
-
-  /**
-   * Try to provide an exclusive lock for this particular task. Returns
-   * null if locking is not possible or not necessary.
-   */
-  Lock lockTask(ZkNodeProps message, OverseerTaskProcessor.TaskBatch taskBatch);
-
-  /**
-   * @param message the message being processed
-   *
-   * @return the taskKey for the message for handling task exclusivity
-   */
-  String getTaskKey(ZkNodeProps message);
-
-}
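
A hedged, minimal implementation sketch of the interface above. The class
name, operation handling, and the "name" task key are all invented for
illustration:

    package org.apache.solr.cloud;

    import org.apache.solr.client.solrj.SolrResponse;
    import org.apache.solr.common.cloud.ZkNodeProps;
    import org.apache.solr.common.util.NamedList;

    public class EchoMessageHandler implements OverseerMessageHandler {
      @Override
      public SolrResponse processMessage(ZkNodeProps message, String operation) {
        NamedList<Object> results = new NamedList<>();
        results.add("echo", operation);  // simply echo the operation back
        return new OverseerSolrResponse(results);
      }

      @Override
      public String getName() { return "Echo Message Handler"; }

      @Override
      public String getTimerName(String operation) { return "echo_" + operation; }

      @Override
      public Lock lockTask(ZkNodeProps message, OverseerTaskProcessor.TaskBatch taskBatch) {
        // a stateless handler needs no exclusivity, so hand back a no-op lock
        return () -> { };
      }

      @Override
      public String getTaskKey(ZkNodeProps message) {
        return message.getStr("name");  // assumed key, for illustration only
      }
    }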

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/OverseerNodePrioritizer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerNodePrioritizer.java b/solr/core/src/java/org/apache/solr/cloud/OverseerNodePrioritizer.java
deleted file mode 100644
index 34ee041..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerNodePrioritizer.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.handler.component.ShardHandlerFactory;
-import org.apache.solr.handler.component.ShardRequest;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.ID;
-
-/**
- * Responsible for prioritization of Overseer nodes, for example with the
- * ADDROLE collection command.
- */
-public class OverseerNodePrioritizer {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final ZkStateReader zkStateReader;
-  private final String adminPath;
-  private final ShardHandlerFactory shardHandlerFactory;
-
-  public OverseerNodePrioritizer(ZkStateReader zkStateReader, String adminPath, ShardHandlerFactory shardHandlerFactory) {
-    this.zkStateReader = zkStateReader;
-    this.adminPath = adminPath;
-    this.shardHandlerFactory = shardHandlerFactory;
-  }
-
-  public synchronized void prioritizeOverseerNodes(String overseerId) throws Exception {
-    SolrZkClient zk = zkStateReader.getZkClient();
-    if (!zk.exists(ZkStateReader.ROLES, true)) return;
-    Map m = (Map) Utils.fromJSON(zk.getData(ZkStateReader.ROLES, null, new Stat(), true));
-
-    List overseerDesignates = (List) m.get("overseer");
-    if (overseerDesignates == null || overseerDesignates.isEmpty()) return;
-    String ldr = OverseerTaskProcessor.getLeaderNode(zk);
-    if (overseerDesignates.contains(ldr)) return;
-    log.info("prioritizing overseer nodes at {}, overseer designates are {}", overseerId, overseerDesignates);
-    List<String> electionNodes = OverseerTaskProcessor.getSortedElectionNodes(zk, Overseer.OVERSEER_ELECT + LeaderElector.ELECTION_NODE);
-    if (electionNodes.size() < 2) return;
-    log.info("sorted nodes {}", electionNodes);
-
-    String designateNodeId = null;
-    for (String electionNode : electionNodes) {
-      if (overseerDesignates.contains(LeaderElector.getNodeName(electionNode))) {
-        designateNodeId = electionNode;
-        break;
-      }
-    }
-
-    if (designateNodeId == null) {
-      log.warn("No live overseer designate");
-      return;
-    }
-    if (!designateNodeId.equals(electionNodes.get(1))) { // check whether the designate is already first in line
-      log.info("asking node {} to come join election at head", designateNodeId);
-      invokeOverseerOp(designateNodeId, "rejoinAtHead"); // ask the designate to come first
-      log.info("asking the old first in line {} to rejoin election", electionNodes.get(1));
-      invokeOverseerOp(electionNodes.get(1), "rejoin"); // ask the second in line to go behind
-    }
-    // now ask the current leader to QUIT, so that the designate can take over
-    Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(
-        Utils.toJSON(new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.QUIT.toLower(),
-            ID, OverseerTaskProcessor.getLeaderId(zkStateReader.getZkClient()))));
-
-  }
-
-  private void invokeOverseerOp(String electionNode, String op) {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
-    params.set(CoreAdminParams.ACTION, CoreAdminAction.OVERSEEROP.toString());
-    params.set("op", op);
-    params.set("qt", adminPath);
-    params.set("electionNode", electionNode);
-    ShardRequest sreq = new ShardRequest();
-    sreq.purpose = 1;
-    String replica = zkStateReader.getBaseUrlForNodeName(LeaderElector.getNodeName(electionNode));
-    sreq.shards = new String[]{replica};
-    sreq.actualShards = sreq.shards;
-    sreq.params = params;
-    shardHandler.submit(sreq, replica, sreq.params);
-    shardHandler.takeCompletedOrError();
-  }
-}
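
The prioritizer reads its designates from the ZkStateReader.ROLES znode. A
small sketch of the JSON shape it expects, inferred from the code above
(the node name is invented example data):

    import java.nio.charset.StandardCharsets;
    import java.util.List;
    import java.util.Map;
    import org.apache.solr.common.util.Utils;

    class RolesExample {
      public static void main(String[] args) {
        // Nodes listed under "overseer" are pulled to the head of the
        // Overseer election by prioritizeOverseerNodes above.
        byte[] rolesJson = "{\"overseer\":[\"127.0.0.1:8983_solr\"]}"
            .getBytes(StandardCharsets.UTF_8);
        Map m = (Map) Utils.fromJSON(rolesJson);
        List overseerDesignates = (List) m.get("overseer");
        System.out.println(overseerDesignates);  // [127.0.0.1:8983_solr]
      }
    }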

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/OverseerSolrResponse.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerSolrResponse.java b/solr/core/src/java/org/apache/solr/cloud/OverseerSolrResponse.java
deleted file mode 100644
index 92f6443..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerSolrResponse.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.common.util.NamedList;
-
-public class OverseerSolrResponse extends SolrResponse {
-  
-  NamedList responseList = null;
-
-  private long elapsedTime;
-  
-  public OverseerSolrResponse(NamedList list) {
-    responseList = list;
-  }
-  
-  @Override
-  public long getElapsedTime() {
-    return elapsedTime;
-  }
-  
-  @Override
-  public void setResponse(NamedList<Object> rsp) {
-    this.responseList = rsp;
-  }
-
-  @Override
-  public void setElapsedTime(long elapsedTime) {
-    this.elapsedTime = elapsedTime;
-  }
-
-  @Override
-  public NamedList<Object> getResponse() {
-    return responseList;
-  }
-  
-}


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/QuerySenderListener.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/QuerySenderListener.java b/solr/core/src/java/org/apache/solr/core/QuerySenderListener.java
deleted file mode 100644
index 9037ef3..0000000
--- a/solr/core/src/java/org/apache/solr/core/QuerySenderListener.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.lang.invoke.MethodHandles;
-import java.util.List;
-
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.request.LocalSolrQueryRequest;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.request.SolrRequestInfo;
-import org.apache.solr.response.ResultContext;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.search.DocIterator;
-import org.apache.solr.search.DocList;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.DISTRIB;
-
-/**
- * Sends configured warming queries to a newly opened searcher so that its
- * caches (and the OS disk cache) are primed before it serves live traffic.
- */
-public class QuerySenderListener extends AbstractSolrEventListener {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  public QuerySenderListener(SolrCore core) {
-    super(core);
-  }
-
-  @Override
-  public void newSearcher(SolrIndexSearcher newSearcher, SolrIndexSearcher currentSearcher) {
-    final SolrIndexSearcher searcher = newSearcher;
-    log.info("QuerySenderListener sending requests to " + newSearcher);
-    List<NamedList> allLists = (List<NamedList>)getArgs().get("queries");
-    if (allLists == null) return;
-    boolean createNewReqInfo = SolrRequestInfo.getRequestInfo() == null;
-    for (NamedList nlst : allLists) {
-      SolrQueryRequest req = null;
-      try {
-        // bind the request to a particular searcher (the newSearcher)
-        NamedList params = addEventParms(currentSearcher, nlst);
-        // for this, we default to distrib = false
-        if (params.get(DISTRIB) == null) {
-          params.add(DISTRIB, false);
-        }
-        req = new LocalSolrQueryRequest(getCore(),params) {
-          @Override public SolrIndexSearcher getSearcher() { return searcher; }
-          @Override public void close() { }
-        };
-
-        SolrQueryResponse rsp = new SolrQueryResponse();
-        if (createNewReqInfo) {
-          // SolrRequestInfo for this thread could have been transferred from the parent
-          // thread.
-          SolrRequestInfo.setRequestInfo(new SolrRequestInfo(req, rsp));
-        }
-        getCore().execute(getCore().getRequestHandler(req.getParams().get(CommonParams.QT)), req, rsp);
-
-        // Retrieve the Document instances (not just the ids) to warm
-        // the OS disk cache, and any Solr document cache.  Only the top
-        // level values in the NamedList are checked for DocLists.
-        NamedList values = rsp.getValues();
-        for (int i=0; i<values.size(); i++) {
-          Object o = values.getVal(i);
-          if (o instanceof ResultContext) {
-            o = ((ResultContext)o).getDocList();
-          }
-          if (o instanceof DocList) {
-            DocList docs = (DocList)o;
-            for (DocIterator iter = docs.iterator(); iter.hasNext();) {
-              newSearcher.doc(iter.nextDoc());
-            }
-          }
-        }
-
-      } catch (Exception e) {
-        // do nothing... we want to continue with the other requests.
-        // the failure should have already been logged.
-      } finally {
-        if (req != null) req.close();
-        if (createNewReqInfo) SolrRequestInfo.clearRequestInfo();
-      }
-    }
-    log.info("QuerySenderListener done.");
-  }
-
-
-}
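
The listener above pulls its warming requests from the "queries" entry of its
init args. A sketch of the equivalent structure built programmatically; the
query values are invented examples, and in a real setup this data comes from
the newSearcher/firstSearcher listener entries in solrconfig.xml:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.solr.common.util.NamedList;

    class WarmingArgsExample {
      // One NamedList of request parameters per warming query; the listener
      // iterates the "queries" list and fires each as a local request.
      static NamedList<Object> buildArgs() {
        NamedList<Object> warmQuery = new NamedList<>();
        warmQuery.add("q", "static firstSearcher warming");
        warmQuery.add("rows", 10);
        List<NamedList<Object>> queries = new ArrayList<>();
        queries.add(warmQuery);
        NamedList<Object> args = new NamedList<>();
        args.add("queries", queries);
        return args;
      }
    }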

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/RAMDirectoryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/RAMDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/RAMDirectoryFactory.java
deleted file mode 100644
index 25d0a1c..0000000
--- a/solr/core/src/java/org/apache/solr/core/RAMDirectoryFactory.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.io.IOException;
-
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.LockFactory;
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.store.SingleInstanceLockFactory;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-
-/**
- * Factory to instantiate {@link org.apache.lucene.store.RAMDirectory}
- */
-public class RAMDirectoryFactory extends EphemeralDirectoryFactory {
-
-  @Override
-  protected LockFactory createLockFactory(String rawLockType) throws IOException {
-    if (!(rawLockType == null || DirectoryFactory.LOCK_TYPE_SINGLE.equalsIgnoreCase(rawLockType.trim()))) {
-      throw new SolrException(ErrorCode.FORBIDDEN,
-          "RAMDirectory can only be used with the '"+DirectoryFactory.LOCK_TYPE_SINGLE+"' lock factory type.");
-    }
-    return new SingleInstanceLockFactory();
-  }
-
-  @Override
-  protected Directory create(String path, LockFactory lockFactory, DirContext dirContext) throws IOException {
-    return new RAMDirectory(lockFactory);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/RequestHandlers.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/RequestHandlers.java b/solr/core/src/java/org/apache/solr/core/RequestHandlers.java
deleted file mode 100644
index dd06fa5..0000000
--- a/solr/core/src/java/org/apache/solr/core/RequestHandlers.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.request.SolrRequestHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Holds the registry of {@link SolrRequestHandler}s for a {@link SolrCore}.
- */
-public final class RequestHandlers {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  protected final SolrCore core;
-
-  final PluginBag<SolrRequestHandler> handlers;
-
-  /**
-   * Trim the trailing '/' if it's there, and convert null to the empty string.
-   *
-   * We want:
-   *  /update/csv   and
-   *  /update/csv/
-   * to map to the same handler.
-   */
-  public static String normalize( String p )
-  {
-    if(p == null) return "";
-    if( p.endsWith( "/" ) && p.length() > 1 )
-      return p.substring( 0, p.length()-1 );
-    
-    return p;
-  }
-  
-  public RequestHandlers(SolrCore core) {
-      this.core = core;
-    // we need a thread safe registry since methods like register are currently documented to be thread safe.
-    handlers =  new PluginBag<>(SolrRequestHandler.class, core, true);
-  }
-
-  /**
-   * @return the RequestHandler registered at the given name 
-   */
-  public SolrRequestHandler get(String handlerName) {
-    return handlers.get(normalize(handlerName));
-  }
-
-  /**
-   * Handlers must be initialized before calling this function.  As soon as this is
-   * called, the handler can immediately accept requests.
-   * 
-   * This call is thread safe.
-   * 
-   * @return the previous handler at the given path or null
-   */
-  public SolrRequestHandler register( String handlerName, SolrRequestHandler handler ) {
-    String norm = normalize(handlerName);
-    if (handler == null) {
-      return handlers.remove(norm);
-    }
-    return handlers.put(norm, handler);
-//    return register(handlerName, new PluginRegistry.PluginHolder<>(null, handler));
-  }
-
-
-  /**
-   * Returns an unmodifiable Map containing the registered handlers
-   */
-  public PluginBag<SolrRequestHandler> getRequestHandlers() {
-    return handlers;
-  }
-
-
-  /**
-   * Read solrconfig.xml and register the appropriate handlers
-   * 
-   * This function should <b>only</b> be called from the SolrCore constructor.  It is
-   * not intended as a public API.
-   * 
-   * While the normal runtime registration contract is that handlers MUST be initialized
-   * before they are registered, this function does not do that exactly.
-   *
-   * This function registers all handlers first and then calls init() for each one.
-   *
-   * This is OK because this function is only called at startup and there is no chance that
-   * a handler could be asked to handle a request before it is initialized.
-   * 
-   * The advantage to this approach is that handlers can know what path they are registered
-   * to and what other handlers are available at startup.
-   * 
-   * Handlers will be registered and initialized in the order they appear in solrconfig.xml
-   */
-
-  void initHandlersFromConfig(SolrConfig config) {
-    List<PluginInfo> implicits = core.getImplicitHandlers();
-    // use a linked map so we iterate in insertion order
-    Map<String, PluginInfo> infoMap = new LinkedHashMap<>();
-    // dedupe implicit and explicit request handlers
-    for (PluginInfo info : implicits) infoMap.put(info.name,info);
-    for (PluginInfo info : config.getPluginInfos(SolrRequestHandler.class.getName())) infoMap.put(info.name, info);
-    ArrayList<PluginInfo> infos = new ArrayList<>(infoMap.values());
-
-    List<PluginInfo> modifiedInfos = new ArrayList<>();
-    for (PluginInfo info : infos) {
-      modifiedInfos.add(applyInitParams(config, info));
-    }
-    handlers.init(Collections.emptyMap(),core, modifiedInfos);
-    handlers.alias(handlers.getDefault(), "");
-    log.debug("Registered paths: {}" , StrUtils.join(new ArrayList<>(handlers.keySet()) , ',' ));
-    if (handlers.get("") == null && !handlers.alias("/select", "")) {
-      if (handlers.get("") == null && !handlers.alias("standard", "")) {
-        log.warn("no default request handler is registered (either '/select' or 'standard')");
-      }
-    }
-  }
-
-  private PluginInfo applyInitParams(SolrConfig config, PluginInfo info) {
-    List<InitParams> ags = new ArrayList<>();
-    String p = info.attributes.get(InitParams.TYPE);
-    if(p!=null) {
-      for (String arg : StrUtils.splitSmart(p, ',')) {
-        if(config.getInitParams().containsKey(arg)) ags.add(config.getInitParams().get(arg));
-        else log.warn("INVALID paramSet {} in requestHandler {}", arg, info.toString());
-      }
-    }
-    for (InitParams args : config.getInitParams().values())
-      if(args.matchPath(info.name)) ags.add(args);
-    if(!ags.isEmpty()){
-      info = info.copy();
-      for (InitParams initParam : ags) {
-        initParam.apply(info);
-      }
-    }
-    return info;
-  }
-
-  public void close() {
-    handlers.close();
-  }
-}
-
-
-
-
-
-
-
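
The normalize() rule above is small but load-bearing: both registration and lookup funnel through it, so /update/csv and /update/csv/ resolve to the same handler. A self-contained sketch of the same rule (NormalizeDemo is a hypothetical class; the logic mirrors the deleted method):

    public class NormalizeDemo {
      static String normalize(String p) {
        if (p == null) return "";                      // null becomes the default ("") key
        if (p.endsWith("/") && p.length() > 1) {
          return p.substring(0, p.length() - 1);       // trim the trailing slash
        }
        return p;
      }

      public static void main(String[] args) {
        System.out.println(normalize("/update/csv/")); // -> /update/csv
        System.out.println(normalize("/"));            // -> / (a lone slash is kept)
        System.out.println(normalize(null));           // -> "" (the default handler key)
      }
    }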

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/RequestParams.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/RequestParams.java b/solr/core/src/java/org/apache/solr/core/RequestParams.java
deleted file mode 100644
index fbb2555..0000000
--- a/solr/core/src/java/org/apache/solr/core/RequestParams.java
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.lang.invoke.MethodHandles;
-import java.nio.charset.StandardCharsets;
-import java.util.Collections;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-
-import com.google.common.collect.ImmutableMap;
-import org.apache.solr.cloud.ZkSolrResourceLoader;
-import org.apache.solr.common.MapSerializable;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.params.MapSolrParams;
-import org.apache.solr.common.util.Utils;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.data.Stat;
-import org.noggit.JSONParser;
-import org.noggit.ObjectBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static java.util.Collections.singletonMap;
-import static org.apache.solr.common.util.Utils.getDeepCopy;
-
-/**
- * Encapsulates request-time parameters. Instances are immutable; any modification
- * returns a copy of the object with the changed values.
- */
-public class RequestParams implements MapSerializable {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final Map data;
-  private final Map<String, ParamSet> paramsets = new LinkedHashMap<>();
-  private final int znodeVersion;
-
-  public RequestParams(Map data, int znodeVersion) {
-    if (data == null) data = Collections.EMPTY_MAP;
-    this.data = data;
-    Map paramsets = (Map) data.get(NAME);
-    if (paramsets != null) {
-      for (Object o : paramsets.entrySet()) {
-        Map.Entry e = (Map.Entry) o;
-        if (e.getValue() instanceof Map) {
-          Map value = (Map) e.getValue();
-          this.paramsets.put((String) e.getKey(), createParamSet(value, 0L));
-        }
-      }
-    }
-    this.znodeVersion = znodeVersion;
-  }
-
-  public static ParamSet createParamSet(Map map, Long version) {
-    Map copy = getDeepCopy(map, 3);
-    Map meta = (Map) copy.remove("");
-    if (meta == null && version != null) {
-      meta = Collections.singletonMap("v", version);
-    }
-    Map invariants = (Map) copy.remove(INVARIANTS);
-    Map appends = (Map) copy.remove(APPENDS);
-    return new ParamSet(copy, invariants, appends, meta);
-  }
-
-  /**
-   * Converts Lists to arrays of strings, because Solr expects
-   * params to be String[].
-   */
-  private static Map getMapCopy(Map value) {
-    Map copy = new LinkedHashMap<>();
-    for (Object o1 : value.entrySet()) {
-      Map.Entry entry = (Map.Entry) o1;
-      if ("".equals(entry.getKey())) {
-        copy.put(entry.getKey(), entry.getValue());
-        continue;
-      }
-      if (entry.getValue() != null) {
-        if (entry.getValue() instanceof List) {
-          List l = (List) entry.getValue();
-          String[] sarr = new String[l.size()];
-          for (int i = 0; i < l.size(); i++) {
-            if (l.get(i) != null) sarr[i] = String.valueOf(l.get(i));
-          }
-          copy.put(entry.getKey(), sarr);
-        } else {
-          copy.put(entry.getKey(), String.valueOf(entry.getValue()));
-        }
-      } else {
-        copy.put(entry.getKey(), entry.getValue());
-      }
-    }
-    return copy;
-  }
-
-  public ParamSet getParams(String name) {
-    return paramsets.get(name);
-  }
-
-  public VersionedParams getParams(String name, String type) {
-    ParamSet paramSet = paramsets.get(name);
-    return paramSet == null ? null : paramSet.getParams(type);
-  }
-
-  public int getZnodeVersion() {
-    return znodeVersion;
-  }
-
-  @Override
-  public Map<String, Object> toMap(Map<String, Object> map) {
-    return getMapWithVersion(data, znodeVersion);
-  }
-
-  public static Map<String, Object> getMapWithVersion(Map<String, Object> data, int znodeVersion) {
-    Map result = new LinkedHashMap();
-    result.put(ConfigOverlay.ZNODEVER, znodeVersion);
-    result.putAll(data);
-    return result;
-  }
-
-  public RequestParams setParams(String name, ParamSet paramSet) {
-    Map deepCopy = getDeepCopy(data, 3);
-    Map p = (Map) deepCopy.get(NAME);
-    if (p == null) deepCopy.put(NAME, p = new LinkedHashMap());
-    if (paramSet == null) p.remove(name);
-    else p.put(name, paramSet.toMap(new LinkedHashMap<>()));
-    return new RequestParams(deepCopy, znodeVersion);
-  }
-
-  public static RequestParams getFreshRequestParams(SolrResourceLoader loader, RequestParams requestParams) {
-    if (loader instanceof ZkSolrResourceLoader) {
-      ZkSolrResourceLoader resourceLoader = (ZkSolrResourceLoader) loader;
-      try {
-        Stat stat = resourceLoader.getZkController().getZkClient().exists(resourceLoader.getConfigSetZkPath() + "/" + RequestParams.RESOURCE, null, true);
-        log.debug("latest version of {} in ZK  is : {}", resourceLoader.getConfigSetZkPath() + "/" + RequestParams.RESOURCE, stat == null ? "" : stat.getVersion());
-        if (stat == null) {
-          requestParams = new RequestParams(Collections.EMPTY_MAP, -1);
-        } else if (requestParams == null || stat.getVersion() > requestParams.getZnodeVersion()) {
-          Object[] o = getMapAndVersion(loader, RequestParams.RESOURCE);
-          requestParams = new RequestParams((Map) o[0], (Integer) o[1]);
-          log.info("request params refreshed to version {}", requestParams.getZnodeVersion());
-        }
-      } catch (KeeperException | InterruptedException e) {
-        SolrZkClient.checkInterrupted(e);
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
-      }
-
-    } else {
-      Object[] o = getMapAndVersion(loader, RequestParams.RESOURCE);
-      requestParams = new RequestParams((Map) o[0], (Integer) o[1]);
-    }
-
-    return requestParams;
-
-  }
-
-
-  private static Object[] getMapAndVersion(SolrResourceLoader loader, String name) {
-    try (InputStream in = loader.openResource(name)) {
-      int version = 0; // will always be 0 for a file-based resource loader
-      if (in instanceof ZkSolrResourceLoader.ZkByteArrayInputStream) {
-        version = ((ZkSolrResourceLoader.ZkByteArrayInputStream) in).getStat().getVersion();
-        log.info("conf resource {} loaded . version : {} ", name, version);
-      }
-      try {
-        Map m = (Map) ObjectBuilder.getVal(new JSONParser(new InputStreamReader(in, StandardCharsets.UTF_8)));
-        return new Object[]{m, version};
-      } catch (IOException e) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error parsing conf resource " + name, e);
-      }
-
-    } catch (IOException e) {
-      // not a problem: the resource (here params.json) simply does not exist
-      return new Object[]{Collections.EMPTY_MAP, -1};
-    }
-  }
-
-
-  public byte[] toByteArray() {
-    return Utils.toJSON(data);
-  }
-
-  public static final String USEPARAM = "useParams";
-  public static final String NAME = "params";
-  public static final String RESOURCE = "params.json";
-  public static final String APPENDS = "_appends_";
-  public static final String INVARIANTS = "_invariants_";
-
-  public static class ParamSet implements MapSerializable {
-    private final Map defaults, appends, invariants;
-    Map<String, VersionedParams> paramsMap;
-    public final Map meta;
-
-    ParamSet(Map defaults, Map invariants, Map appends, Map meta) {
-      this.defaults = defaults;
-      this.invariants = invariants;
-      this.appends = appends;
-      ImmutableMap.Builder<String, VersionedParams> builder = ImmutableMap.<String, VersionedParams>builder().put(PluginInfo.DEFAULTS,
-          new VersionedParams(defaults, this));
-      if (appends != null) builder.put(PluginInfo.APPENDS, new VersionedParams(appends, this));
-      if (invariants != null) builder.put(PluginInfo.INVARIANTS, new VersionedParams(invariants, this));
-      paramsMap = builder.build();
-      this.meta = meta;
-    }
-
-    public Long getVersion() {
-      return meta == null ? Long.valueOf(0L) : (Long) meta.get("v");
-    }
-
-    @Override
-    public Map<String, Object> toMap(Map<String, Object> result) {
-      result.putAll(defaults);
-      if (appends != null) result.put(APPENDS, appends);
-      if (invariants != null) result.put(INVARIANTS, invariants);
-      if(meta != null) result.put("", meta);
-      return result;
-    }
-
-
-    public ParamSet update(Map map) {
-      ParamSet p = createParamSet(map, null);
-      return new ParamSet(
-          mergeMaps(getDeepCopy(defaults, 2), p.defaults),
-          mergeMaps(getDeepCopy(invariants, 2), p.invariants),
-          mergeMaps(getDeepCopy(appends, 2), p.appends),
-          mergeMaps(getDeepCopy(meta, 2), singletonMap("v", getVersion() + 1))
-      );
-    }
-
-    private static Map mergeMaps(Map m1, Map m2) {
-      if (m1 == null && m2 == null) return null;
-      if (m1 == null) return m2;
-      if (m2 == null) return m1;
-      m1.putAll(m2);
-      return m1;
-    }
-
-    public VersionedParams getParams(String type) {
-      return paramsMap.get(type);
-    }
-  }
-
-  public static class VersionedParams extends MapSolrParams {
-    final ParamSet paramSet;
-
-    public VersionedParams(Map map, ParamSet paramSet) {
-      super(getMapCopy(map));
-      this.paramSet = paramSet;
-    }
-  }
-}
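
The expected shape of the data map can be read off the constructor above: a top-level "params" key maps paramset names to maps, where plain entries become defaults and the optional "_appends_", "_invariants_" and "" (meta) keys are split out by createParamSet. A hedged, unit-test-style sketch of building that structure by hand (normally Solr loads it from params.json in the config set; the "defaults" literal below assumes PluginInfo.DEFAULTS has that value):

    import java.util.Collections;
    import java.util.LinkedHashMap;
    import java.util.Map;

    import org.apache.solr.core.RequestParams;

    public class RequestParamsSketch {
      public static void main(String[] args) {
        Map<String, Object> myParams = new LinkedHashMap<>();
        myParams.put("q", "*:*");  // plain entries become the paramset's defaults
        myParams.put("_invariants_", Collections.singletonMap("rows", "10"));

        Map<String, Object> paramsets = new LinkedHashMap<>();
        paramsets.put("myParams", myParams);

        Map<String, Object> data = new LinkedHashMap<>();
        data.put("params", paramsets);  // the RequestParams.NAME key

        RequestParams rp = new RequestParams(data, 0);
        // "defaults" is assumed to match PluginInfo.DEFAULTS
        System.out.println(rp.getParams("myParams", "defaults").get("q"));  // *:*
      }
    }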

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/SchemaCodecFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SchemaCodecFactory.java b/solr/core/src/java/org/apache/solr/core/SchemaCodecFactory.java
deleted file mode 100644
index 5adc161..0000000
--- a/solr/core/src/java/org/apache/solr/core/SchemaCodecFactory.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.lang.invoke.MethodHandles;
-import java.util.Arrays;
-import java.util.Locale;
-
-import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.DocValuesFormat;
-import org.apache.lucene.codecs.PostingsFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
-import org.apache.lucene.codecs.lucene80.Lucene80Codec;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.schema.SchemaField;
-import org.apache.solr.util.plugin.SolrCoreAware;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Per-field CodecFactory implementation that extends Lucene's default codec
- * and returns postings and doc values format implementations according to the
- * schema configuration.
- * <br>
- * Also, a string argument named <code>compressionMode</code> can be
- * provided to choose between the different compression options for
- * stored fields.
- *
- * @lucene.experimental
- */
-public class SchemaCodecFactory extends CodecFactory implements SolrCoreAware {
-  
-  /**
-   * Key to use in init arguments to set the compression mode in the codec.
-   */
-  public static final String COMPRESSION_MODE = "compressionMode";
-  
-  public static final Mode SOLR_DEFAULT_COMPRESSION_MODE = Mode.BEST_SPEED;
-  
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  private Codec codec;
-  private volatile SolrCore core;
-  
-  
-  // TODO: we need to change how solr does this?
-  // rather than a string like "Direct" you need to be able to pass parameters
-  // and everything to a field in the schema, e.g. we should provide factories for 
-  // the Lucene's core formats (Memory, Direct, ...) and such.
-  //
-  // So I think a FieldType should return PostingsFormat, not a String.
-  // how it constructs this from the XML... i don't care.
-
-  @Override
-  public void inform(SolrCore core) {
-    this.core = core;
-  }
-
-  @Override
-  public void init(NamedList args) {
-    super.init(args);
-    assert codec == null;
-    String compressionModeStr = (String)args.get(COMPRESSION_MODE);
-    Mode compressionMode;
-    if (compressionModeStr != null) {
-      try {
-        compressionMode = Mode.valueOf(compressionModeStr.toUpperCase(Locale.ROOT));
-      } catch (IllegalArgumentException e) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, 
-            "Invalid compressionMode: '" + compressionModeStr + 
-            "'. Value must be one of " + Arrays.toString(Mode.values()));
-      }
-      log.debug("Using compressionMode: " + compressionMode);
-    } else {
-      compressionMode = SOLR_DEFAULT_COMPRESSION_MODE;
-      log.debug("Using default compressionMode: " + compressionMode);
-    }
-    codec = new Lucene80Codec(compressionMode) {
-      @Override
-      public PostingsFormat getPostingsFormatForField(String field) {
-        final SchemaField schemaField = core.getLatestSchema().getFieldOrNull(field);
-        if (schemaField != null) {
-          String postingsFormatName = schemaField.getType().getPostingsFormat();
-          if (postingsFormatName != null) {
-            return PostingsFormat.forName(postingsFormatName);
-          }
-        }
-        return super.getPostingsFormatForField(field);
-      }
-      @Override
-      public DocValuesFormat getDocValuesFormatForField(String field) {
-        final SchemaField schemaField = core.getLatestSchema().getFieldOrNull(field);
-        if (schemaField != null) {
-          String docValuesFormatName = schemaField.getType().getDocValuesFormat();
-          if (docValuesFormatName != null) {
-            return DocValuesFormat.forName(docValuesFormatName);
-          }
-        }
-        return super.getDocValuesFormatForField(field);
-      }
-    };
-  }
-
-  @Override
-  public Codec getCodec() {
-    assert core != null : "inform must be called first";
-    return codec;
-  }
-}
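
The only init argument the factory consults is compressionMode, parsed case-insensitively against Lucene's stored-fields Mode enum. A hedged sketch of driving that programmatically (in a real deployment the argument comes from the <codecFactory> element in solrconfig.xml, and inform(core) must run before getCodec() can resolve per-field formats):

    import org.apache.solr.common.util.NamedList;
    import org.apache.solr.core.SchemaCodecFactory;

    public class CodecFactorySketch {
      public static void main(String[] args) {
        NamedList<Object> initArgs = new NamedList<>();
        initArgs.add("compressionMode", "best_compression");  // case-insensitive, see Mode.valueOf above
        SchemaCodecFactory factory = new SchemaCodecFactory();
        factory.init(initArgs);
        // factory.inform(core) must be called before getCodec() is usable,
        // since the per-field lookups consult the core's latest schema
      }
    }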

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/ShutdownAwareDirectory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/ShutdownAwareDirectory.java b/solr/core/src/java/org/apache/solr/core/ShutdownAwareDirectory.java
deleted file mode 100644
index effe677..0000000
--- a/solr/core/src/java/org/apache/solr/core/ShutdownAwareDirectory.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.io.IOException;
-
-import org.apache.lucene.store.Directory;
-
-/**
- * {@link CachingDirectoryFactory} will call this method
- * rather than {@link Directory#close()} on shutdown if
- * a Directory implements this interface.
- */
-public interface ShutdownAwareDirectory {
-  public void closeOnShutdown() throws IOException;
-}
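
A possible implementor, sketched under the assumption that skipping expensive close-time work is the point of the interface (FlushOnCloseDirectory is a hypothetical name; FilterDirectory is Lucene's standard delegating base class):

    import java.io.IOException;

    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FilterDirectory;
    import org.apache.solr.core.ShutdownAwareDirectory;

    public class FlushOnCloseDirectory extends FilterDirectory implements ShutdownAwareDirectory {
      public FlushOnCloseDirectory(Directory in) {
        super(in);
      }

      @Override
      public void close() throws IOException {
        // normal close: imagine expensive work here, e.g. persisting a cache
        super.close();
      }

      @Override
      public void closeOnShutdown() throws IOException {
        // shutdown path: skip the expensive work and just release the delegate
        super.close();
      }
    }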

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/SimpleFSDirectoryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SimpleFSDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/SimpleFSDirectoryFactory.java
deleted file mode 100644
index 0784d21..0000000
--- a/solr/core/src/java/org/apache/solr/core/SimpleFSDirectoryFactory.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-import java.io.File;
-import java.io.IOException;
-
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.LockFactory;
-import org.apache.lucene.store.SimpleFSDirectory;
-
-
-/**
- * Factory to instantiate {@link org.apache.lucene.store.SimpleFSDirectory}
- */
-public class SimpleFSDirectoryFactory extends StandardDirectoryFactory {
-
-  @Override
-  protected Directory create(String path, LockFactory lockFactory, DirContext dirContext) throws IOException {
-    // the real lock factory is supplied by the caller via the lockFactory parameter
-    return new SimpleFSDirectory(new File(path).toPath(), lockFactory);
-  }
-
-  @Override
-  public boolean isAbsolute(String path) {
-    return new File(path).isAbsolute();
-  }
-}
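
SimpleFSDirectory is Lucene's portable, positional-read directory: a safe fallback that is generally slower than MMapDirectory or NIOFSDirectory. A small sketch of what create() above amounts to for an absolute path (the path is illustrative, and the lock factory here stands in for whatever the caller supplies):

    import java.nio.file.Paths;

    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.SimpleFSDirectory;
    import org.apache.lucene.store.SingleInstanceLockFactory;

    public class SimpleFsSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new SimpleFSDirectory(
            Paths.get("/var/solr/data/index"),  // illustrative path
            new SingleInstanceLockFactory());   // stand-in for the injected lock factory
        dir.close();
      }
    }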

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/SimpleTextCodecFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SimpleTextCodecFactory.java b/solr/core/src/java/org/apache/solr/core/SimpleTextCodecFactory.java
deleted file mode 100644
index de0124f..0000000
--- a/solr/core/src/java/org/apache/solr/core/SimpleTextCodecFactory.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.core;
-
-import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.simpletext.SimpleTextCodec;
-import org.apache.solr.common.util.NamedList;
-
-public class SimpleTextCodecFactory extends CodecFactory {
-  private Codec codec;
-
-  @Override
-  public void init(NamedList args) {
-    super.init(args);
-    assert codec == null;
-    codec = new SimpleTextCodec();
-  }
-
-  @Override
-  public Codec getCodec() {
-    return codec;
-  }
-}
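
SimpleTextCodec writes every index file as plain, human-readable text, which is useful for debugging postings and doc values but far too slow and verbose for production. A minimal usage sketch mirroring the factory contract above (init first, then getCodec):

    import org.apache.lucene.codecs.Codec;
    import org.apache.solr.common.util.NamedList;
    import org.apache.solr.core.SimpleTextCodecFactory;

    public class SimpleTextSketch {
      public static void main(String[] args) {
        SimpleTextCodecFactory factory = new SimpleTextCodecFactory();
        factory.init(new NamedList<>());   // no init args are read
        Codec codec = factory.getCodec();  // a SimpleTextCodec instance
        System.out.println(codec.getName());
      }
    }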

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/SolrConfig.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrConfig.java b/solr/core/src/java/org/apache/solr/core/SolrConfig.java
deleted file mode 100644
index fcae44e..0000000
--- a/solr/core/src/java/org/apache/solr/core/SolrConfig.java
+++ /dev/null
@@ -1,963 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.common.params.CommonParams.PATH;
-import static org.apache.solr.common.util.Utils.makeMap;
-import static org.apache.solr.core.ConfigOverlay.ZNODEVER;
-import static org.apache.solr.core.SolrConfig.PluginOpts.LAZY;
-import static org.apache.solr.core.SolrConfig.PluginOpts.MULTI_OK;
-import static org.apache.solr.core.SolrConfig.PluginOpts.NOOP;
-import static org.apache.solr.core.SolrConfig.PluginOpts.REQUIRE_CLASS;
-import static org.apache.solr.core.SolrConfig.PluginOpts.REQUIRE_NAME;
-import static org.apache.solr.core.SolrConfig.PluginOpts.REQUIRE_NAME_IN_OVERLAY;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.lang.invoke.MethodHandles;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-import java.util.UUID;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import javax.xml.parsers.ParserConfigurationException;
-import javax.xml.xpath.XPathConstants;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.lucene.index.IndexDeletionPolicy;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.util.Version;
-import org.apache.solr.client.solrj.io.stream.expr.Expressible;
-import org.apache.solr.cloud.RecoveryStrategy;
-import org.apache.solr.cloud.ZkSolrResourceLoader;
-import org.apache.solr.common.MapSerializable;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.util.IOUtils;
-import org.apache.solr.handler.component.SearchComponent;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.response.QueryResponseWriter;
-import org.apache.solr.response.transform.TransformerFactory;
-import org.apache.solr.rest.RestManager;
-import org.apache.solr.schema.IndexSchemaFactory;
-import org.apache.solr.search.CacheConfig;
-import org.apache.solr.search.FastLRUCache;
-import org.apache.solr.search.QParserPlugin;
-import org.apache.solr.search.SolrCache;
-import org.apache.solr.search.ValueSourceParser;
-import org.apache.solr.search.stats.StatsCache;
-import org.apache.solr.servlet.SolrRequestParsers;
-import org.apache.solr.spelling.QueryConverter;
-import org.apache.solr.update.SolrIndexConfig;
-import org.apache.solr.update.UpdateLog;
-import org.apache.solr.update.processor.UpdateRequestProcessorChain;
-import org.apache.solr.update.processor.UpdateRequestProcessorFactory;
-import org.apache.solr.util.DOMUtil;
-import org.noggit.JSONParser;
-import org.noggit.ObjectBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-import org.xml.sax.InputSource;
-import org.xml.sax.SAXException;
-
-import com.google.common.collect.ImmutableList;
-
-
-/**
- * Provides a static reference to a Config object modeling the main
- * configuration data for a Solr instance -- typically found in
- * "solrconfig.xml".
- */
-public class SolrConfig extends Config implements MapSerializable {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String DEFAULT_CONF_FILE = "solrconfig.xml";
-  private RequestParams requestParams;
-
-  public enum PluginOpts {
-    MULTI_OK,
-    REQUIRE_NAME,
-    REQUIRE_NAME_IN_OVERLAY,
-    REQUIRE_CLASS,
-    LAZY,
-    // EnumSet.of and/or EnumSet.copyOf(Collection) are annoying
-    // because of type determination
-    NOOP
-  }
-
-  private int multipartUploadLimitKB;
-
-  private int formUploadLimitKB;
-
-  private boolean enableRemoteStreams;
-  private boolean enableStreamBody;
-
-  private boolean handleSelect;
-
-  private boolean addHttpRequestToContext;
-
-  private final SolrRequestParsers solrRequestParsers;
-
-  /**
-   * Creates a default instance from the solrconfig.xml.
-   */
-  public SolrConfig()
-      throws ParserConfigurationException, IOException, SAXException {
-    this((SolrResourceLoader) null, DEFAULT_CONF_FILE, null);
-  }
-
-  /**
-   * Creates a configuration instance from a configuration name.
-   * A default resource loader will be created (@see SolrResourceLoader)
-   *
-   * @param name the configuration name used by the loader
-   */
-  public SolrConfig(String name)
-      throws ParserConfigurationException, IOException, SAXException {
-    this((SolrResourceLoader) null, name, null);
-  }
-
-  /**
-   * Creates a configuration instance from a configuration name and stream.
-   * A default resource loader will be created (@see SolrResourceLoader).
-   * If the stream is null, the resource loader will open the configuration stream.
-   * If the stream is not null, no attempt to load the resource will occur (the name is not used).
-   *
-   * @param name the configuration name
-   * @param is   the configuration stream
-   */
-  public SolrConfig(String name, InputSource is)
-      throws ParserConfigurationException, IOException, SAXException {
-    this((SolrResourceLoader) null, name, is);
-  }
-
-  /**
-   * Creates a configuration instance from an instance directory, configuration name and stream.
-   *
-   * @param instanceDir the directory used to create the resource loader
-   * @param name        the configuration name used by the loader if the stream is null
-   * @param is          the configuration stream
-   */
-  public SolrConfig(Path instanceDir, String name, InputSource is)
-      throws ParserConfigurationException, IOException, SAXException {
-    this(new SolrResourceLoader(instanceDir), name, is);
-  }
-
-  public static SolrConfig readFromResourceLoader(SolrResourceLoader loader, String name) {
-    try {
-      return new SolrConfig(loader, name, null);
-    } catch (Exception e) {
-      String resource;
-      if (loader instanceof ZkSolrResourceLoader) {
-        resource = name;
-      } else {
-        resource = Paths.get(loader.getConfigDir()).resolve(name).toString();
-      }
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error loading solr config from " + resource, e);
-    }
-  }
-
-  /**
-   * Creates a configuration instance from a resource loader, a configuration name and a stream.
-   * If the stream is null, the resource loader will open the configuration stream.
-   * If the stream is not null, no attempt to load the resource will occur (the name is not used).
-   *
-   * @param loader the resource loader
-   * @param name   the configuration name
-   * @param is     the configuration stream
-   */
-  public SolrConfig(SolrResourceLoader loader, String name, InputSource is)
-      throws ParserConfigurationException, IOException, SAXException {
-    super(loader, name, is, "/config/");
-    getOverlay();//just in case it is not initialized
-    getRequestParams();
-    initLibs();
-    luceneMatchVersion = getLuceneVersion("luceneMatchVersion");
-    String indexConfigPrefix;
-
-    // The old indexDefaults and mainIndex sections are deprecated and fail fast for luceneMatchVersion >= LUCENE_4_0_0.
-    // For older solrconfig.xml files we allow the old sections, but never mixed with the new <indexConfig>
-    boolean hasDeprecatedIndexConfig = (getNode("indexDefaults", false) != null) || (getNode("mainIndex", false) != null);
-    if (hasDeprecatedIndexConfig) {
-      throw new SolrException(ErrorCode.FORBIDDEN, "<indexDefaults> and <mainIndex> configuration sections are discontinued. Use <indexConfig> instead.");
-    } else {
-      indexConfigPrefix = "indexConfig";
-    }
-    assertWarnOrFail("The <nrtMode> config has been discontinued and NRT mode is always used by Solr." +
-            " This config will be removed in future versions.", getNode(indexConfigPrefix + "/nrtMode", false) == null,
-        true
-    );
-    assertWarnOrFail("Solr no longer supports forceful unlocking via the 'unlockOnStartup' option.  "+
-                     "This is no longer necessary for the default lockType except in situations where "+
-                     "it would be dangerous and should not be done.  For other lockTypes and/or "+
-                     "directoryFactory options it may also be dangerous and users must resolve "+
-                     "problematic locks manually.",
-                     null == getNode(indexConfigPrefix + "/unlockOnStartup", false),
-                     true // 'fail' in trunk
-                     );
-                     
-    // Parse indexConfig section, using mainIndex as backup in case old config is used
-    indexConfig = new SolrIndexConfig(this, "indexConfig", null);
-
-    booleanQueryMaxClauseCount = getInt("query/maxBooleanClauses", BooleanQuery.getMaxClauseCount());
-    log.info("Using Lucene MatchVersion: {}", luceneMatchVersion);
-
-    // Warn about deprecated / discontinued parameters
-    // boolToFilterOptimizer has had no effect since 3.1
-    if (get("query/boolTofilterOptimizer", null) != null)
-      log.warn("solrconfig.xml: <boolTofilterOptimizer> is currently not implemented and has no effect.");
-    if (get("query/HashDocSet", null) != null)
-      log.warn("solrconfig.xml: <HashDocSet> is deprecated and no longer recommended used.");
-
-// TODO: Old code - in case somebody wants to re-enable. Also see SolrIndexSearcher#search()
-//    filtOptEnabled = getBool("query/boolTofilterOptimizer/@enabled", false);
-//    filtOptCacheSize = getInt("query/boolTofilterOptimizer/@cacheSize",32);
-//    filtOptThreshold = getFloat("query/boolTofilterOptimizer/@threshold",.05f);
-
-    useFilterForSortedQuery = getBool("query/useFilterForSortedQuery", false);
-    queryResultWindowSize = Math.max(1, getInt("query/queryResultWindowSize", 1));
-    queryResultMaxDocsCached = getInt("query/queryResultMaxDocsCached", Integer.MAX_VALUE);
-    enableLazyFieldLoading = getBool("query/enableLazyFieldLoading", false);
-    
-    useRangeVersionsForPeerSync = getBool("peerSync/useRangeVersions", true);
-
-    filterCacheConfig = CacheConfig.getConfig(this, "query/filterCache");
-    queryResultCacheConfig = CacheConfig.getConfig(this, "query/queryResultCache");
-    documentCacheConfig = CacheConfig.getConfig(this, "query/documentCache");
-    CacheConfig conf = CacheConfig.getConfig(this, "query/fieldValueCache");
-    if (conf == null) {
-      Map<String, String> args = new HashMap<>();
-      args.put(NAME, "fieldValueCache");
-      args.put("size", "10000");
-      args.put("initialSize", "10");
-      args.put("showItems", "-1");
-      conf = new CacheConfig(FastLRUCache.class, args, null);
-    }
-    fieldValueCacheConfig = conf;
-    useColdSearcher = getBool("query/useColdSearcher", false);
-    dataDir = get("dataDir", null);
-    if (dataDir != null && dataDir.length() == 0) dataDir = null;
-
-
-    org.apache.solr.search.SolrIndexSearcher.initRegenerators(this);
-
-    hashSetInverseLoadFactor = 1.0f / getFloat("//HashDocSet/@loadFactor", 0.75f);
-    hashDocSetMaxSize = getInt("//HashDocSet/@maxSize", 3000);
-
-    if (get("jmx", null) != null) {
-      log.warn("solrconfig.xml: <jmx> is no longer supported, use solr.xml:/metrics/reporter section instead");
-    }
-
-    httpCachingConfig = new HttpCachingConfig(this);
-
-    maxWarmingSearchers = getInt("query/maxWarmingSearchers", 1);
-    slowQueryThresholdMillis = getInt("query/slowQueryThresholdMillis", -1);
-    for (SolrPluginInfo plugin : plugins) loadPluginInfo(plugin);
-
-    Map<String, CacheConfig> userCacheConfigs = CacheConfig.getMultipleConfigs(this, "query/cache");
-    List<PluginInfo> caches = getPluginInfos(SolrCache.class.getName());
-    if (!caches.isEmpty()) {
-      for (PluginInfo c : caches) {
-        userCacheConfigs.put(c.name, CacheConfig.getConfig(this, "cache", c.attributes, null));
-      }
-    }
-    this.userCacheConfigs = Collections.unmodifiableMap(userCacheConfigs);
-
-    updateHandlerInfo = loadUpdatehandlerInfo();
-
-    multipartUploadLimitKB = getInt(
-        "requestDispatcher/requestParsers/@multipartUploadLimitInKB", Integer.MAX_VALUE);
-    if (multipartUploadLimitKB == -1) multipartUploadLimitKB = Integer.MAX_VALUE;
-
-    formUploadLimitKB = getInt(
-        "requestDispatcher/requestParsers/@formdataUploadLimitInKB", Integer.MAX_VALUE);
-    if (formUploadLimitKB == -1) formUploadLimitKB = Integer.MAX_VALUE;
-
-    enableRemoteStreams = getBool(
-        "requestDispatcher/requestParsers/@enableRemoteStreaming", false);
-
-    enableStreamBody = getBool(
-        "requestDispatcher/requestParsers/@enableStreamBody", false);
-
-    handleSelect = getBool(
-        "requestDispatcher/@handleSelect", !luceneMatchVersion.onOrAfter(Version.LUCENE_7_0_0));
-
-    addHttpRequestToContext = getBool(
-        "requestDispatcher/requestParsers/@addHttpRequestToContext", false);
-
-    List<PluginInfo> argsInfos = getPluginInfos(InitParams.class.getName());
-    if (argsInfos != null) {
-      Map<String, InitParams> argsMap = new HashMap<>();
-      for (PluginInfo p : argsInfos) {
-        InitParams args = new InitParams(p);
-        argsMap.put(args.name == null ? String.valueOf(args.hashCode()) : args.name, args);
-      }
-      this.initParams = Collections.unmodifiableMap(argsMap);
-
-    }
-
-    solrRequestParsers = new SolrRequestParsers(this);
-    log.debug("Loaded SolrConfig: {}", name);
-  }
-
-  public static final List<SolrPluginInfo> plugins = ImmutableList.<SolrPluginInfo>builder()
-      .add(new SolrPluginInfo(SolrRequestHandler.class, SolrRequestHandler.TYPE, REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK, LAZY))
-      .add(new SolrPluginInfo(QParserPlugin.class, "queryParser", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
-      .add(new SolrPluginInfo(Expressible.class, "expressible", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
-      .add(new SolrPluginInfo(QueryResponseWriter.class, "queryResponseWriter", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK, LAZY))
-      .add(new SolrPluginInfo(ValueSourceParser.class, "valueSourceParser", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
-      .add(new SolrPluginInfo(TransformerFactory.class, "transformer", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
-      .add(new SolrPluginInfo(SearchComponent.class, "searchComponent", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
-      .add(new SolrPluginInfo(UpdateRequestProcessorFactory.class, "updateProcessor", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
-      .add(new SolrPluginInfo(SolrCache.class, "cache", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
-          // TODO: WTF is up with queryConverter???
-          // it apparently *only* works as a singleton? - SOLR-4304
-          // and even then -- only if there is a single SpellCheckComponent
-          // because of queryConverter.setIndexAnalyzer
-      .add(new SolrPluginInfo(QueryConverter.class, "queryConverter", REQUIRE_NAME, REQUIRE_CLASS))
-      .add(new SolrPluginInfo(PluginBag.RuntimeLib.class, "runtimeLib", REQUIRE_NAME, MULTI_OK))
-          // this is hackish, since it picks up all SolrEventListeners,
-          // regardless of when/how/why they are used (or even if they are
-          // declared outside of the appropriate context) but there's no nice
-          // way around that in the PluginInfo framework
-      .add(new SolrPluginInfo(InitParams.class, InitParams.TYPE, MULTI_OK, REQUIRE_NAME_IN_OVERLAY))
-      .add(new SolrPluginInfo(SolrEventListener.class, "//listener", REQUIRE_CLASS, MULTI_OK, REQUIRE_NAME_IN_OVERLAY))
-
-      .add(new SolrPluginInfo(DirectoryFactory.class, "directoryFactory", REQUIRE_CLASS))
-      .add(new SolrPluginInfo(RecoveryStrategy.Builder.class, "recoveryStrategy"))
-      .add(new SolrPluginInfo(IndexDeletionPolicy.class, "indexConfig/deletionPolicy", REQUIRE_CLASS))
-      .add(new SolrPluginInfo(CodecFactory.class, "codecFactory", REQUIRE_CLASS))
-      .add(new SolrPluginInfo(IndexReaderFactory.class, "indexReaderFactory", REQUIRE_CLASS))
-      .add(new SolrPluginInfo(UpdateRequestProcessorChain.class, "updateRequestProcessorChain", MULTI_OK))
-      .add(new SolrPluginInfo(UpdateLog.class, "updateHandler/updateLog"))
-      .add(new SolrPluginInfo(IndexSchemaFactory.class, "schemaFactory", REQUIRE_CLASS))
-      .add(new SolrPluginInfo(RestManager.class, "restManager"))
-      .add(new SolrPluginInfo(StatsCache.class, "statsCache", REQUIRE_CLASS))
-      .build();
-  public static final Map<String, SolrPluginInfo> classVsSolrPluginInfo;
-
-  static {
-    // Raise the Lucene static limit so we can control this with higher granularity.  See SOLR-10921
-    BooleanQuery.setMaxClauseCount(Integer.MAX_VALUE-1);
-
-    Map<String, SolrPluginInfo> map = new HashMap<>();
-    for (SolrPluginInfo plugin : plugins) map.put(plugin.clazz.getName(), plugin);
-    classVsSolrPluginInfo = Collections.unmodifiableMap(map);
-  }
-
-  {
-    // non-static setMaxClauseCount because the test framework sometimes reverts the value on us and
-    // the static setting above is only executed once.  This re-sets the value every time a SolrConfig
-    // object is created. See SOLR-10921
-    BooleanQuery.setMaxClauseCount(Integer.MAX_VALUE-1);
-  }
-
-
-  public static class SolrPluginInfo {
-
-    public final Class clazz;
-    public final String tag;
-    public final Set<PluginOpts> options;
-
-
-    private SolrPluginInfo(Class clz, String tag, PluginOpts... opts) {
-      this.clazz = clz;
-      this.tag = tag;
-      this.options = opts == null ? Collections.EMPTY_SET : EnumSet.of(NOOP, opts);
-    }
-
-    public String getCleanTag() {
-      return tag.replaceAll("/", "");
-    }
-
-    public String getTagCleanLower() {
-      return getCleanTag().toLowerCase(Locale.ROOT);
-
-    }
-  }
-
-  public static ConfigOverlay getConfigOverlay(SolrResourceLoader loader) {
-    InputStream in = null;
-    InputStreamReader isr = null;
-    try {
-      try {
-        in = loader.openResource(ConfigOverlay.RESOURCE_NAME);
-      } catch (IOException e) {
-        // TODO: we should be explicitly looking for file not found exceptions
-        // and logging if it's not the expected IOException
-        // hopefully no problem, assume no overlay.json file
-        return new ConfigOverlay(Collections.EMPTY_MAP, -1);
-      }
-      
-      int version = 0; // will be always 0 for file based resourceLoader
-      if (in instanceof ZkSolrResourceLoader.ZkByteArrayInputStream) {
-        version = ((ZkSolrResourceLoader.ZkByteArrayInputStream) in).getStat().getVersion();
-        log.debug("Config overlay loaded. version : {} ", version);
-      }
-      isr = new InputStreamReader(in, StandardCharsets.UTF_8);
-      Map m = (Map) ObjectBuilder.getVal(new JSONParser(isr));
-      return new ConfigOverlay(m, version);
-    } catch (Exception e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error reading config overlay", e);
-    } finally {
-      IOUtils.closeQuietly(isr);
-      IOUtils.closeQuietly(in);
-    }
-  }
-
-  private Map<String, InitParams> initParams = Collections.emptyMap();
-
-  public Map<String, InitParams> getInitParams() {
-    return initParams;
-  }
-
-  protected UpdateHandlerInfo loadUpdatehandlerInfo() {
-    return new UpdateHandlerInfo(get("updateHandler/@class", null),
-        getInt("updateHandler/autoCommit/maxDocs", -1),
-        getInt("updateHandler/autoCommit/maxTime", -1),
-        convertHeapOptionStyleConfigStringToBytes(get("updateHandler/autoCommit/maxSize", "")),
-        getBool("updateHandler/indexWriter/closeWaitsForMerges", true),
-        getBool("updateHandler/autoCommit/openSearcher", true),
-        getInt("updateHandler/autoSoftCommit/maxDocs", -1),
-        getInt("updateHandler/autoSoftCommit/maxTime", -1),
-        getBool("updateHandler/commitWithin/softCommit", true));
-  }
-
-  /**
-   * Converts a Java heap option-like config string to bytes. Valid suffixes are: 'k', 'm', 'g'
-   * (case insensitive). If there is no suffix, the default unit is bytes.
-   * For example, 50k = 50KB, 20m = 20MB, 4g = 4GB, 300 = 300 bytes
-   * @param configStr the config setting to parse
-   * @return the size, in bytes. -1 if the given config string is empty
-   */
-  protected static long convertHeapOptionStyleConfigStringToBytes(String configStr) {
-    if (configStr.isEmpty()) {
-      return -1;
-    }
-    long multiplier = 1;
-    String numericValueStr = configStr;
-    char suffix = Character.toLowerCase(configStr.charAt(configStr.length() - 1));
-    if (Character.isLetter(suffix)) {
-      if (suffix == 'k') {
-        multiplier = FileUtils.ONE_KB;
-      }
-      else if (suffix == 'm') {
-        multiplier = FileUtils.ONE_MB;
-      }
-      else if (suffix == 'g') {
-        multiplier = FileUtils.ONE_GB;
-      } else {
-        throw new RuntimeException("Invalid suffix. Valid suffixes are 'k' (KB), 'm' (MB), 'g' (G). "
-            + "No suffix means the amount is in bytes. ");
-      }
-      numericValueStr = configStr.substring(0, configStr.length() - 1);
-    }
-    try {
-      return Long.parseLong(numericValueStr) * multiplier;
-    } catch (NumberFormatException e) {
-      throw new RuntimeException("Invalid format. The config setting should be a long with an "
-          + "optional letter suffix. Valid suffixes are 'k' (KB), 'm' (MB), 'g' (G). "
-          + "No suffix means the amount is in bytes.");
-    }
-  }
-
-  private void loadPluginInfo(SolrPluginInfo pluginInfo) {
-    boolean requireName = pluginInfo.options.contains(REQUIRE_NAME);
-    boolean requireClass = pluginInfo.options.contains(REQUIRE_CLASS);
-
-    List<PluginInfo> result = readPluginInfos(pluginInfo.tag, requireName, requireClass);
-
-    if (1 < result.size() && !pluginInfo.options.contains(MULTI_OK)) {
-      throw new SolrException
-          (SolrException.ErrorCode.SERVER_ERROR,
-              "Found " + result.size() + " configuration sections when at most "
-                  + "1 is allowed matching expression: " + pluginInfo.getCleanTag());
-    }
-    if (!result.isEmpty()) pluginStore.put(pluginInfo.clazz.getName(), result);
-  }
-
-  public List<PluginInfo> readPluginInfos(String tag, boolean requireName, boolean requireClass) {
-    ArrayList<PluginInfo> result = new ArrayList<>();
-    NodeList nodes = (NodeList) evaluate(tag, XPathConstants.NODESET);
-    for (int i = 0; i < nodes.getLength(); i++) {
-      PluginInfo pluginInfo = new PluginInfo(nodes.item(i), "[solrconfig.xml] " + tag, requireName, requireClass);
-      if (pluginInfo.isEnabled()) result.add(pluginInfo);
-    }
-    return result;
-  }
-
-  public SolrRequestParsers getRequestParsers() {
-    return solrRequestParsers;
-  }
-
-  /* The set of materialized parameters: */
-  public final int booleanQueryMaxClauseCount;
-  // SolrIndexSearcher - nutch optimizer -- Disabled since 3.1
-//  public final boolean filtOptEnabled;
-//  public final int filtOptCacheSize;
-//  public final float filtOptThreshold;
-  // SolrIndexSearcher - caches configurations
-  public final CacheConfig filterCacheConfig;
-  public final CacheConfig queryResultCacheConfig;
-  public final CacheConfig documentCacheConfig;
-  public final CacheConfig fieldValueCacheConfig;
-  public final Map<String, CacheConfig> userCacheConfigs;
-  // SolrIndexSearcher - more...
-  public final boolean useFilterForSortedQuery;
-  public final int queryResultWindowSize;
-  public final int queryResultMaxDocsCached;
-  public final boolean enableLazyFieldLoading;
-  
-  public final boolean useRangeVersionsForPeerSync;
-  
-  // DocSet
-  public final float hashSetInverseLoadFactor;
-  public final int hashDocSetMaxSize;
-  // IndexConfig settings
-  public final SolrIndexConfig indexConfig;
-
-  protected UpdateHandlerInfo updateHandlerInfo;
-
-  private Map<String, List<PluginInfo>> pluginStore = new LinkedHashMap<>();
-
-  public final int maxWarmingSearchers;
-  public final boolean useColdSearcher;
-  public final Version luceneMatchVersion;
-  protected String dataDir;
-  public final int slowQueryThresholdMillis;  // threshold above which a query is considered slow
-
-  private final HttpCachingConfig httpCachingConfig;
-
-  public HttpCachingConfig getHttpCachingConfig() {
-    return httpCachingConfig;
-  }
-
-  public static class HttpCachingConfig implements MapSerializable {
-
-    /**
-     * config xpath prefix for getting HTTP Caching options
-     */
-    private final static String CACHE_PRE
-        = "requestDispatcher/httpCaching/";
-
-    /**
-     * For extracting Expires "ttl" from <cacheControl> config
-     */
-    private final static Pattern MAX_AGE
-        = Pattern.compile("\\bmax-age=(\\d+)");
-
-    @Override
-    public Map<String, Object> toMap(Map<String, Object> map) {
-      return makeMap("never304", never304,
-          "etagSeed", etagSeed,
-          "lastModFrom", lastModFrom.name().toLowerCase(Locale.ROOT),
-          "cacheControl", cacheControlHeader);
-    }
-
-    public static enum LastModFrom {
-      OPENTIME, DIRLASTMOD, BOGUS;
-
-      /**
-       * Input must not be null
-       */
-      public static LastModFrom parse(final String s) {
-        try {
-          return valueOf(s.toUpperCase(Locale.ROOT));
-        } catch (Exception e) {
-          log.warn("Unrecognized value for lastModFrom: " + s, e);
-          return BOGUS;
-        }
-      }
-    }
-
-    private final boolean never304;
-    private final String etagSeed;
-    private final String cacheControlHeader;
-    private final Long maxAge;
-    private final LastModFrom lastModFrom;
-
-    private HttpCachingConfig(SolrConfig conf) {
-
-      never304 = conf.getBool(CACHE_PRE + "@never304", false);
-
-      etagSeed = conf.get(CACHE_PRE + "@etagSeed", "Solr");
-
-
-      lastModFrom = LastModFrom.parse(conf.get(CACHE_PRE + "@lastModFrom",
-          "openTime"));
-
-      cacheControlHeader = conf.get(CACHE_PRE + "cacheControl", null);
-
-      Long tmp = null; // maxAge
-      if (null != cacheControlHeader) {
-        try {
-          final Matcher ttlMatcher = MAX_AGE.matcher(cacheControlHeader);
-          final String ttlStr = ttlMatcher.find() ? ttlMatcher.group(1) : null;
-          tmp = (null != ttlStr && !"".equals(ttlStr))
-              ? Long.valueOf(ttlStr)
-              : null;
-        } catch (Exception e) {
-          log.warn("Ignoring exception while attempting to " +
-              "extract max-age from cacheControl config: " +
-              cacheControlHeader, e);
-        }
-      }
-      maxAge = tmp;
-
-    }
-
-    public boolean isNever304() {
-      return never304;
-    }
-
-    public String getEtagSeed() {
-      return etagSeed;
-    }
-
-    /**
-     * null if no Cache-Control header
-     */
-    public String getCacheControlHeader() {
-      return cacheControlHeader;
-    }
-
-    /**
-     * null if no max age limitation
-     */
-    public Long getMaxAge() {
-      return maxAge;
-    }
-
-    public LastModFrom getLastModFrom() {
-      return lastModFrom;
-    }
-  }
-
-  public static class UpdateHandlerInfo implements MapSerializable {
-    public final String className;
-    public final int autoCommmitMaxDocs, autoCommmitMaxTime,
-        autoSoftCommmitMaxDocs, autoSoftCommmitMaxTime;
-    public final long autoCommitMaxSizeBytes;
-    public final boolean indexWriterCloseWaitsForMerges;
-    public final boolean openSearcher;  // is opening a new searcher part of hard autocommit?
-    public final boolean commitWithinSoftCommit;
-
-    /**
-     * @param autoCommmitMaxDocs       set -1 as default
-     * @param autoCommmitMaxTime       set -1 as default
-     * @param autoCommitMaxSize        set -1 as default
-     */
-    public UpdateHandlerInfo(String className, int autoCommmitMaxDocs, int autoCommmitMaxTime, long autoCommitMaxSize, boolean indexWriterCloseWaitsForMerges, boolean openSearcher,
-                             int autoSoftCommmitMaxDocs, int autoSoftCommmitMaxTime, boolean commitWithinSoftCommit) {
-      this.className = className;
-      this.autoCommmitMaxDocs = autoCommmitMaxDocs;
-      this.autoCommmitMaxTime = autoCommmitMaxTime;
-      this.autoCommitMaxSizeBytes = autoCommitMaxSize;
-      this.indexWriterCloseWaitsForMerges = indexWriterCloseWaitsForMerges;
-      this.openSearcher = openSearcher;
-
-      this.autoSoftCommmitMaxDocs = autoSoftCommmitMaxDocs;
-      this.autoSoftCommmitMaxTime = autoSoftCommmitMaxTime;
-
-      this.commitWithinSoftCommit = commitWithinSoftCommit;
-    }
-
-
-    @Override
-    public Map<String, Object> toMap(Map<String, Object> map) {
-      LinkedHashMap result = new LinkedHashMap();
-      result.put("indexWriter", makeMap("closeWaitsForMerges", indexWriterCloseWaitsForMerges));
-      result.put("commitWithin", makeMap("softCommit", commitWithinSoftCommit));
-      result.put("autoCommit", makeMap(
-          "maxDocs", autoCommmitMaxDocs,
-          "maxTime", autoCommmitMaxTime,
-          "openSearcher", openSearcher
-      ));
-      result.put("autoSoftCommit",
-          makeMap("maxDocs", autoSoftCommmitMaxDocs,
-              "maxTime", autoSoftCommmitMaxTime));
-      return result;
-    }
-  }
-
-//  public Map<String, List<PluginInfo>> getUpdateProcessorChainInfo() { return updateProcessorChainInfo; }
-
-  public UpdateHandlerInfo getUpdateHandlerInfo() {
-    return updateHandlerInfo;
-  }
-
-  public String getDataDir() {
-    return dataDir;
-  }
-
-  /**
-   * SolrConfig keeps a repository of plugins by the type. The known interfaces are the types.
-   *
-   * @param type The key is the FQN of the plugin class. There are a few known types: SolrFormatter, SolrFragmenter,
-   *             SolrRequestHandler, QParserPlugin, QueryResponseWriter, ValueSourceParser,
-   *             SearchComponent, QueryConverter, SolrEventListener, DirectoryFactory,
-   *             IndexDeletionPolicy, IndexReaderFactory, {@link TransformerFactory}
-   */
-  public List<PluginInfo> getPluginInfos(String type) {
-    List<PluginInfo> result = pluginStore.get(type);
-    SolrPluginInfo info = classVsSolrPluginInfo.get(type);
-    if (info != null &&
-        (info.options.contains(REQUIRE_NAME) || info.options.contains(REQUIRE_NAME_IN_OVERLAY))) {
-      Map<String, Map> infos = overlay.getNamedPlugins(info.getCleanTag());
-      if (!infos.isEmpty()) {
-        LinkedHashMap<String, PluginInfo> map = new LinkedHashMap<>();
-        if (result != null) for (PluginInfo pluginInfo : result) {
-          // just create a UUID for the time being so that the map key is not null
-          String name = pluginInfo.name == null ?
-              UUID.randomUUID().toString().toLowerCase(Locale.ROOT) :
-              pluginInfo.name;
-          map.put(name, pluginInfo);
-        }
-        for (Map.Entry<String, Map> e : infos.entrySet()) {
-          map.put(e.getKey(), new PluginInfo(info.getCleanTag(), e.getValue()));
-        }
-        result = new ArrayList<>(map.values());
-      }
-    }
-    return result == null ? Collections.<PluginInfo>emptyList() : result;
-  }
-
-  public PluginInfo getPluginInfo(String type) {
-    List<PluginInfo> result = pluginStore.get(type);
-    if (result == null || result.isEmpty()) {
-      return null;
-    }
-    if (1 == result.size()) {
-      return result.get(0);
-    }
-
-    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-        "Multiple plugins configured for type: " + type);
-  }
-
-  private void initLibs() {
-    NodeList nodes = (NodeList) evaluate("lib", XPathConstants.NODESET);
-    if (nodes == null || nodes.getLength() == 0) return;
-
-    log.debug("Adding specified lib dirs to ClassLoader");
-    SolrResourceLoader loader = getResourceLoader();
-    List<URL> urls = new ArrayList<>();
-
-    for (int i = 0; i < nodes.getLength(); i++) {
-      Node node = nodes.item(i);
-      String baseDir = DOMUtil.getAttr(node, "dir");
-      String path = DOMUtil.getAttr(node, PATH);
-      if (null != baseDir) {
-        // :TODO: add support for a simpler 'glob' mutually exclusive of regex
-        Path dir = loader.getInstancePath().resolve(baseDir);
-        String regex = DOMUtil.getAttr(node, "regex");
-        try {
-          if (regex == null)
-            urls.addAll(SolrResourceLoader.getURLs(dir));
-          else
-            urls.addAll(SolrResourceLoader.getFilteredURLs(dir, regex));
-        } catch (IOException e) {
-          log.warn("Couldn't add files from {} filtered by {} to classpath: {}", dir, regex, e.getMessage());
-        }
-      } else if (null != path) {
-        final Path dir = loader.getInstancePath().resolve(path);
-        try {
-          urls.add(dir.toUri().toURL());
-        } catch (MalformedURLException e) {
-          log.warn("Couldn't add file {} to classpath: {}", dir, e.getMessage());
-        }
-      } else {
-        throw new RuntimeException("lib: missing mandatory attributes: 'dir' or 'path'");
-      }
-    }
-
-    if (urls.size() > 0) {
-      loader.addToClassLoader(urls);
-      loader.reloadLuceneSPI();
-    }
-  }
-
-  public int getMultipartUploadLimitKB() {
-    return multipartUploadLimitKB;
-  }
-
-  public int getFormUploadLimitKB() {
-    return formUploadLimitKB;
-  }
-
-  public boolean isHandleSelect() {
-    return handleSelect;
-  }
-
-  public boolean isAddHttpRequestToContext() {
-    return addHttpRequestToContext;
-  }
-
-  public boolean isEnableRemoteStreams() {
-    return enableRemoteStreams;
-  }
-
-  public boolean isEnableStreamBody() {
-    return enableStreamBody;
-  }
-
-  @Override
-  public int getInt(String path) {
-    return getInt(path, 0);
-  }
-
-  @Override
-  public int getInt(String path, int def) {
-    Object val = overlay.getXPathProperty(path);
-    if (val != null) return Integer.parseInt(val.toString());
-    return super.getInt(path, def);
-  }
-
-  @Override
-  public boolean getBool(String path, boolean def) {
-    Object val = overlay.getXPathProperty(path);
-    if (val != null) return Boolean.parseBoolean(val.toString());
-    return super.getBool(path, def);
-  }
-
-  @Override
-  public String get(String path) {
-    Object val = overlay.getXPathProperty(path, true);
-    return val != null ? val.toString() : super.get(path);
-  }
-
-  @Override
-  public String get(String path, String def) {
-    Object val = overlay.getXPathProperty(path, true);
-    return val != null ? val.toString() : super.get(path, def);
-  }
-
-  @Override
-  public Map<String, Object> toMap(Map<String, Object> result) {
-    if (getZnodeVersion() > -1) result.put(ZNODEVER, getZnodeVersion());
-    result.put("luceneMatchVersion", luceneMatchVersion);
-    result.put("updateHandler", getUpdateHandlerInfo());
-    Map m = new LinkedHashMap();
-    result.put("query", m);
-    m.put("useFilterForSortedQuery", useFilterForSortedQuery);
-    m.put("queryResultWindowSize", queryResultWindowSize);
-    m.put("queryResultMaxDocsCached", queryResultMaxDocsCached);
-    m.put("enableLazyFieldLoading", enableLazyFieldLoading);
-    m.put("maxBooleanClauses", booleanQueryMaxClauseCount);
-    for (SolrPluginInfo plugin : plugins) {
-      List<PluginInfo> infos = getPluginInfos(plugin.clazz.getName());
-      if (infos == null || infos.isEmpty()) continue;
-      String tag = plugin.getCleanTag();
-      tag = tag.replace("/", "");
-      if (plugin.options.contains(PluginOpts.REQUIRE_NAME)) {
-        LinkedHashMap items = new LinkedHashMap();
-        for (PluginInfo info : infos) items.put(info.name, info);
-        for (Map.Entry e : overlay.getNamedPlugins(plugin.tag).entrySet()) items.put(e.getKey(), e.getValue());
-        result.put(tag, items);
-      } else {
-        if (plugin.options.contains(MULTI_OK)) {
-          ArrayList<MapSerializable> l = new ArrayList<>();
-          for (PluginInfo info : infos) l.add(info);
-          result.put(tag, l);
-        } else {
-          result.put(tag, infos.get(0));
-        }
-      }
-    }
-
-    addCacheConfig(m, filterCacheConfig, queryResultCacheConfig, documentCacheConfig, fieldValueCacheConfig);
-    m = new LinkedHashMap();
-    result.put("requestDispatcher", m);
-    m.put("handleSelect", handleSelect);
-    if (httpCachingConfig != null) m.put("httpCaching", httpCachingConfig);
-    m.put("requestParsers", makeMap("multipartUploadLimitKB", multipartUploadLimitKB,
-        "formUploadLimitKB", formUploadLimitKB,
-        "addHttpRequestToContext", addHttpRequestToContext));
-    if (indexConfig != null) result.put("indexConfig", indexConfig);
-
-    m = new LinkedHashMap();
-    result.put("peerSync", m);
-    m.put("useRangeVersions", useRangeVersionsForPeerSync);
-
-    //TODO there is more to add
-
-    return result;
-  }
-
-  private void addCacheConfig(Map queryMap, CacheConfig... cache) {
-    if (cache == null) return;
-    for (CacheConfig config : cache) if (config != null) queryMap.put(config.getNodeName(), config);
-
-  }
-
-  @Override
-  protected Properties getSubstituteProperties() {
-    Map<String, Object> p = getOverlay().getUserProps();
-    if (p == null || p.isEmpty()) return super.getSubstituteProperties();
-    Properties result = new Properties(super.getSubstituteProperties());
-    result.putAll(p);
-    return result;
-  }
-
-  private ConfigOverlay overlay;
-
-  public ConfigOverlay getOverlay() {
-    if (overlay == null) {
-      overlay = getConfigOverlay(getResourceLoader());
-    }
-    return overlay;
-  }
-
-  public RequestParams getRequestParams() {
-    if (requestParams == null) {
-      return refreshRequestParams();
-    }
-    return requestParams;
-  }
-
-
-  public RequestParams refreshRequestParams() {
-    requestParams = RequestParams.getFreshRequestParams(getResourceLoader(), requestParams);
-    log.debug("current version of requestparams : {}", requestParams.getZnodeVersion());
-    return requestParams;
-  }
-
-}
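
A note on the getInt/getBool/get overrides near the end of the file above: they implement SolrConfig's lookup order, where the runtime ConfigOverlay is consulted first and the values parsed from solrconfig.xml serve as the fallback. Below is a minimal, self-contained sketch of that precedence; the class and field names are illustrative only, not Solr's actual API.

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

// Illustrative sketch of overlay-first resolution, as in the removed getInt/get overrides.
class OverlayFirstConfig {
  private final Map<String, Object> overlay = new HashMap<>(); // runtime overrides (Solr: ConfigOverlay)
  private final Properties defaults = new Properties();        // parsed config (Solr: the super.* calls)

  int getInt(String path, int def) {
    Object val = overlay.get(path);                 // 1. a runtime override always wins
    if (val != null) return Integer.parseInt(val.toString());
    String s = defaults.getProperty(path);          // 2. otherwise fall back to the static config
    return s != null ? Integer.parseInt(s) : def;   // 3. and finally to the caller-supplied default
  }
}

The same precedence is what lets a property changed at runtime (through the overlay) take effect without editing solrconfig.xml.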


[05/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
deleted file mode 100644
index e937370..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
+++ /dev/null
@@ -1,1481 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-
-import org.apache.lucene.index.IndexReaderContext;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.ReaderUtil;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.FieldComparator;
-import org.apache.lucene.search.LeafFieldComparator;
-import org.apache.lucene.search.MatchNoDocsQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Scorable;
-import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.grouping.GroupDocs;
-import org.apache.lucene.search.grouping.SearchGroup;
-import org.apache.lucene.search.grouping.TopGroups;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.InPlaceMergeSorter;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.CursorMarkParams;
-import org.apache.solr.common.params.GroupParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.BasicResultContext;
-import org.apache.solr.response.ResultContext;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.schema.FieldType;
-import org.apache.solr.schema.IndexSchema;
-import org.apache.solr.schema.SchemaField;
-import org.apache.solr.search.CursorMark;
-import org.apache.solr.search.DocIterator;
-import org.apache.solr.search.DocList;
-import org.apache.solr.search.DocListAndSet;
-import org.apache.solr.search.DocSlice;
-import org.apache.solr.search.Grouping;
-import org.apache.solr.search.QParser;
-import org.apache.solr.search.QParserPlugin;
-import org.apache.solr.search.QueryCommand;
-import org.apache.solr.search.QueryParsing;
-import org.apache.solr.search.QueryResult;
-import org.apache.solr.search.RankQuery;
-import org.apache.solr.search.ReturnFields;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.search.SolrReturnFields;
-import org.apache.solr.search.SortSpec;
-import org.apache.solr.search.SortSpecParsing;
-import org.apache.solr.search.SyntaxError;
-import org.apache.solr.search.grouping.CommandHandler;
-import org.apache.solr.search.grouping.GroupingSpecification;
-import org.apache.solr.search.grouping.distributed.ShardRequestFactory;
-import org.apache.solr.search.grouping.distributed.ShardResponseProcessor;
-import org.apache.solr.search.grouping.distributed.command.QueryCommand.Builder;
-import org.apache.solr.search.grouping.distributed.command.SearchGroupsFieldCommand;
-import org.apache.solr.search.grouping.distributed.command.TopGroupsFieldCommand;
-import org.apache.solr.search.grouping.distributed.requestfactory.SearchGroupsRequestFactory;
-import org.apache.solr.search.grouping.distributed.requestfactory.StoredFieldsShardRequestFactory;
-import org.apache.solr.search.grouping.distributed.requestfactory.TopGroupsShardRequestFactory;
-import org.apache.solr.search.grouping.distributed.responseprocessor.SearchGroupShardResponseProcessor;
-import org.apache.solr.search.grouping.distributed.responseprocessor.StoredFieldsShardResponseProcessor;
-import org.apache.solr.search.grouping.distributed.responseprocessor.TopGroupsShardResponseProcessor;
-import org.apache.solr.search.grouping.distributed.shardresultserializer.SearchGroupsResultTransformer;
-import org.apache.solr.search.grouping.distributed.shardresultserializer.TopGroupsResultTransformer;
-import org.apache.solr.search.grouping.endresulttransformer.EndResultTransformer;
-import org.apache.solr.search.grouping.endresulttransformer.GroupedEndResultTransformer;
-import org.apache.solr.search.grouping.endresulttransformer.MainEndResultTransformer;
-import org.apache.solr.search.grouping.endresulttransformer.SimpleEndResultTransformer;
-import org.apache.solr.search.stats.StatsCache;
-import org.apache.solr.util.SolrPluginUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-/**
- * TODO!
- * 
- *
- * @since solr 1.3
- */
-public class QueryComponent extends SearchComponent
-{
-  public static final String COMPONENT_NAME = "query";
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  @Override
-  public void prepare(ResponseBuilder rb) throws IOException
-  {
-
-    SolrQueryRequest req = rb.req;
-    SolrParams params = req.getParams();
-    if (!params.getBool(COMPONENT_NAME, true)) {
-      return;
-    }
-    SolrQueryResponse rsp = rb.rsp;
-
-    // Set field flags    
-    ReturnFields returnFields = new SolrReturnFields( req );
-    rsp.setReturnFields( returnFields );
-    int flags = 0;
-    if (returnFields.wantsScore()) {
-      flags |= SolrIndexSearcher.GET_SCORES;
-    }
-    rb.setFieldFlags( flags );
-
-    String defType = params.get(QueryParsing.DEFTYPE, QParserPlugin.DEFAULT_QTYPE);
-
-    // get it from the response builder to give a different component a chance
-    // to set it.
-    String queryString = rb.getQueryString();
-    if (queryString == null) {
-      // this is the normal way it's set.
-      queryString = params.get( CommonParams.Q );
-      rb.setQueryString(queryString);
-    }
-
-    try {
-      QParser parser = QParser.getParser(rb.getQueryString(), defType, req);
-      Query q = parser.getQuery();
-      if (q == null) {
-        // normalize a null query to a query that matches nothing
-        q = new MatchNoDocsQuery();
-      }
-
-      rb.setQuery( q );
-
-      String rankQueryString = rb.req.getParams().get(CommonParams.RQ);
-      if(rankQueryString != null) {
-        QParser rqparser = QParser.getParser(rankQueryString, req);
-        Query rq = rqparser.getQuery();
-        if(rq instanceof RankQuery) {
-          RankQuery rankQuery = (RankQuery)rq;
-          rb.setRankQuery(rankQuery);
-          MergeStrategy mergeStrategy = rankQuery.getMergeStrategy();
-          if(mergeStrategy != null) {
-            rb.addMergeStrategy(mergeStrategy);
-            if(mergeStrategy.handlesMergeFields()) {
-              rb.mergeFieldHandler = mergeStrategy;
-            }
-          }
-        } else {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,"rq parameter must be a RankQuery");
-        }
-      }
-
-      rb.setSortSpec( parser.getSortSpec(true) );
-      rb.setQparser(parser);
-
-      final String cursorStr = rb.req.getParams().get(CursorMarkParams.CURSOR_MARK_PARAM);
-      if (null != cursorStr) {
-        final CursorMark cursorMark = new CursorMark(rb.req.getSchema(),
-                                                     rb.getSortSpec());
-        cursorMark.parseSerializedTotem(cursorStr);
-        rb.setCursorMark(cursorMark);
-      }
-
-      String[] fqs = req.getParams().getParams(CommonParams.FQ);
-      if (fqs!=null && fqs.length!=0) {
-        List<Query> filters = rb.getFilters();
-        // if filters already exist, make a copy instead of modifying the original
-        filters = filters == null ? new ArrayList<>(fqs.length) : new ArrayList<>(filters);
-        for (String fq : fqs) {
-          if (fq != null && fq.trim().length()!=0) {
-            QParser fqp = QParser.getParser(fq, req);
-            fqp.setIsFilter(true);
-            filters.add(fqp.getQuery());
-          }
-        }
-        // only set the filters if they are not empty; otherwise
-        // fq=&someotherParam= would trigger an all-docs filter for every request
-        // if the filter cache is disabled
-        if (!filters.isEmpty()) {
-          rb.setFilters( filters );
-        }
-      }
-    } catch (SyntaxError e) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
-    }
-
-    if (params.getBool(GroupParams.GROUP, false)) {
-      prepareGrouping(rb);
-    } else {
-      // Validate only in the case of a non-grouping search.
-      if(rb.getSortSpec().getCount() < 0) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'rows' parameter cannot be negative");
-      }
-    }
-
-    //Input validation.
-    if (rb.getSortSpec().getOffset() < 0) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'start' parameter cannot be negative");
-    }
-  }
-
-  protected void prepareGrouping(ResponseBuilder rb) throws IOException {
-
-    SolrQueryRequest req = rb.req;
-    SolrParams params = req.getParams();
-
-    if (null != rb.getCursorMark()) {
-      // It's hard to imagine, conceptually, what it would mean to combine
-      // grouping with a cursor - so for now we just don't allow the combination at all
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can not use Grouping with " +
-                              CursorMarkParams.CURSOR_MARK_PARAM);
-    }
-
-    SolrIndexSearcher searcher = rb.req.getSearcher();
-    GroupingSpecification groupingSpec = new GroupingSpecification();
-    rb.setGroupingSpec(groupingSpec);
-
-    final SortSpec sortSpec = rb.getSortSpec();
-
-    //TODO: move weighting of sort
-    final SortSpec groupSortSpec = searcher.weightSortSpec(sortSpec, Sort.RELEVANCE);
-
-    String withinGroupSortStr = params.get(GroupParams.GROUP_SORT);
-    //TODO: move weighting of sort
-    final SortSpec withinGroupSortSpec;
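-    // group.sort, when given, controls ordering within each group; otherwise the
-    // within-group sort falls back to the overall group sort below.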
-    if (withinGroupSortStr != null) {
-      SortSpec parsedWithinGroupSortSpec = SortSpecParsing.parseSortSpec(withinGroupSortStr, req);
-      withinGroupSortSpec = searcher.weightSortSpec(parsedWithinGroupSortSpec, Sort.RELEVANCE);
-    } else {
-      withinGroupSortSpec = new SortSpec(
-          groupSortSpec.getSort(),
-          groupSortSpec.getSchemaFields(),
-          groupSortSpec.getCount(),
-          groupSortSpec.getOffset());
-    }
-    withinGroupSortSpec.setOffset(params.getInt(GroupParams.GROUP_OFFSET, 0));
-    withinGroupSortSpec.setCount(params.getInt(GroupParams.GROUP_LIMIT, 1));
-
-    groupingSpec.setWithinGroupSortSpec(withinGroupSortSpec);
-    groupingSpec.setGroupSortSpec(groupSortSpec);
-
-    String formatStr = params.get(GroupParams.GROUP_FORMAT, Grouping.Format.grouped.name());
-    Grouping.Format responseFormat;
-    try {
-       responseFormat = Grouping.Format.valueOf(formatStr);
-    } catch (IllegalArgumentException e) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, String.format(Locale.ROOT, "Illegal %s parameter", GroupParams.GROUP_FORMAT));
-    }
-    groupingSpec.setResponseFormat(responseFormat);
-
-    groupingSpec.setFields(params.getParams(GroupParams.GROUP_FIELD));
-    groupingSpec.setQueries(params.getParams(GroupParams.GROUP_QUERY));
-    groupingSpec.setFunctions(params.getParams(GroupParams.GROUP_FUNC));
-    groupingSpec.setIncludeGroupCount(params.getBool(GroupParams.GROUP_TOTAL_COUNT, false));
-    groupingSpec.setMain(params.getBool(GroupParams.GROUP_MAIN, false));
-    groupingSpec.setNeedScore((rb.getFieldFlags() & SolrIndexSearcher.GET_SCORES) != 0);
-    groupingSpec.setTruncateGroups(params.getBool(GroupParams.GROUP_TRUNCATE, false));
-  }
-
-
-
-  /**
-   * Actually run the query
-   */
-  @Override
-  public void process(ResponseBuilder rb) throws IOException
-  {
-    log.debug("process: {}", rb.req.getParams());
-  
-    SolrQueryRequest req = rb.req;
-    SolrParams params = req.getParams();
-    if (!params.getBool(COMPONENT_NAME, true)) {
-      return;
-    }
-
-    StatsCache statsCache = req.getCore().getStatsCache();
-    
-    int purpose = params.getInt(ShardParams.SHARDS_PURPOSE, ShardRequest.PURPOSE_GET_TOP_IDS);
-    if ((purpose & ShardRequest.PURPOSE_GET_TERM_STATS) != 0) {
-      SolrIndexSearcher searcher = req.getSearcher();
-      statsCache.returnLocalStats(rb, searcher);
-      return;
-    }
-    // check if we need to update the local copy of global dfs
-    if ((purpose & ShardRequest.PURPOSE_SET_TERM_STATS) != 0) {
-      // retrieve from request and update local cache
-      statsCache.receiveGlobalStats(req);
-    }
-
-    // Optional: This could also be implemented by the top-level searcher sending
-    // a filter that lists the ids... that would be transparent to
-    // the request handler, but would be more expensive (and would preserve score
-    // too if desired).
-    if (doProcessSearchByIds(rb)) {
-      return;
-    }
-
-    // -1 as flag if not set.
-    long timeAllowed = params.getLong(CommonParams.TIME_ALLOWED, -1L);
-    if (null != rb.getCursorMark() && 0 < timeAllowed) {
-      // fundamentally incompatible
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can not search using both " +
-                              CursorMarkParams.CURSOR_MARK_PARAM + " and " + CommonParams.TIME_ALLOWED);
-    }
-
-    QueryCommand cmd = rb.createQueryCommand();
-    cmd.setTimeAllowed(timeAllowed);
-
-    req.getContext().put(SolrIndexSearcher.STATS_SOURCE, statsCache.get(req));
-    
-    QueryResult result = new QueryResult();
-
-    cmd.setSegmentTerminateEarly(params.getBool(CommonParams.SEGMENT_TERMINATE_EARLY, CommonParams.SEGMENT_TERMINATE_EARLY_DEFAULT));
-    if (cmd.getSegmentTerminateEarly()) {
-      result.setSegmentTerminatedEarly(Boolean.FALSE);
-    }
-
-    //
-    // grouping / field collapsing
-    //
-    GroupingSpecification groupingSpec = rb.getGroupingSpec();
-    if (groupingSpec != null) {
-      cmd.setSegmentTerminateEarly(false); // not supported, silently ignore any segmentTerminateEarly flag
-      try {
-        if (params.getBool(GroupParams.GROUP_DISTRIBUTED_FIRST, false)) {
-          doProcessGroupedDistributedSearchFirstPhase(rb, cmd, result);
-          return;
-        } else if (params.getBool(GroupParams.GROUP_DISTRIBUTED_SECOND, false)) {
-          doProcessGroupedDistributedSearchSecondPhase(rb, cmd, result);
-          return;
-        }
-
-        doProcessGroupedSearch(rb, cmd, result);
-        return;
-      } catch (SyntaxError e) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
-      }
-    }
-
-    // normal search result
-    doProcessUngroupedSearch(rb, cmd, result);
-  }
-
-  protected void doFieldSortValues(ResponseBuilder rb, SolrIndexSearcher searcher) throws IOException
-  {
-    SolrQueryRequest req = rb.req;
-    SolrQueryResponse rsp = rb.rsp;
-    // The query cache doesn't currently store sort field values, and SolrIndexSearcher doesn't
-    // currently have an option to return sort field values.  Because of this, we
-    // take the documents given and re-derive the sort values.
-    //
-    // TODO: See SOLR-5595
-    boolean fsv = req.getParams().getBool(ResponseBuilder.FIELD_SORT_VALUES,false);
-    if(fsv){
-      NamedList<Object[]> sortVals = new NamedList<>(); // order is important for the sort fields
-      IndexReaderContext topReaderContext = searcher.getTopReaderContext();
-      List<LeafReaderContext> leaves = topReaderContext.leaves();
-      LeafReaderContext currentLeaf = null;
-      if (leaves.size()==1) {
-        // if there is a single segment, use that subReader and avoid looking up each time
-        currentLeaf = leaves.get(0);
-        leaves=null;
-      }
-
-      DocList docList = rb.getResults().docList;
-
-      // sort ids from lowest to highest so we can access them in order
-      int nDocs = docList.size();
-      final long[] sortedIds = new long[nDocs];
-      final float[] scores = new float[nDocs]; // doc scores, parallel to sortedIds
-      DocList docs = rb.getResults().docList;
-      DocIterator it = docs.iterator();
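-      // Each long packs the lucene docid into the upper 32 bits and the doc's
-      // original position in the response into the lower 32 bits, so sorting the
-      // longs orders by docid while remembering where each doc belongs.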
-      for (int i=0; i<nDocs; i++) {
-        sortedIds[i] = (((long)it.nextDoc()) << 32) | i;
-        scores[i] = docs.hasScores() ? it.score() : Float.NaN;
-      }
-
-      // sort ids and scores together
-      new InPlaceMergeSorter() {
-        @Override
-        protected void swap(int i, int j) {
-          long tmpId = sortedIds[i];
-          float tmpScore = scores[i];
-          sortedIds[i] = sortedIds[j];
-          scores[i] = scores[j];
-          sortedIds[j] = tmpId;
-          scores[j] = tmpScore;
-        }
-
-        @Override
-        protected int compare(int i, int j) {
-          return Long.compare(sortedIds[i], sortedIds[j]);
-        }
-      }.sort(0, sortedIds.length);
-
-      SortSpec sortSpec = rb.getSortSpec();
-      Sort sort = searcher.weightSort(sortSpec.getSort());
-      SortField[] sortFields = sort==null ? new SortField[]{SortField.FIELD_SCORE} : sort.getSort();
-      List<SchemaField> schemaFields = sortSpec.getSchemaFields();
-
-      for (int fld = 0; fld < schemaFields.size(); fld++) {
-        SchemaField schemaField = schemaFields.get(fld);
-        FieldType ft = null == schemaField? null : schemaField.getType();
-        SortField sortField = sortFields[fld];
-
-        SortField.Type type = sortField.getType();
-        // :TODO: would be simpler to always serialize every position of SortField[]
-        if (type==SortField.Type.SCORE || type==SortField.Type.DOC) continue;
-
-        FieldComparator<?> comparator = sortField.getComparator(1,0);
-        LeafFieldComparator leafComparator = null;
-        Object[] vals = new Object[nDocs];
-
-        int lastIdx = -1;
-        int idx = 0;
-
-        for (int i = 0; i < sortedIds.length; ++i) {
-          long idAndPos = sortedIds[i];
-          float score = scores[i];
-          int doc = (int)(idAndPos >>> 32);
-          int position = (int)idAndPos;
-
-          if (leaves != null) {
-            idx = ReaderUtil.subIndex(doc, leaves);
-            currentLeaf = leaves.get(idx);
-            if (idx != lastIdx) {
-              // we switched segments.  invalidate leafComparator.
-              lastIdx = idx;
-              leafComparator = null;
-            }
-          }
-
-          if (leafComparator == null) {
-            leafComparator = comparator.getLeafComparator(currentLeaf);
-          }
-
-          doc -= currentLeaf.docBase;  // adjust for what segment this is in
-          leafComparator.setScorer(new ScoreAndDoc(doc, score));
-          leafComparator.copy(0, doc);
-          Object val = comparator.value(0);
-          if (null != ft) val = ft.marshalSortValue(val);
-          vals[position] = val;
-        }
-
-        sortVals.add(sortField.getField(), vals);
-      }
-
-      rsp.add("sort_values", sortVals);
-    }
-  }
-
-  protected void doPrefetch(ResponseBuilder rb) throws IOException
-  {
-    SolrQueryRequest req = rb.req;
-    SolrQueryResponse rsp = rb.rsp;
-    //pre-fetch returned documents
-    if (!req.getParams().getBool(ShardParams.IS_SHARD,false) && rb.getResults().docList != null && rb.getResults().docList.size()<=50) {
-      SolrPluginUtils.optimizePreFetchDocs(rb, rb.getResults().docList, rb.getQuery(), req, rsp);
-    }
-  }
-
-  @Override
-  public int distributedProcess(ResponseBuilder rb) throws IOException {
-    if (rb.grouping()) {
-      return groupedDistributedProcess(rb);
-    } else {
-      return regularDistributedProcess(rb);
-    }
-  }
-
-  protected int groupedDistributedProcess(ResponseBuilder rb) {
-    int nextStage = ResponseBuilder.STAGE_DONE;
-    ShardRequestFactory shardRequestFactory = null;
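-    // Walk the grouped stages in order: PARSE_QUERY (build distributed stats),
-    // TOP_GROUPS (ask shards for their top groups), EXECUTE_QUERY (top docs
-    // within those groups), then GET_FIELDS (fetch stored fields).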
-
-    if (rb.stage < ResponseBuilder.STAGE_PARSE_QUERY) {
-      nextStage = ResponseBuilder.STAGE_PARSE_QUERY;
-    } else if (rb.stage == ResponseBuilder.STAGE_PARSE_QUERY) {
-      createDistributedStats(rb);
-      nextStage = ResponseBuilder.STAGE_TOP_GROUPS;
-    } else if (rb.stage < ResponseBuilder.STAGE_TOP_GROUPS) {
-      nextStage = ResponseBuilder.STAGE_TOP_GROUPS;
-    } else if (rb.stage == ResponseBuilder.STAGE_TOP_GROUPS) {
-      shardRequestFactory = new SearchGroupsRequestFactory();
-      nextStage = ResponseBuilder.STAGE_EXECUTE_QUERY;
-    } else if (rb.stage < ResponseBuilder.STAGE_EXECUTE_QUERY) {
-      nextStage = ResponseBuilder.STAGE_EXECUTE_QUERY;
-    } else if (rb.stage == ResponseBuilder.STAGE_EXECUTE_QUERY) {
-      shardRequestFactory = new TopGroupsShardRequestFactory();
-      nextStage = ResponseBuilder.STAGE_GET_FIELDS;
-    } else if (rb.stage < ResponseBuilder.STAGE_GET_FIELDS) {
-      nextStage = ResponseBuilder.STAGE_GET_FIELDS;
-    } else if (rb.stage == ResponseBuilder.STAGE_GET_FIELDS) {
-      shardRequestFactory = new StoredFieldsShardRequestFactory();
-      nextStage = ResponseBuilder.STAGE_DONE;
-    }
-
-    if (shardRequestFactory != null) {
-      for (ShardRequest shardRequest : shardRequestFactory.constructRequest(rb)) {
-        rb.addRequest(this, shardRequest);
-      }
-    }
-    return nextStage;
-  }
-
-  protected int regularDistributedProcess(ResponseBuilder rb) {
-    if (rb.stage < ResponseBuilder.STAGE_PARSE_QUERY)
-      return ResponseBuilder.STAGE_PARSE_QUERY;
-    if (rb.stage == ResponseBuilder.STAGE_PARSE_QUERY) {
-      createDistributedStats(rb);
-      return ResponseBuilder.STAGE_EXECUTE_QUERY;
-    }
-    if (rb.stage < ResponseBuilder.STAGE_EXECUTE_QUERY) return ResponseBuilder.STAGE_EXECUTE_QUERY;
-    if (rb.stage == ResponseBuilder.STAGE_EXECUTE_QUERY) {
-      createMainQuery(rb);
-      return ResponseBuilder.STAGE_GET_FIELDS;
-    }
-    if (rb.stage < ResponseBuilder.STAGE_GET_FIELDS) return ResponseBuilder.STAGE_GET_FIELDS;
-    if (rb.stage == ResponseBuilder.STAGE_GET_FIELDS && !rb.onePassDistributedQuery) {
-      createRetrieveDocs(rb);
-      return ResponseBuilder.STAGE_DONE;
-    }
-    return ResponseBuilder.STAGE_DONE;
-  }
-
-  @Override
-  public void handleResponses(ResponseBuilder rb, ShardRequest sreq) {
-    if (rb.grouping()) {
-      handleGroupedResponses(rb, sreq);
-    } else {
-      handleRegularResponses(rb, sreq);
-    }
-  }
-
-  protected void handleGroupedResponses(ResponseBuilder rb, ShardRequest sreq) {
-    ShardResponseProcessor responseProcessor = null;
-    if ((sreq.purpose & ShardRequest.PURPOSE_GET_TOP_GROUPS) != 0) {
-      responseProcessor = new SearchGroupShardResponseProcessor();
-    } else if ((sreq.purpose & ShardRequest.PURPOSE_GET_TOP_IDS) != 0) {
-      responseProcessor = new TopGroupsShardResponseProcessor();
-    } else if ((sreq.purpose & ShardRequest.PURPOSE_GET_FIELDS) != 0) {
-      responseProcessor = new StoredFieldsShardResponseProcessor();
-    }
-
-    if (responseProcessor != null) {
-      responseProcessor.process(rb, sreq);
-    }
-  }
-
-  protected void handleRegularResponses(ResponseBuilder rb, ShardRequest sreq) {
-    if ((sreq.purpose & ShardRequest.PURPOSE_GET_TOP_IDS) != 0) {
-      mergeIds(rb, sreq);
-    }
-
-    if ((sreq.purpose & ShardRequest.PURPOSE_GET_TERM_STATS) != 0) {
-      updateStats(rb, sreq);
-    }
-
-    if ((sreq.purpose & ShardRequest.PURPOSE_GET_FIELDS) != 0) {
-      returnFields(rb, sreq);
-    }
-  }
-
-  @Override
-  public void finishStage(ResponseBuilder rb) {
-    if (rb.stage != ResponseBuilder.STAGE_GET_FIELDS) {
-      return;
-    }
-    if (rb.grouping()) {
-      groupedFinishStage(rb);
-    } else {
-      regularFinishStage(rb);
-    }
-  }
-
-  protected static final EndResultTransformer MAIN_END_RESULT_TRANSFORMER = new MainEndResultTransformer();
-  protected static final EndResultTransformer SIMPLE_END_RESULT_TRANSFORMER = new SimpleEndResultTransformer();
-
-  @SuppressWarnings("unchecked")
-  protected void groupedFinishStage(final ResponseBuilder rb) {
-    // To have same response as non-distributed request.
-    GroupingSpecification groupSpec = rb.getGroupingSpec();
-    if (rb.mergedTopGroups.isEmpty()) {
-      for (String field : groupSpec.getFields()) {
-        rb.mergedTopGroups.put(field, new TopGroups(null, null, 0, 0, new GroupDocs[]{}, Float.NaN));
-      }
-      rb.resultIds = new HashMap<>();
-    }
-
-    EndResultTransformer.SolrDocumentSource solrDocumentSource = doc -> {
-      ShardDoc solrDoc = (ShardDoc) doc;
-      return rb.retrievedDocuments.get(solrDoc.id);
-    };
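-    // The document source resolves each merged ShardDoc back to the stored-fields
-    // document previously fetched from its shard.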
-    EndResultTransformer endResultTransformer;
-    if (groupSpec.isMain()) {
-      endResultTransformer = MAIN_END_RESULT_TRANSFORMER;
-    } else if (Grouping.Format.grouped == groupSpec.getResponseFormat()) {
-      endResultTransformer = new GroupedEndResultTransformer(rb.req.getSearcher());
-    } else if (Grouping.Format.simple == groupSpec.getResponseFormat() && !groupSpec.isMain()) {
-      endResultTransformer = SIMPLE_END_RESULT_TRANSFORMER;
-    } else {
-      return;
-    }
-    Map<String, Object> combinedMap = new LinkedHashMap<>();
-    combinedMap.putAll(rb.mergedTopGroups);
-    combinedMap.putAll(rb.mergedQueryCommandResults);
-    endResultTransformer.transform(combinedMap, rb, solrDocumentSource);
-  }
-
-  protected void regularFinishStage(ResponseBuilder rb) {
-    // We may not have been able to retrieve all the docs due to an
-    // index change.  Remove any null documents.
-    for (Iterator<SolrDocument> iter = rb.getResponseDocs().iterator(); iter.hasNext();) {
-      if (iter.next() == null) {
-        iter.remove();
-        rb.getResponseDocs().setNumFound(rb.getResponseDocs().getNumFound()-1);
-      }
-    }
-
-    rb.rsp.addResponse(rb.getResponseDocs());
-    if (null != rb.getNextCursorMark()) {
-      rb.rsp.add(CursorMarkParams.CURSOR_MARK_NEXT,
-                 rb.getNextCursorMark().getSerializedTotem());
-    }
-  }
-
-  protected void createDistributedStats(ResponseBuilder rb) {
-    StatsCache cache = rb.req.getCore().getStatsCache();
-    if ( (rb.getFieldFlags() & SolrIndexSearcher.GET_SCORES)!=0 || rb.getSortSpec().includesScore()) {
-      ShardRequest sreq = cache.retrieveStatsRequest(rb);
-      if (sreq != null) {
-        rb.addRequest(this, sreq);
-      }
-    }
-  }
-
-  protected void updateStats(ResponseBuilder rb, ShardRequest sreq) {
-    StatsCache cache = rb.req.getCore().getStatsCache();
-    cache.mergeToGlobalStats(rb.req, sreq.responses);
-  }
-
-  protected void createMainQuery(ResponseBuilder rb) {
-    ShardRequest sreq = new ShardRequest();
-    sreq.purpose = ShardRequest.PURPOSE_GET_TOP_IDS;
-
-    String keyFieldName = rb.req.getSchema().getUniqueKeyField().getName();
-
-    // one-pass algorithm if only id and score fields are requested, but not if fl=score since that's the same as fl=*,score
-    ReturnFields fields = rb.rsp.getReturnFields();
-
-    // distrib.singlePass=true forces a one-pass query regardless of requested fields
-    boolean distribSinglePass = rb.req.getParams().getBool(ShardParams.DISTRIB_SINGLE_PASS, false);
-
-    if(distribSinglePass || (fields != null && fields.wantsField(keyFieldName)
-        && fields.getRequestedFieldNames() != null  
-        && (!fields.hasPatternMatching() && Arrays.asList(keyFieldName, "score").containsAll(fields.getRequestedFieldNames())))) {
-      sreq.purpose |= ShardRequest.PURPOSE_GET_FIELDS;
-      rb.onePassDistributedQuery = true;
-    }
-
-    sreq.params = new ModifiableSolrParams(rb.req.getParams());
-    // TODO: base on current params or original params?
-
-    // don't pass through any shards param
-    sreq.params.remove(ShardParams.SHARDS);
-
-    // set the start (offset) to 0 for each shard request so we can properly merge
-    // results from the start.
-    if(rb.shards_start > -1) {
-      // if the client set shards.start, use it explicitly
-      sreq.params.set(CommonParams.START,rb.shards_start);
-    } else {
-      sreq.params.set(CommonParams.START, "0");
-    }
-    // TODO: should we even use the SortSpec?  That's obtained from the QParser, and
-    // perhaps we shouldn't attempt to parse the query at this level?
-    // Alternate Idea: instead of specifying all these things at the upper level,
-    // we could just specify that this is a shard request.
-    if(rb.shards_rows > -1) {
-      // if the client set shards.rows, use it explicitly
-      sreq.params.set(CommonParams.ROWS,rb.shards_rows);
-    } else {
-      sreq.params.set(CommonParams.ROWS, rb.getSortSpec().getOffset() + rb.getSortSpec().getCount());
-    }
-
-    sreq.params.set(ResponseBuilder.FIELD_SORT_VALUES,"true");
-
-    boolean shardQueryIncludeScore = (rb.getFieldFlags() & SolrIndexSearcher.GET_SCORES) != 0 || rb.getSortSpec().includesScore();
-    StringBuilder additionalFL = new StringBuilder();
-    boolean additionalAdded = false;
-    if (distribSinglePass)  {
-      String[] fls = rb.req.getParams().getParams(CommonParams.FL);
-      if (fls != null && fls.length > 0 && (fls.length != 1 || !fls[0].isEmpty())) {
-        // If the outer request contains actual FL's use them...
-        sreq.params.set(CommonParams.FL, fls);
-        if (!fields.wantsField(keyFieldName))  {
-          additionalAdded = addFL(additionalFL, keyFieldName, additionalAdded);
-        }
-      } else {
-        // ... else we need to explicitly ask for all fields, because we are going to add
-        // additional fields below
-        sreq.params.set(CommonParams.FL, "*");
-      }
-      if (!fields.wantsScore() && shardQueryIncludeScore) {
-        additionalAdded = addFL(additionalFL, "score", additionalAdded);
-      }
-    } else {
-      // reset so that only unique key is requested in shard requests
-      sreq.params.set(CommonParams.FL, rb.req.getSchema().getUniqueKeyField().getName());
-      if (shardQueryIncludeScore) {
-        additionalAdded = addFL(additionalFL, "score", additionalAdded);
-      }
-    }
-
-    // TODO: should this really sendGlobalDfs if just includeScore?
-
-    if (shardQueryIncludeScore) {
-      StatsCache statsCache = rb.req.getCore().getStatsCache();
-      statsCache.sendGlobalStats(rb, sreq);
-    }
-
-    if (additionalAdded) sreq.params.add(CommonParams.FL, additionalFL.toString());
-
-    rb.addRequest(this, sreq);
-  }
-  
-  protected boolean addFL(StringBuilder fl, String field, boolean additionalAdded) {
-    if (additionalAdded) fl.append(",");
-    fl.append(field);
-    return true;
-  }
-
-  protected void mergeIds(ResponseBuilder rb, ShardRequest sreq) {
-      List<MergeStrategy> mergeStrategies = rb.getMergeStrategies();
-      if(mergeStrategies != null) {
-        Collections.sort(mergeStrategies, MergeStrategy.MERGE_COMP);
-        boolean idsMerged = false;
-        for(MergeStrategy mergeStrategy : mergeStrategies) {
-          mergeStrategy.merge(rb, sreq);
-          if(mergeStrategy.mergesIds()) {
-            idsMerged = true;
-          }
-        }
-
-        if(idsMerged) {
-          return; //ids were merged above so return.
-        }
-      }
-
-      SortSpec ss = rb.getSortSpec();
-      Sort sort = ss.getSort();
-
-      SortField[] sortFields = null;
-      if(sort != null) sortFields = sort.getSort();
-      else {
-        sortFields = new SortField[]{SortField.FIELD_SCORE};
-      }
- 
-      IndexSchema schema = rb.req.getSchema();
-      SchemaField uniqueKeyField = schema.getUniqueKeyField();
-
-
-      // id to shard mapping, to eliminate any accidental dups
-      HashMap<Object,String> uniqueDoc = new HashMap<>();
-
-      // Merge the docs via a priority queue so we don't have to sort *all* of the
-      // documents... we only need to order the top (rows+start)
-      final ShardFieldSortedHitQueue queue = new ShardFieldSortedHitQueue(sortFields, ss.getOffset() + ss.getCount(), rb.req.getSearcher());
-
-      NamedList<Object> shardInfo = null;
-      if(rb.req.getParams().getBool(ShardParams.SHARDS_INFO, false)) {
-        shardInfo = new SimpleOrderedMap<>();
-        rb.rsp.getValues().add(ShardParams.SHARDS_INFO,shardInfo);
-      }
-      
-      long numFound = 0;
-      Float maxScore=null;
-      boolean partialResults = false;
-      Boolean segmentTerminatedEarly = null;
-      for (ShardResponse srsp : sreq.responses) {
-        SolrDocumentList docs = null;
-        NamedList<?> responseHeader = null;
-
-        if(shardInfo!=null) {
-          SimpleOrderedMap<Object> nl = new SimpleOrderedMap<>();
-          
-          if (srsp.getException() != null) {
-            Throwable t = srsp.getException();
-            if(t instanceof SolrServerException) {
-              t = ((SolrServerException)t).getCause();
-            }
-            nl.add("error", t.toString() );
-            StringWriter trace = new StringWriter();
-            t.printStackTrace(new PrintWriter(trace));
-            nl.add("trace", trace.toString() );
-            if (srsp.getShardAddress() != null) {
-              nl.add("shardAddress", srsp.getShardAddress());
-            }
-          }
-          else {
-            responseHeader = (NamedList<?>)srsp.getSolrResponse().getResponse().get("responseHeader");
-            final Object rhste = (responseHeader == null ? null : responseHeader.get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY));
-            if (rhste != null) {
-              nl.add(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY, rhste);
-            }
-            docs = (SolrDocumentList)srsp.getSolrResponse().getResponse().get("response");
-            nl.add("numFound", docs.getNumFound());
-            nl.add("maxScore", docs.getMaxScore());
-            nl.add("shardAddress", srsp.getShardAddress());
-          }
-          if(srsp.getSolrResponse()!=null) {
-            nl.add("time", srsp.getSolrResponse().getElapsedTime());
-          }
-
-          shardInfo.add(srsp.getShard(), nl);
-        }
-        // now that we've added the shard info, let's only proceed if we have no error.
-        if (srsp.getException() != null) {
-          partialResults = true;
-          continue;
-        }
-
-        if (docs == null) { // could have been initialized in the shards info block above
-          docs = (SolrDocumentList)srsp.getSolrResponse().getResponse().get("response");
-        }
-        
-        if (responseHeader == null) { // could have been initialized in the shards info block above
-          responseHeader = (NamedList<?>)srsp.getSolrResponse().getResponse().get("responseHeader");
-        }
-
-        if (responseHeader != null) {
-          if (Boolean.TRUE.equals(responseHeader.get(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY))) {
-            partialResults = true;
-          }
-          if (!Boolean.TRUE.equals(segmentTerminatedEarly)) {
-            final Object ste = responseHeader.get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY);
-            if (Boolean.TRUE.equals(ste)) {
-              segmentTerminatedEarly = Boolean.TRUE;
-            } else if (Boolean.FALSE.equals(ste)) {
-              segmentTerminatedEarly = Boolean.FALSE;
-            }
-          }
-        }
-        
-        // calculate global maxScore and numDocsFound
-        if (docs.getMaxScore() != null) {
-          maxScore = maxScore==null ? docs.getMaxScore() : Math.max(maxScore, docs.getMaxScore());
-        }
-        numFound += docs.getNumFound();
-
-        NamedList sortFieldValues = (NamedList)(srsp.getSolrResponse().getResponse().get("sort_values"));
-        NamedList unmarshalledSortFieldValues = unmarshalSortValues(ss, sortFieldValues, schema);
-
-        // go through every doc in this response, construct a ShardDoc, and
-        // put it in the priority queue so it can be ordered.
-        for (int i=0; i<docs.size(); i++) {
-          SolrDocument doc = docs.get(i);
-          Object id = doc.getFieldValue(uniqueKeyField.getName());
-
-          String prevShard = uniqueDoc.put(id, srsp.getShard());
-          if (prevShard != null) {
-            // duplicate detected
-            numFound--;
-
-            // For now, just always use the first encountered since we can't currently
-            // remove the previous one added to the priority queue.  If we switched
-            // to the Java5 PriorityQueue, this would be easier.
-            continue;
-            // make which duplicate is used deterministic based on shard
-            // if (prevShard.compareTo(srsp.shard) >= 0) {
-            //  TODO: remove previous from priority queue
-            //  continue;
-            // }
-          }
-
-          ShardDoc shardDoc = new ShardDoc();
-          shardDoc.id = id;
-          shardDoc.shard = srsp.getShard();
-          shardDoc.orderInShard = i;
-          Object scoreObj = doc.getFieldValue("score");
-          if (scoreObj != null) {
-            if (scoreObj instanceof String) {
-              shardDoc.score = Float.parseFloat((String)scoreObj);
-            } else {
-              shardDoc.score = (Float)scoreObj;
-            }
-          }
-
-          shardDoc.sortFieldValues = unmarshalledSortFieldValues;
-
-          queue.insertWithOverflow(shardDoc);
-        } // end for-each-doc-in-response
-      } // end for-each-response
-      
-      // The queue now has 0 -> queuesize docs, where queuesize <= start + rows
-      // So we want to pop the last documents off the queue to get
-      // the docs offset -> queuesize
-      int resultSize = queue.size() - ss.getOffset();
-      resultSize = Math.max(0, resultSize);  // there may not be any docs in range
-
-      Map<Object,ShardDoc> resultIds = new HashMap<>();
-      for (int i=resultSize-1; i>=0; i--) {
-        ShardDoc shardDoc = queue.pop();
-        shardDoc.positionInResponse = i;
-        // Need the toString() for correlation with other lists that must
-        // be strings (like keys in highlighting, explain, etc)
-        resultIds.put(shardDoc.id.toString(), shardDoc);
-      }
-
-      // Add hits for distributed requests
-      // https://issues.apache.org/jira/browse/SOLR-3518
-      rb.rsp.addToLog("hits", numFound);
-
-      SolrDocumentList responseDocs = new SolrDocumentList();
-      if (maxScore!=null) responseDocs.setMaxScore(maxScore);
-      responseDocs.setNumFound(numFound);
-      responseDocs.setStart(ss.getOffset());
-      // size appropriately
-      for (int i=0; i<resultSize; i++) responseDocs.add(null);
-
-      // save these results in a private area so we can access them
-      // again when retrieving stored fields.
-      // TODO: use ResponseBuilder (w/ comments) or the request context?
-      rb.resultIds = resultIds;
-      rb.setResponseDocs(responseDocs);
-
-      populateNextCursorMarkFromMergedShards(rb);
-
-      if (partialResults) {
-        if(rb.rsp.getResponseHeader().get(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY) == null) {
-          rb.rsp.getResponseHeader().add(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY, Boolean.TRUE);
-        }
-      }
-      if (segmentTerminatedEarly != null) {
-        final Object existingSegmentTerminatedEarly = rb.rsp.getResponseHeader().get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY);
-        if (existingSegmentTerminatedEarly == null) {
-          rb.rsp.getResponseHeader().add(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY, segmentTerminatedEarly);
-        } else if (!Boolean.TRUE.equals(existingSegmentTerminatedEarly) && Boolean.TRUE.equals(segmentTerminatedEarly)) {
-          rb.rsp.getResponseHeader().remove(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY);
-          rb.rsp.getResponseHeader().add(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY, segmentTerminatedEarly);
-        }
-      }
-  }
-
-  /**
-   * Inspects the state of the {@link ResponseBuilder} and populates the next
-   * cursor mark via {@link ResponseBuilder#setNextCursorMark} as appropriate,
-   * based on the merged sort values from the individual shards.
-   *
-   * @param rb a <code>ResponseBuilder</code> that already contains merged
-   *           <code>ShardDocs</code> in <code>resultIds</code>; it may or may not be
-   *           part of a cursor-based request (the method is a no-op if not)
-   */
-  protected void populateNextCursorMarkFromMergedShards(ResponseBuilder rb) {
-
-    final CursorMark lastCursorMark = rb.getCursorMark();
-    if (null == lastCursorMark) {
-      // Not a cursor based request
-      return; // NOOP
-    }
-
-    assert null != rb.resultIds : "resultIds was not set in ResponseBuilder";
-
-    Collection<ShardDoc> docsOnThisPage = rb.resultIds.values();
-
-    if (0 == docsOnThisPage.size()) {
-      // nothing more matching the query; re-use the existing totem so the user
-      // can "resume" the search later if it makes sense for this sort.
-      rb.setNextCursorMark(lastCursorMark);
-      return;
-    }
-
-    ShardDoc lastDoc = null;
-    // ShardDoc and rb.resultIds are weird structures to work with...
-    for (ShardDoc eachDoc : docsOnThisPage) {
-      if (null == lastDoc || lastDoc.positionInResponse  < eachDoc.positionInResponse) {
-        lastDoc = eachDoc;
-      }
-    }
-    SortField[] sortFields = lastCursorMark.getSortSpec().getSort().getSort();
-    List<Object> nextCursorMarkValues = new ArrayList<>(sortFields.length);
-    for (SortField sf : sortFields) {
-      if (sf.getType().equals(SortField.Type.SCORE)) {
-        nextCursorMarkValues.add(lastDoc.score);
-      } else {
-        assert null != sf.getField() : "SortField has null field";
-        List<Object> fieldVals = (List<Object>) lastDoc.sortFieldValues.get(sf.getField());
-        nextCursorMarkValues.add(fieldVals.get(lastDoc.orderInShard));
-      }
-    }
-    CursorMark nextCursorMark = lastCursorMark.createNext(nextCursorMarkValues);
-    assert null != nextCursorMark : "null nextCursorMark";
-    rb.setNextCursorMark(nextCursorMark);
-  }
-
-  protected NamedList unmarshalSortValues(SortSpec sortSpec, 
-                                        NamedList sortFieldValues, 
-                                        IndexSchema schema) {
-    NamedList unmarshalledSortValsPerField = new NamedList();
-
-    if (0 == sortFieldValues.size()) return unmarshalledSortValsPerField;
-    
-    List<SchemaField> schemaFields = sortSpec.getSchemaFields();
-    SortField[] sortFields = sortSpec.getSort().getSort();
-
-    int marshalledFieldNum = 0;
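-    // marshalledFieldNum advances only for sort fields the shards actually
-    // serialized; SCORE and DOC sort fields are skipped on both ends.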
-    for (int sortFieldNum = 0; sortFieldNum < sortFields.length; sortFieldNum++) {
-      final SortField sortField = sortFields[sortFieldNum];
-      final SortField.Type type = sortField.getType();
-
-      // :TODO: would be simpler to always serialize every position of SortField[]
-      if (type==SortField.Type.SCORE || type==SortField.Type.DOC) continue;
-
-      final String sortFieldName = sortField.getField();
-      final String valueFieldName = sortFieldValues.getName(marshalledFieldNum);
-      assert sortFieldName.equals(valueFieldName)
-        : "sortFieldValues name key does not match expected SortField.getField";
-
-      List sortVals = (List)sortFieldValues.getVal(marshalledFieldNum);
-
-      final SchemaField schemaField = schemaFields.get(sortFieldNum);
-      if (null == schemaField) {
-        unmarshalledSortValsPerField.add(sortField.getField(), sortVals);
-      } else {
-        FieldType fieldType = schemaField.getType();
-        List unmarshalledSortVals = new ArrayList();
-        for (Object sortVal : sortVals) {
-          unmarshalledSortVals.add(fieldType.unmarshalSortValue(sortVal));
-        }
-        unmarshalledSortValsPerField.add(sortField.getField(), unmarshalledSortVals);
-      }
-      marshalledFieldNum++;
-    }
-    return unmarshalledSortValsPerField;
-  }
-
-  protected void createRetrieveDocs(ResponseBuilder rb) {
-
-    // TODO: in a system with nTiers > 2, we could be passed "ids" here
-    // unless those requests always go to the final destination shard
-
-    // for each shard, collect the documents for that shard.
-    HashMap<String, Collection<ShardDoc>> shardMap = new HashMap<>();
-    for (ShardDoc sdoc : rb.resultIds.values()) {
-      Collection<ShardDoc> shardDocs = shardMap.get(sdoc.shard);
-      if (shardDocs == null) {
-        shardDocs = new ArrayList<>();
-        shardMap.put(sdoc.shard, shardDocs);
-      }
-      shardDocs.add(sdoc);
-    }
-
-    SchemaField uniqueField = rb.req.getSchema().getUniqueKeyField();
-
-    // Now create a request for each shard to retrieve the stored fields
-    for (Collection<ShardDoc> shardDocs : shardMap.values()) {
-      ShardRequest sreq = new ShardRequest();
-      sreq.purpose = ShardRequest.PURPOSE_GET_FIELDS;
-
-      sreq.shards = new String[] {shardDocs.iterator().next().shard};
-
-      sreq.params = new ModifiableSolrParams();
-
-      // add original params
-      sreq.params.add( rb.req.getParams());
-
-      // no need for a sort, we already have order
-      sreq.params.remove(CommonParams.SORT);
-      sreq.params.remove(CursorMarkParams.CURSOR_MARK_PARAM);
-
-      // we already have the field sort values
-      sreq.params.remove(ResponseBuilder.FIELD_SORT_VALUES);
-
-      if(!rb.rsp.getReturnFields().wantsField(uniqueField.getName())) {
-        sreq.params.add(CommonParams.FL, uniqueField.getName());
-      }
-    
-      ArrayList<String> ids = new ArrayList<>(shardDocs.size());
-      for (ShardDoc shardDoc : shardDocs) {
-        // TODO: depending on the type, we may need more than a simple toString()?
-        ids.add(shardDoc.id.toString());
-      }
-      sreq.params.add(ShardParams.IDS, StrUtils.join(ids, ','));
-
-      rb.addRequest(this, sreq);
-    }
-
-  }
-
-
-  protected void returnFields(ResponseBuilder rb, ShardRequest sreq) {
-    // Keep in mind that this could also be a shard in a multi-tiered system.
-    // TODO: if a multi-tiered system, it seems like some requests
-    // could/should bypass middlemen (like retrieving stored fields)
-    // TODO: merge fsv too, if requested
-
-    if ((sreq.purpose & ShardRequest.PURPOSE_GET_FIELDS) != 0) {
-      boolean returnScores = (rb.getFieldFlags() & SolrIndexSearcher.GET_SCORES) != 0;
-
-      String keyFieldName = rb.req.getSchema().getUniqueKeyField().getName();
-      boolean removeKeyField = !rb.rsp.getReturnFields().wantsField(keyFieldName);
-      if (rb.rsp.getReturnFields().getFieldRenames().get(keyFieldName) != null) {
-        // if id was renamed we need to use the new name
-        keyFieldName = rb.rsp.getReturnFields().getFieldRenames().get(keyFieldName);
-      }
-
-      for (ShardResponse srsp : sreq.responses) {
-        if (srsp.getException() != null) {
-          // Don't try to get the documents if there was an exception in the shard
-          if(rb.req.getParams().getBool(ShardParams.SHARDS_INFO, false)) {
-            @SuppressWarnings("unchecked")
-            NamedList<Object> shardInfo = (NamedList<Object>) rb.rsp.getValues().get(ShardParams.SHARDS_INFO);
-            @SuppressWarnings("unchecked")
-            SimpleOrderedMap<Object> nl = (SimpleOrderedMap<Object>) shardInfo.get(srsp.getShard());
-            if (nl.get("error") == null) {
-              // Add the error to the shards info section if it wasn't added before
-              Throwable t = srsp.getException();
-              if(t instanceof SolrServerException) {
-                t = ((SolrServerException)t).getCause();
-              }
-              nl.add("error", t.toString() );
-              StringWriter trace = new StringWriter();
-              t.printStackTrace(new PrintWriter(trace));
-              nl.add("trace", trace.toString() );
-            }
-          }
-          
-          continue;
-        }
-        SolrDocumentList docs = (SolrDocumentList) srsp.getSolrResponse().getResponse().get("response");
-        for (SolrDocument doc : docs) {
-          Object id = doc.getFieldValue(keyFieldName);
-          ShardDoc sdoc = rb.resultIds.get(id.toString());
-          if (sdoc != null) {
-            if (returnScores) {
-              doc.setField("score", sdoc.score);
-            } else {
-              // The score might have been added (in createMainQuery) to shard requests, and therefore to shard response docs.
-              // Remove it if the outer request did not ask for the score to be returned.
-              doc.remove("score");
-            }
-            if (removeKeyField) {
-              doc.removeFields(keyFieldName);
-            }
-            rb.getResponseDocs().set(sdoc.positionInResponse, doc);
-          }
-        }
-      }
-    }
-  }
-
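When shards.info was requested and a shard failed, returnFields records the failure in place instead of aborting the merge. The resulting response section is shaped roughly like this (a sketch with a hypothetical shard address, not verbatim output):

    "shards.info": {
      "127.0.0.1:8983/solr/collection1_shard1_replica_n1": {
        "error": "org.apache.solr.client.solrj.SolrServerException: ...",
        "trace": "<stack trace captured via printStackTrace>"
      }
    }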
-  /////////////////////////////////////////////
-  ///  SolrInfoBean
-  ////////////////////////////////////////////
-
-  @Override
-  public String getDescription() {
-    return "query";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.QUERY;
-  }
-
-  private boolean doProcessSearchByIds(ResponseBuilder rb) throws IOException {
-
-    SolrQueryRequest req = rb.req;
-    SolrQueryResponse rsp = rb.rsp;
-
-    SolrParams params = req.getParams();
-
-    String ids = params.get(ShardParams.IDS);
-    if (ids == null) {
-      return false;
-    }
-
-    SolrIndexSearcher searcher = req.getSearcher();
-    IndexSchema schema = searcher.getSchema();
-    SchemaField idField = schema.getUniqueKeyField();
-    List<String> idArr = StrUtils.splitSmart(ids, ",", true);
-    int[] luceneIds = new int[idArr.size()];
-    int docs = 0;
-    if (idField.getType().isPointField()) {
-      for (int i=0; i<idArr.size(); i++) {
-        int id = searcher.search(
-            idField.getType().getFieldQuery(null, idField, idArr.get(i)), 1).scoreDocs[0].doc;
-        if (id >= 0) {
-          luceneIds[docs++] = id;
-        }
-      }
-    } else {
-      for (int i=0; i<idArr.size(); i++) {
-        int id = searcher.getFirstMatch(
-            new Term(idField.getName(), idField.getType().toInternal(idArr.get(i))));
-        if (id >= 0)
-          luceneIds[docs++] = id;
-      }
-    }
-
-    DocListAndSet res = new DocListAndSet();
-    res.docList = new DocSlice(0, docs, luceneIds, null, docs, 0);
-    if (rb.isNeedDocSet()) {
-      // TODO: create a cache for this!
-      List<Query> queries = new ArrayList<>();
-      queries.add(rb.getQuery());
-      List<Query> filters = rb.getFilters();
-      if (filters != null) queries.addAll(filters);
-      res.docSet = searcher.getDocSet(queries);
-    }
-    rb.setResults(res);
-
-    ResultContext ctx = new BasicResultContext(rb);
-    rsp.addResponse(ctx);
-    return true;
-  }
-
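doProcessSearchByIds is the receiving side of the PURPOSE_GET_FIELDS requests built earlier: the presence of the ids parameter short-circuits normal query execution, and each external id is resolved to an internal Lucene docid (the luceneIds[docs++] compaction keeps only the ids that resolved). A hand-written equivalent of such a shard fetch, with hypothetical host, core, and ids:

    http://localhost:8983/solr/techproducts/select?q=*:*&ids=doc1,doc2,doc3&isShard=true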
-  private void doProcessGroupedDistributedSearchFirstPhase(ResponseBuilder rb, QueryCommand cmd, QueryResult result) throws IOException {
-
-    GroupingSpecification groupingSpec = rb.getGroupingSpec();
-    assert null != groupingSpec : "GroupingSpecification is null";
-
-    SolrQueryRequest req = rb.req;
-    SolrQueryResponse rsp = rb.rsp;
-
-    SolrIndexSearcher searcher = req.getSearcher();
-    IndexSchema schema = searcher.getSchema();
-
-    CommandHandler.Builder topsGroupsActionBuilder = new CommandHandler.Builder()
-        .setQueryCommand(cmd)
-        .setNeedDocSet(false) // Order matters here
-        .setIncludeHitCount(true)
-        .setSearcher(searcher);
-
-    for (String field : groupingSpec.getFields()) {
-      topsGroupsActionBuilder.addCommandField(new SearchGroupsFieldCommand.Builder()
-          .setField(schema.getField(field))
-          .setGroupSort(groupingSpec.getGroupSort())
-          .setTopNGroups(cmd.getOffset() + cmd.getLen())
-          .setIncludeGroupCount(groupingSpec.isIncludeGroupCount())
-          .build()
-      );
-    }
-
-    CommandHandler commandHandler = topsGroupsActionBuilder.build();
-    commandHandler.execute();
-    SearchGroupsResultTransformer serializer = new SearchGroupsResultTransformer(searcher);
-
-    rsp.add("firstPhase", commandHandler.processResult(result, serializer));
-    rsp.add("totalHitCount", commandHandler.getTotalHitCount());
-    rb.setResult(result);
-  }
-
-  private void doProcessGroupedDistributedSearchSecondPhase(ResponseBuilder rb, QueryCommand cmd, QueryResult result) throws IOException, SyntaxError {
-
-    GroupingSpecification groupingSpec = rb.getGroupingSpec();
-    assert null != groupingSpec : "GroupingSpecification is null";
-
-    SolrQueryRequest req = rb.req;
-    SolrQueryResponse rsp = rb.rsp;
-
-    SolrParams params = req.getParams();
-
-    SolrIndexSearcher searcher = req.getSearcher();
-    IndexSchema schema = searcher.getSchema();
-
-    boolean needScores = (cmd.getFlags() & SolrIndexSearcher.GET_SCORES) != 0;
-
-    CommandHandler.Builder secondPhaseBuilder = new CommandHandler.Builder()
-        .setQueryCommand(cmd)
-        .setTruncateGroups(groupingSpec.isTruncateGroups() && groupingSpec.getFields().length > 0)
-        .setSearcher(searcher);
-
-    int docsToCollect = Grouping.getMax(groupingSpec.getWithinGroupOffset(), groupingSpec.getWithinGroupLimit(), searcher.maxDoc());
-    docsToCollect = Math.max(docsToCollect, 1);
-
-    for (String field : groupingSpec.getFields()) {
-      SchemaField schemaField = schema.getField(field);
-      String[] topGroupsParam = params.getParams(GroupParams.GROUP_DISTRIBUTED_TOPGROUPS_PREFIX + field);
-      if (topGroupsParam == null) {
-        topGroupsParam = new String[0];
-      }
-
-      List<SearchGroup<BytesRef>> topGroups = new ArrayList<>(topGroupsParam.length);
-      for (String topGroup : topGroupsParam) {
-        SearchGroup<BytesRef> searchGroup = new SearchGroup<>();
-        if (!topGroup.equals(TopGroupsShardRequestFactory.GROUP_NULL_VALUE)) {
-          BytesRefBuilder builder = new BytesRefBuilder();
-          schemaField.getType().readableToIndexed(topGroup, builder);
-          searchGroup.groupValue = builder.get();
-        }
-        topGroups.add(searchGroup);
-      }
-
-      secondPhaseBuilder.addCommandField(
-          new TopGroupsFieldCommand.Builder()
-              .setQuery(cmd.getQuery())
-              .setField(schemaField)
-              .setGroupSort(groupingSpec.getGroupSort())
-              .setSortWithinGroup(groupingSpec.getSortWithinGroup())
-              .setFirstPhaseGroups(topGroups)
-              .setMaxDocPerGroup(docsToCollect)
-              .setNeedScores(needScores)
-              .setNeedMaxScore(needScores)
-              .build()
-      );
-    }
-
-    for (String query : groupingSpec.getQueries()) {
-      secondPhaseBuilder.addCommandField(new Builder()
-          .setDocsToCollect(docsToCollect)
-          .setSort(groupingSpec.getGroupSort())
-          .setQuery(query, rb.req)
-          .setDocSet(searcher)
-          .build()
-      );
-    }
-
-    CommandHandler commandHandler = secondPhaseBuilder.build();
-    commandHandler.execute();
-    TopGroupsResultTransformer serializer = new TopGroupsResultTransformer(rb);
-    rsp.add("secondPhase", commandHandler.processResult(result, serializer));
-    rb.setResult(result);
-  }
-
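The top groups chosen in phase one arrive as repeated group.distributed.topgroups.<field> parameters, one value per group (with TopGroupsShardRequestFactory.GROUP_NULL_VALUE standing in for the null group). A small sketch of that multi-valued parameter convention using ModifiableSolrParams, with a hypothetical field name:

    import org.apache.solr.common.params.ModifiableSolrParams;

    public class TopGroupsParamsSketch {
      public static void main(String[] args) {
        ModifiableSolrParams params = new ModifiableSolrParams();
        // One value is added per top group selected in the first phase.
        params.add("group.distributed.topgroups.cat", "electronics");
        params.add("group.distributed.topgroups.cat", "books");
        // getParams returns all values for the name, mirroring the loop above.
        String[] topGroups = params.getParams("group.distributed.topgroups.cat");
        System.out.println(topGroups.length); // 2
      }
    }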
-  private void doProcessGroupedSearch(ResponseBuilder rb, QueryCommand cmd, QueryResult result) throws IOException, SyntaxError {
-
-    GroupingSpecification groupingSpec = rb.getGroupingSpec();
-    assert null != groupingSpec : "GroupingSpecification is null";
-
-    SolrQueryRequest req = rb.req;
-    SolrQueryResponse rsp = rb.rsp;
-
-    SolrParams params = req.getParams();
-
-    SolrIndexSearcher searcher = req.getSearcher();
-
-    int maxDocsPercentageToCache = params.getInt(GroupParams.GROUP_CACHE_PERCENTAGE, 0);
-    boolean cacheSecondPassSearch = maxDocsPercentageToCache >= 1 && maxDocsPercentageToCache <= 100;
-    Grouping.TotalCount defaultTotalCount = groupingSpec.isIncludeGroupCount() ?
-        Grouping.TotalCount.grouped : Grouping.TotalCount.ungrouped;
-    int limitDefault = cmd.getLen(); // this is normally from "rows"
-    Grouping grouping =
-        new Grouping(searcher, result, cmd, cacheSecondPassSearch, maxDocsPercentageToCache, groupingSpec.isMain());
-    grouping.setGroupSort(groupingSpec.getGroupSort())
-        .setWithinGroupSort(groupingSpec.getSortWithinGroup())
-        .setDefaultFormat(groupingSpec.getResponseFormat())
-        .setLimitDefault(limitDefault)
-        .setDefaultTotalCount(defaultTotalCount)
-        .setDocsPerGroupDefault(groupingSpec.getWithinGroupLimit())
-        .setGroupOffsetDefault(groupingSpec.getWithinGroupOffset())
-        .setGetGroupedDocSet(groupingSpec.isTruncateGroups());
-
-    if (groupingSpec.getFields() != null) {
-      for (String field : groupingSpec.getFields()) {
-        grouping.addFieldCommand(field, rb.req);
-      }
-    }
-
-    if (groupingSpec.getFunctions() != null) {
-      for (String groupByStr : groupingSpec.getFunctions()) {
-        grouping.addFunctionCommand(groupByStr, rb.req);
-      }
-    }
-
-    if (groupingSpec.getQueries() != null) {
-      for (String groupByStr : groupingSpec.getQueries()) {
-        grouping.addQueryCommand(groupByStr, rb.req);
-      }
-    }
-
-    if (rb.isNeedDocList() || rb.isDebug()) {
-      // we need a single list of the returned docs
-      cmd.setFlags(SolrIndexSearcher.GET_DOCLIST);
-    }
-
-    grouping.execute();
-    if (grouping.isSignalCacheWarning()) {
-      rsp.add(
-          "cacheWarning",
-          String.format(Locale.ROOT, "Cache limit of %d percent relative to maxdoc has been exceeded. Please increase the cache size or disable caching.", maxDocsPercentageToCache)
-      );
-    }
-    rb.setResult(result);
-
-    if (grouping.mainResult != null) {
-      ResultContext ctx = new BasicResultContext(rb, grouping.mainResult);
-      rsp.addResponse(ctx);
-      rsp.getToLog().add("hits", grouping.mainResult.matches());
-    } else if (!grouping.getCommands().isEmpty()) { // Can never be empty since grouping.execute() checks for this.
-      rsp.add("grouped", result.groupedResults);
-      rsp.getToLog().add("hits", grouping.getCommands().get(0).getMatches());
-    }
-  }
-
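The second-pass cache here is opt-in: group.cache.percent must land between 1 and 100 (the default of 0 disables it), and the cacheWarning above is emitted once the cached doc set grows past that percentage of maxDoc. A request that enables it might look like this (field name hypothetical):

    /select?q=*:*&group=true&group.field=manu_exact&group.cache.percent=20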
-  private void doProcessUngroupedSearch(ResponseBuilder rb, QueryCommand cmd, QueryResult result) throws IOException {
-
-    SolrQueryRequest req = rb.req;
-    SolrQueryResponse rsp = rb.rsp;
-
-    SolrIndexSearcher searcher = req.getSearcher();
-
-    searcher.search(result, cmd);
-    rb.setResult(result);
-
-    ResultContext ctx = new BasicResultContext(rb);
-    rsp.addResponse(ctx);
-    rsp.getToLog().add("hits", rb.getResults().docList.matches());
-
-    if (!rb.req.getParams().getBool(ShardParams.IS_SHARD, false)) {
-      if (null != rb.getNextCursorMark()) {
-        rb.rsp.add(CursorMarkParams.CURSOR_MARK_NEXT,
-                   rb.getNextCursorMark().getSerializedTotem());
-      }
-    }
-
-    if (rb.mergeFieldHandler != null) {
-      rb.mergeFieldHandler.handleMergeFields(rb, searcher);
-    } else {
-      doFieldSortValues(rb, searcher);
-    }
-
-    doPrefetch(rb);
-  }
-
-  /**
-   * Fake scorer for a single document
-   *
-   * TODO: when SOLR-5595 is fixed, this won't be needed, as we don't need to recompute sort values here from the comparator
-   */
-  protected static class ScoreAndDoc extends Scorable {
-    final int docid;
-    final float score;
-
-    ScoreAndDoc(int docid, float score) {
-      this.docid = docid;
-      this.score = score;
-    }
-
-    @Override
-    public int docID() {
-      return docid;
-    }
-
-    @Override
-    public float score() throws IOException {
-      return score;
-    }
-  }
-}


[04/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java b/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
deleted file mode 100644
index 77d1e97..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
+++ /dev/null
@@ -1,1134 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import javax.xml.parsers.ParserConfigurationException;
-import javax.xml.xpath.XPath;
-import javax.xml.xpath.XPathConstants;
-import javax.xml.xpath.XPathExpressionException;
-import javax.xml.xpath.XPathFactory;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.WeakHashMap;
-
-import com.carrotsearch.hppc.IntIntHashMap;
-import com.carrotsearch.hppc.cursors.IntIntCursor;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Throwables;
-import com.google.common.collect.Collections2;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Maps;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReaderContext;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.BoostQuery;
-import org.apache.lucene.search.FieldComparator;
-import org.apache.lucene.search.FieldComparatorSource;
-import org.apache.lucene.search.SimpleFieldComparator;
-import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.QueryElevationParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.core.Config;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.transform.ElevatedMarkerFactory;
-import org.apache.solr.response.transform.ExcludedMarkerFactory;
-import org.apache.solr.schema.FieldType;
-import org.apache.solr.schema.SchemaField;
-import org.apache.solr.search.QueryParsing;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.search.SortSpec;
-import org.apache.solr.search.grouping.GroupingSpecification;
-import org.apache.solr.util.DOMUtil;
-import org.apache.solr.util.RefCounted;
-import org.apache.solr.util.VersionedFile;
-import org.apache.solr.util.plugin.SolrCoreAware;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-import org.xml.sax.InputSource;
-import org.xml.sax.SAXException;
-
-/**
- * A component to elevate some documents to the top of the result set.
- *
- * @since solr 1.3
- */
-@SuppressWarnings("WeakerAccess")
-public class QueryElevationComponent extends SearchComponent implements SolrCoreAware {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  // Constants used in solrconfig.xml
-  @VisibleForTesting
-  static final String FIELD_TYPE = "queryFieldType";
-  @VisibleForTesting
-  static final String CONFIG_FILE = "config-file";
-  private static final String EXCLUDE = "exclude";
-
-  /** @see #getBoostDocs(SolrIndexSearcher, Set, Map) */
-  private static final String BOOSTED_DOCIDS = "BOOSTED_DOCIDS";
-
-  /** Key to {@link SolrQueryRequest#getContext()} for a {@code Set<BytesRef>} of included IDs in configured
-   * order (so-called priority). */
-  public static final String BOOSTED = "BOOSTED";
-  /** Key to {@link SolrQueryRequest#getContext()} for a {@code Set<BytesRef>} of excluded IDs. */
-  public static final String EXCLUDED = "EXCLUDED";
-
-  private static final boolean DEFAULT_FORCE_ELEVATION = false;
-  private static final boolean DEFAULT_USE_CONFIGURED_ELEVATED_ORDER = true;
-  private static final boolean DEFAULT_SUBSET_MATCH = false;
-  private static final String DEFAULT_EXCLUDE_MARKER_FIELD_NAME = "excluded";
-  private static final String DEFAULT_EDITORIAL_MARKER_FIELD_NAME = "elevated";
-
-  protected SolrParams initArgs;
-  protected Analyzer queryAnalyzer;
-  protected SchemaField uniqueKeyField;
-  /** @see QueryElevationParams#FORCE_ELEVATION */
-  protected boolean forceElevation;
-  /** @see QueryElevationParams#USE_CONFIGURED_ELEVATED_ORDER */
-  protected boolean useConfiguredElevatedOrder;
-
-  protected boolean initialized;
-
-  /**
-   * For each IndexReader, caches the ElevationProvider loaded from the data directory.
-   * The null key holds the provider loaded from the config directory, which is never re-loaded.
-   */
-  private final Map<IndexReader, ElevationProvider> elevationProviderCache = new WeakHashMap<>();
-
-  @Override
-  public void init(NamedList args) {
-    this.initArgs = args.toSolrParams();
-  }
-
-  @Override
-  public void inform(SolrCore core) {
-    initialized = false;
-    try {
-      parseFieldType(core);
-      setUniqueKeyField(core);
-      parseExcludedMarkerFieldName(core);
-      parseEditorialMarkerFieldName(core);
-      parseForceElevation();
-      parseUseConfiguredOrderForElevations();
-      loadElevationConfiguration(core);
-      initialized = true;
-    } catch (InitializationException e) {
-      assert !initialized;
-      handleInitializationException(e, e.exceptionCause);
-    } catch (Exception e) {
-      assert !initialized;
-      handleInitializationException(e, InitializationExceptionCause.OTHER);
-    }
-  }
-
-  private void parseFieldType(SolrCore core) throws InitializationException {
-    String a = initArgs.get(FIELD_TYPE);
-    if (a != null) {
-      FieldType ft = core.getLatestSchema().getFieldTypes().get(a);
-      if (ft == null) {
-        throw new InitializationException("Parameter " + FIELD_TYPE + " defines an unknown field type \"" + a + "\"", InitializationExceptionCause.UNKNOWN_FIELD_TYPE);
-      }
-      queryAnalyzer = ft.getQueryAnalyzer();
-    }
-  }
-
-  private void setUniqueKeyField(SolrCore core) throws InitializationException {
-    uniqueKeyField = core.getLatestSchema().getUniqueKeyField();
-    if (uniqueKeyField == null) {
-      throw new InitializationException("This component requires the schema to have a uniqueKeyField", InitializationExceptionCause.MISSING_UNIQUE_KEY_FIELD);
-    }
-  }
-
-  private void parseExcludedMarkerFieldName(SolrCore core) {
-    String markerName = initArgs.get(QueryElevationParams.EXCLUDE_MARKER_FIELD_NAME, DEFAULT_EXCLUDE_MARKER_FIELD_NAME);
-    core.addTransformerFactory(markerName, new ExcludedMarkerFactory());
-  }
-
-  private void parseEditorialMarkerFieldName(SolrCore core) {
-    String markerName = initArgs.get(QueryElevationParams.EDITORIAL_MARKER_FIELD_NAME, DEFAULT_EDITORIAL_MARKER_FIELD_NAME);
-    core.addTransformerFactory(markerName, new ElevatedMarkerFactory());
-  }
-
-  private void parseForceElevation() {
-    forceElevation = initArgs.getBool(QueryElevationParams.FORCE_ELEVATION, DEFAULT_FORCE_ELEVATION);
-  }
-
-  private void parseUseConfiguredOrderForElevations() {
-    useConfiguredElevatedOrder = initArgs.getBool(QueryElevationParams.USE_CONFIGURED_ELEVATED_ORDER, DEFAULT_USE_CONFIGURED_ELEVATED_ORDER);
-  }
-
-  /**
-   * (Re)Loads elevation configuration.
-   *
-   * @param core The core holding this component.
-   * @return The number of elevation rules parsed.
-   */
-  @SuppressWarnings("WeakerAccess")
-  protected int loadElevationConfiguration(SolrCore core) throws Exception {
-    synchronized (elevationProviderCache) {
-      elevationProviderCache.clear();
-      String configFileName = initArgs.get(CONFIG_FILE);
-      if (configFileName == null) {
-        // Throw an exception which is handled by handleInitializationException().
-      // If not overridden, handleInitializationException() simply skips this exception.
-        throw new InitializationException("Missing component parameter " + CONFIG_FILE + " - it has to define the path to the elevation configuration file", InitializationExceptionCause.NO_CONFIG_FILE_DEFINED);
-      }
-      boolean configFileExists = false;
-      ElevationProvider elevationProvider = NO_OP_ELEVATION_PROVIDER;
-
-      // check if using ZooKeeper
-      ZkController zkController = core.getCoreContainer().getZkController();
-      if (zkController != null) {
-        // TODO : shouldn't have to keep reading the config name when it has been read before
-        configFileExists = zkController.configFileExists(zkController.getZkStateReader().readConfigName(core.getCoreDescriptor().getCloudDescriptor().getCollectionName()), configFileName);
-      } else {
-        File fC = new File(core.getResourceLoader().getConfigDir(), configFileName);
-        File fD = new File(core.getDataDir(), configFileName);
-        if (fC.exists() == fD.exists()) {
-          InitializationException e = new InitializationException("Missing config file \"" + configFileName + "\" - either " + fC.getAbsolutePath() + " or " + fD.getAbsolutePath() + " must exist, but not both", InitializationExceptionCause.MISSING_CONFIG_FILE);
-          elevationProvider = handleConfigLoadingException(e, true);
-          elevationProviderCache.put(null, elevationProvider);
-        } else if (fC.exists()) {
-          if (fC.length() == 0) {
-            InitializationException e = new InitializationException("Empty config file \"" + configFileName + "\" - " + fC.getAbsolutePath(), InitializationExceptionCause.EMPTY_CONFIG_FILE);
-            elevationProvider = handleConfigLoadingException(e, true);
-          } else {
-            configFileExists = true;
-            log.info("Loading QueryElevation from: " + fC.getAbsolutePath());
-            Config cfg = new Config(core.getResourceLoader(), configFileName);
-            elevationProvider = loadElevationProvider(cfg);
-          }
-          elevationProviderCache.put(null, elevationProvider);
-        }
-      }
-      // In other words, we expect the config file to be in the data dir, not the conf dir
-      if (!configFileExists) {
-        // preload the first data
-        RefCounted<SolrIndexSearcher> searchHolder = null;
-        try {
-          searchHolder = core.getNewestSearcher(false);
-          if (searchHolder == null) {
-            elevationProvider = NO_OP_ELEVATION_PROVIDER;
-          } else {
-            IndexReader reader = searchHolder.get().getIndexReader();
-            elevationProvider = getElevationProvider(reader, core);
-          }
-        } finally {
-          if (searchHolder != null) searchHolder.decref();
-        }
-      }
-      return elevationProvider.size();
-    }
-  }
-
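In non-cloud mode, the fC/fD check above means exactly one of two locations may hold the file; both present, or neither, is a configuration error. Concretely, for a hypothetical core named core1:

    $SOLR_HOME/core1/conf/elevate.xml  -> loaded once and cached under the null key (never re-loaded)
    $SOLR_HOME/core1/data/elevate.xml  -> loaded lazily and re-cached per IndexReader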
-  /**
-   * Handles the exception that occurred while initializing this component.
-   * If this method does not throw an exception, this component silently fails to initialize
-   * and is disabled via the {@link #initialized} field, which remains {@code false}.
-   */
-  protected void handleInitializationException(Exception exception, InitializationExceptionCause cause) {
-    if (cause != InitializationExceptionCause.NO_CONFIG_FILE_DEFINED) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Error initializing " + QueryElevationComponent.class.getSimpleName(), exception);
-    }
-  }
-
-  /**
-   * Handles an exception that occurred while loading the configuration resource.
-   *
-   * @param e                   The exception caught.
-   * @param resourceAccessIssue <code>true</code> if the exception has been thrown
-   *                            because the resource could not be accessed (missing or cannot be read)
-   *                            or the config file is empty; <code>false</code> if the resource has
-   *                            been found and accessed but the error occurred while loading the resource
-   *                            (invalid format, incomplete or corrupted).
-   * @return The {@link ElevationProvider} to use if the exception is absorbed. If {@code null}
-   *         is returned, the {@link #NO_OP_ELEVATION_PROVIDER} is used but not cached in
-   *         the {@link ElevationProvider} cache.
-   * @throws E If the exception is not absorbed.
-   */
-  protected <E extends Exception> ElevationProvider handleConfigLoadingException(E e, boolean resourceAccessIssue) throws E {
-    throw e;
-  }
-
-  /**
-   * Gets the {@link ElevationProvider} from the data dir or from the cache.
-   *
-   * @return The cached or loaded {@link ElevationProvider}.
-   * @throws java.io.IOException                  If the configuration resource cannot be found, or if an I/O error occurs while analyzing the triggering queries.
-   * @throws org.xml.sax.SAXException                 If the configuration resource is not a valid XML content.
-   * @throws javax.xml.parsers.ParserConfigurationException If the configuration resource is not a valid XML configuration.
-   * @throws RuntimeException             If the configuration resource is not an XML content of the expected format
-   *                                      (either {@link RuntimeException} or {@link org.apache.solr.common.SolrException}).
-   */
-  @VisibleForTesting
-  ElevationProvider getElevationProvider(IndexReader reader, SolrCore core) throws Exception {
-    synchronized (elevationProviderCache) {
-      ElevationProvider elevationProvider;
-      elevationProvider = elevationProviderCache.get(null);
-      if (elevationProvider != null) return elevationProvider;
-
-      elevationProvider = elevationProviderCache.get(reader);
-      if (elevationProvider == null) {
-        Exception loadingException = null;
-        boolean resourceAccessIssue = false;
-        try {
-          elevationProvider = loadElevationProvider(core);
-        } catch (IOException e) {
-          loadingException = e;
-          resourceAccessIssue = true;
-        } catch (Exception e) {
-          loadingException = e;
-        }
-        boolean shouldCache = true;
-        if (loadingException != null) {
-          elevationProvider = handleConfigLoadingException(loadingException, resourceAccessIssue);
-          if (elevationProvider == null) {
-            elevationProvider = NO_OP_ELEVATION_PROVIDER;
-            shouldCache = false;
-          }
-        }
-        if (shouldCache) {
-          elevationProviderCache.put(reader, elevationProvider);
-        }
-      }
-      assert elevationProvider != null;
-      return elevationProvider;
-    }
-  }
-
-  /**
-   * Loads the {@link ElevationProvider} from the data dir.
-   *
-   * @return The loaded {@link ElevationProvider}.
-   * @throws java.io.IOException                  If the configuration resource cannot be found, or if an I/O error occurs while analyzing the triggering queries.
-   * @throws org.xml.sax.SAXException                 If the configuration resource is not a valid XML content.
-   * @throws javax.xml.parsers.ParserConfigurationException If the configuration resource is not a valid XML configuration.
-   * @throws RuntimeException             If the configuration resource is not an XML content of the expected format
-   *                                      (either {@link RuntimeException} or {@link org.apache.solr.common.SolrException}).
-   */
-  private ElevationProvider loadElevationProvider(SolrCore core) throws IOException, SAXException, ParserConfigurationException {
-    String configFileName = initArgs.get(CONFIG_FILE);
-    if (configFileName == null) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "QueryElevationComponent must specify argument: " + CONFIG_FILE);
-    }
-    log.info("Loading QueryElevation from data dir: " + configFileName);
-
-    Config cfg;
-    ZkController zkController = core.getCoreContainer().getZkController();
-    if (zkController != null) {
-      cfg = new Config(core.getResourceLoader(), configFileName, null, null);
-    } else {
-      InputStream is = VersionedFile.getLatestFile(core.getDataDir(), configFileName);
-      cfg = new Config(core.getResourceLoader(), configFileName, new InputSource(is), null);
-    }
-    ElevationProvider elevationProvider = loadElevationProvider(cfg);
-    assert elevationProvider != null;
-    return elevationProvider;
-  }
-
-  /**
-   * Loads the {@link ElevationProvider}.
-   *
-   * @throws java.io.IOException      If an I/O error occurs while analyzing the triggering queries.
-   * @throws RuntimeException If the config does not provide an XML content of the expected format
-   *                          (either {@link RuntimeException} or {@link org.apache.solr.common.SolrException}).
-   */
-  @SuppressWarnings("WeakerAccess")
-  protected ElevationProvider loadElevationProvider(Config config) throws IOException {
-    Map<ElevatingQuery, ElevationBuilder> elevationBuilderMap = new LinkedHashMap<>();
-    XPath xpath = XPathFactory.newInstance().newXPath();
-    NodeList nodes = (NodeList) config.evaluate("elevate/query", XPathConstants.NODESET);
-    for (int i = 0; i < nodes.getLength(); i++) {
-      Node node = nodes.item(i);
-      String queryString = DOMUtil.getAttr(node, "text", "missing query 'text'");
-      String matchString = DOMUtil.getAttr(node, "match");
-      ElevatingQuery elevatingQuery = new ElevatingQuery(queryString, parseMatchPolicy(matchString));
-
-      NodeList children;
-      try {
-        children = (NodeList) xpath.evaluate("doc", node, XPathConstants.NODESET);
-      } catch (XPathExpressionException e) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "query requires '<doc .../>' child");
-      }
-
-      if (children.getLength() == 0) { // weird
-        continue;
-      }
-      ElevationBuilder elevationBuilder = new ElevationBuilder();
-      for (int j = 0; j < children.getLength(); j++) {
-        Node child = children.item(j);
-        String id = DOMUtil.getAttr(child, "id", "missing 'id'");
-        String e = DOMUtil.getAttr(child, EXCLUDE, null);
-        if (e != null) {
-          if (Boolean.valueOf(e)) {
-            elevationBuilder.addExcludedIds(Collections.singleton(id));
-            continue;
-          }
-        }
-        elevationBuilder.addElevatedIds(Collections.singletonList(id));
-      }
-
-      // Multiple elevations may be defined for the same query. In this case the elevations
-      // are merged in the ElevationBuilder (they will be triggered at the same time).
-      ElevationBuilder previousElevationBuilder = elevationBuilderMap.get(elevatingQuery);
-      if (previousElevationBuilder == null) {
-        elevationBuilderMap.put(elevatingQuery, elevationBuilder);
-      } else {
-        previousElevationBuilder.merge(elevationBuilder);
-      }
-    }
-    return createElevationProvider(queryAnalyzer, elevationBuilderMap);
-  }
-
-  private boolean parseMatchPolicy(String matchString) {
-    if (matchString == null) {
-      return DEFAULT_SUBSET_MATCH;
-    } else if (matchString.equalsIgnoreCase("exact")) {
-      return false;
-    } else if (matchString.equalsIgnoreCase("subset")) {
-      return true;
-    } else {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "invalid value \"" + matchString + "\" for query match attribute");
-    }
-  }
-
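Taken together, loadElevationProvider and parseMatchPolicy accept configuration of the following shape (query text and document ids here are hypothetical):

    <elevate>
      <query text="ipod">
        <doc id="MA147LL/A" />             <!-- elevated, in this order -->
        <doc id="IW-02" exclude="true" />  <!-- excluded from results -->
      </query>
      <query text="foo bar" match="subset">
        <doc id="SP2514N" />
      </query>
    </elevate>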
-  //---------------------------------------------------------------------------------
-  // SearchComponent
-  //---------------------------------------------------------------------------------
-
-  @Override
-  public void prepare(ResponseBuilder rb) throws IOException {
-    if (!initialized || !rb.req.getParams().getBool(QueryElevationParams.ENABLE, true)) {
-      return;
-    }
-
-    Elevation elevation = getElevation(rb);
-    if (elevation != null) {
-      setQuery(rb, elevation);
-      setSort(rb, elevation);
-    }
-
-    if (rb.isDebug() && rb.isDebugQuery()) {
-      addDebugInfo(rb, elevation);
-    }
-  }
-
-  @Override
-  public void process(ResponseBuilder rb) throws IOException {
-    // Do nothing -- the real work is modifying the input query
-  }
-
-  protected Elevation getElevation(ResponseBuilder rb) {
-    SolrParams localParams = rb.getQparser().getLocalParams();
-    String queryString = localParams == null ? rb.getQueryString() : localParams.get(QueryParsing.V);
-    if (queryString == null || rb.getQuery() == null) {
-      return null;
-    }
-
-    SolrParams params = rb.req.getParams();
-    String paramElevatedIds = params.get(QueryElevationParams.IDS);
-    String paramExcludedIds = params.get(QueryElevationParams.EXCLUDE);
-    try {
-      if (paramElevatedIds != null || paramExcludedIds != null) {
-        List<String> elevatedIds = paramElevatedIds != null ? StrUtils.splitSmart(paramElevatedIds,",", true) : Collections.emptyList();
-        List<String> excludedIds = paramExcludedIds != null ? StrUtils.splitSmart(paramExcludedIds, ",", true) : Collections.emptyList();
-        return new ElevationBuilder().addElevatedIds(elevatedIds).addExcludedIds(excludedIds).build();
-      } else {
-        IndexReader reader = rb.req.getSearcher().getIndexReader();
-        return getElevationProvider(reader, rb.req.getCore()).getElevationForQuery(queryString);
-      }
-    } catch (Exception e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error loading elevation", e);
-    }
-  }
-
-  private void setQuery(ResponseBuilder rb, Elevation elevation) {
-    rb.req.getContext().put(BOOSTED, elevation.elevatedIds);
-
-    // Change the query to insert forced documents
-    SolrParams params = rb.req.getParams();
-    if (params.getBool(QueryElevationParams.EXCLUSIVE, false)) {
-      // We only want these elevated results
-      rb.setQuery(new BoostQuery(elevation.includeQuery, 0f));
-    } else {
-      BooleanQuery.Builder queryBuilder = new BooleanQuery.Builder();
-      queryBuilder.add(rb.getQuery(), BooleanClause.Occur.SHOULD);
-      queryBuilder.add(new BoostQuery(elevation.includeQuery, 0f), BooleanClause.Occur.SHOULD);
-      if (elevation.excludeQueries != null) {
-        if (params.getBool(QueryElevationParams.MARK_EXCLUDES, false)) {
-          // We are only going to mark items as excluded, not actually exclude them.
-          // This works with the EditorialMarkerFactory.
-          rb.req.getContext().put(EXCLUDED, elevation.excludedIds);
-        } else {
-          for (TermQuery tq : elevation.excludeQueries) {
-            queryBuilder.add(tq, BooleanClause.Occur.MUST_NOT);
-          }
-        }
-      }
-      rb.setQuery(queryBuilder.build());
-    }
-  }
-
-  private void setSort(ResponseBuilder rb, Elevation elevation) throws IOException {
-    if (elevation.elevatedIds.isEmpty()) {
-      return;
-    }
-    boolean forceElevation = rb.req.getParams().getBool(QueryElevationParams.FORCE_ELEVATION, this.forceElevation);
-    boolean useConfigured = rb.req.getParams().getBool(QueryElevationParams.USE_CONFIGURED_ELEVATED_ORDER, this.useConfiguredElevatedOrder);
-    final IntIntHashMap elevatedWithPriority = getBoostDocs(rb.req.getSearcher(), elevation.elevatedIds, rb.req.getContext());
-    ElevationComparatorSource comparator = new ElevationComparatorSource(elevatedWithPriority, useConfigured);
-    setSortSpec(rb, forceElevation, comparator);
-    setGroupingSpec(rb, forceElevation, comparator);
-  }
-
-  private void setSortSpec(ResponseBuilder rb, boolean forceElevation, ElevationComparatorSource comparator) {
-    // if the sort is 'score desc' use a custom sorting method to
-    // insert documents in their proper place
-    SortSpec sortSpec = rb.getSortSpec();
-    if (sortSpec.getSort() == null) {
-      sortSpec.setSortAndFields(
-              new Sort(
-                      new SortField("_elevate_", comparator, true),
-                      new SortField(null, SortField.Type.SCORE, false)),
-              Arrays.asList(new SchemaField[2]));
-    } else {
-      // Check if the sort is based on score
-      SortSpec modSortSpec = this.modifySortSpec(sortSpec, forceElevation, comparator);
-      if (null != modSortSpec) {
-        rb.setSortSpec(modSortSpec);
-      }
-    }
-  }
-
-  private void setGroupingSpec(ResponseBuilder rb, boolean forceElevation, ElevationComparatorSource comparator) {
-    // alter the sorting in the grouping specification if there is one
-    GroupingSpecification groupingSpec = rb.getGroupingSpec();
-    if (groupingSpec != null) {
-      SortSpec groupSortSpec = groupingSpec.getGroupSortSpec();
-      SortSpec modGroupSortSpec = this.modifySortSpec(groupSortSpec, forceElevation, comparator);
-      if (modGroupSortSpec != null) {
-        groupingSpec.setGroupSortSpec(modGroupSortSpec);
-      }
-      SortSpec withinGroupSortSpec = groupingSpec.getWithinGroupSortSpec();
-      SortSpec modWithinGroupSortSpec = this.modifySortSpec(withinGroupSortSpec, forceElevation, comparator);
-      if (modWithinGroupSortSpec != null) {
-        groupingSpec.setWithinGroupSortSpec(modWithinGroupSortSpec);
-      }
-    }
-  }
-
-  private SortSpec modifySortSpec(SortSpec current, boolean forceElevation, ElevationComparatorSource comparator) {
-    boolean modify = false;
-    SortField[] currentSorts = current.getSort().getSort();
-    List<SchemaField> currentFields = current.getSchemaFields();
-
-    ArrayList<SortField> sorts = new ArrayList<>(currentSorts.length + 1);
-    List<SchemaField> fields = new ArrayList<>(currentFields.size() + 1);
-
-    // If elevation is forced and the primary sort is not by score, put the elevation sort first
-    if (forceElevation && currentSorts[0].getType() != SortField.Type.SCORE) {
-      sorts.add(new SortField("_elevate_", comparator, true));
-      fields.add(null);
-      modify = true;
-    }
-    for (int i = 0; i < currentSorts.length; i++) {
-      SortField sf = currentSorts[i];
-      if (sf.getType() == SortField.Type.SCORE) {
-        sorts.add(new SortField("_elevate_", comparator, !sf.getReverse()));
-        fields.add(null);
-        modify = true;
-      }
-      sorts.add(sf);
-      fields.add(currentFields.get(i));
-    }
-    return modify ?
-            new SortSpec(new Sort(sorts.toArray(new SortField[sorts.size()])),
-                    fields,
-                    current.getCount(),
-                    current.getOffset())
-            : null;
-  }
-
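As a worked example of modifySortSpec: with elevation forced and sort=price asc, the rewritten sort is (_elevate_ desc, price asc); with sort=score desc it becomes (_elevate_ desc, score desc), because an _elevate_ clause is inserted directly before every score clause. When the sort contains no score clause and elevation is not forced, the method returns null and the caller keeps the original SortSpec.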
-  private void addDebugInfo(ResponseBuilder rb, Elevation elevation) {
-    List<String> match = null;
-    if (elevation != null) {
-      // Extract the elevated terms into a list
-      match = new ArrayList<>(elevation.includeQuery.clauses().size());
-      for (BooleanClause clause : elevation.includeQuery.clauses()) {
-        TermQuery tq = (TermQuery) clause.getQuery();
-        match.add(tq.getTerm().text());
-      }
-    }
-    SimpleOrderedMap<Object> dbg = new SimpleOrderedMap<>();
-    dbg.add("q", rb.getQueryString());
-    dbg.add("match", match);
-    rb.addDebugInfo("queryBoosting", dbg);
-  }
-
-  //---------------------------------------------------------------------------------
-  // Boosted docs helper
-  //---------------------------------------------------------------------------------
-
-  /**
-   * Resolves a set of boosted docs by uniqueKey to a map of docIds mapped to a priority value &gt; 0.
-   * @param indexSearcher the SolrIndexSearcher; required
-   * @param boosted the set of uniqueKey values to be boosted, in priority order.  If null, returns null.
-   * @param context the {@link SolrQueryRequest#getContext()} or null if none.  We'll cache our results here.
-   */
-  //TODO consider simplifying to remove "boosted" arg which can be looked up in context via BOOSTED key?
-  public static IntIntHashMap getBoostDocs(SolrIndexSearcher indexSearcher, Set<BytesRef> boosted, Map context) throws IOException {
-
-    IntIntHashMap boostDocs = null;
-
-    if (boosted != null) {
-
-      //First see if it's already in the request context. Could have been put there by another caller.
-      if (context != null) {
-        boostDocs = (IntIntHashMap) context.get(BOOSTED_DOCIDS);
-        if (boostDocs != null) {
-          return boostDocs;
-        }
-      }
-
-      //Not in the context yet so load it.
-      boostDocs = new IntIntHashMap(boosted.size()); // docId to boost
-      int priority = boosted.size() + 1; // the corresponding priority for each boosted key (starts at this; decrements down)
-      for (BytesRef uniqueKey : boosted) {
-        priority--; // therefore first == boosted.size(); last will be 1
-        long segAndId = indexSearcher.lookupId(uniqueKey); // higher 32 bits == segment ID, low 32 bits == doc ID
-        if (segAndId == -1) { // not found
-          continue;
-        }
-        int seg = (int) (segAndId >> 32);
-        int localDocId = (int) segAndId;
-        final IndexReaderContext indexReaderContext = indexSearcher.getTopReaderContext().children().get(seg);
-        int docId = indexReaderContext.docBaseInParent + localDocId;
-        boostDocs.put(docId, priority);
-      }
-      assert priority == 1; // the last priority (lowest)
-    }
-
-    if (context != null) {
-      //noinspection unchecked
-      context.put(BOOSTED_DOCIDS, boostDocs);
-    }
-
-    return boostDocs;
-  }
-
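The segAndId decoding above relies on lookupId packing a segment ordinal and a segment-local docid into one long, as the inline comment states. A self-contained sketch of that packing scheme (pack is a hypothetical helper for illustration; lookupId itself returns -1 for misses):

    public class SegAndIdSketch {
      // High 32 bits = segment ordinal, low 32 bits = segment-local doc id.
      static long pack(int seg, int localDocId) {
        return (((long) seg) << 32) | (localDocId & 0xFFFFFFFFL);
      }

      public static void main(String[] args) {
        long segAndId = pack(3, 42);
        int seg = (int) (segAndId >> 32);   // 3
        int localDocId = (int) segAndId;    // 42 (the int cast keeps the low 32 bits)
        System.out.println(seg + " " + localDocId);
      }
    }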
-  //---------------------------------------------------------------------------------
-  // SolrInfoBean
-  //---------------------------------------------------------------------------------
-
-  @Override
-  public String getDescription() {
-    return "Query Boosting -- boost particular documents for a given query";
-  }
-
-  //---------------------------------------------------------------------------------
-  // Overrides
-  //---------------------------------------------------------------------------------
-
-  /**
-   * Creates the {@link ElevationProvider} to set during configuration loading. The same instance will be used later
-   * when elevating results for queries.
-   *
-   * @param queryAnalyzer to analyze and tokenize the query.
-   * @param elevationBuilderMap map of all {@link ElevatingQuery} and their corresponding {@link ElevationBuilder}.
-   * @return The created {@link ElevationProvider}.
-   */
-  protected ElevationProvider createElevationProvider(Analyzer queryAnalyzer, Map<ElevatingQuery, ElevationBuilder> elevationBuilderMap) {
-    return new MapElevationProvider(elevationBuilderMap);
-  }
-
-  //---------------------------------------------------------------------------------
-  // Query analysis and tokenization
-  //---------------------------------------------------------------------------------
-
-  /**
-   * Analyzes the provided query string and returns a concatenation of the analyzed tokens (no separator is inserted).
-   */
-  public String analyzeQuery(String query) {
-    //split query terms with analyzer then join
-    StringBuilder norm = new StringBuilder();
-    try (TokenStream tokens = queryAnalyzer.tokenStream("", query)) {
-      tokens.reset();
-      CharTermAttribute termAtt = tokens.addAttribute(CharTermAttribute.class);
-      while (tokens.incrementToken()) {
-        norm.append(termAtt);
-      }
-      tokens.end();
-    } catch (IOException e) {
-      Throwables.propagate(e);
-    }
-    return norm.toString();
-  }
-
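A minimal sketch of what analyzeQuery produces, substituting a WhitespaceAnalyzer for the configured query analyzer (in the component itself, whatever queryFieldType resolves to):

    import java.io.IOException;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class AnalyzeQuerySketch {
      public static void main(String[] args) throws IOException {
        try (WhitespaceAnalyzer analyzer = new WhitespaceAnalyzer();
             TokenStream tokens = analyzer.tokenStream("", "Foo  Bar")) {
          tokens.reset();
          CharTermAttribute term = tokens.addAttribute(CharTermAttribute.class);
          StringBuilder norm = new StringBuilder();
          while (tokens.incrementToken()) {
            norm.append(term);  // no separator, matching analyzeQuery above
          }
          tokens.end();
          System.out.println(norm);  // prints "FooBar"
        }
      }
    }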
-  //---------------------------------------------------------------------------------
-  // Testing
-  //---------------------------------------------------------------------------------
-
-  /**
-   * Helpful for testing without loading config.xml.
-   *
-   * @param reader      The {@link org.apache.lucene.index.IndexReader}.
-   * @param queryString The query for which to elevate some documents. If an elevation has already been defined
-   *                    for this query, this method overwrites it.
-   * @param subsetMatch <code>true</code> for query subset match; <code>false</code> for query exact match.
-   * @param elevatedIds The readable ids of the documents to set as top results for the provided query.
-   * @param excludedIds The readable ids of the document to exclude from results for the provided query.
-   * @throws java.io.IOException If there is a low-level I/O error.
-   */
-  @VisibleForTesting
-  void setTopQueryResults(IndexReader reader, String queryString, boolean subsetMatch,
-                          String[] elevatedIds, String[] excludedIds) throws IOException {
-    clearElevationProviderCache();
-    ElevatingQuery elevatingQuery = new ElevatingQuery(queryString, subsetMatch);
-    ElevationBuilder elevationBuilder = new ElevationBuilder();
-    elevationBuilder.addElevatedIds(elevatedIds == null ? Collections.emptyList() : Arrays.asList(elevatedIds));
-    elevationBuilder.addExcludedIds(excludedIds == null ? Collections.emptyList() : Arrays.asList(excludedIds));
-    Map<ElevatingQuery, ElevationBuilder> elevationBuilderMap = ImmutableMap.of(elevatingQuery, elevationBuilder);
-    synchronized (elevationProviderCache) {
-      elevationProviderCache.computeIfAbsent(reader, k -> createElevationProvider(queryAnalyzer, elevationBuilderMap));
-    }
-  }
-
-  @VisibleForTesting
-  void clearElevationProviderCache() {
-    synchronized (elevationProviderCache) {
-      elevationProviderCache.clear();
-    }
-  }
-
-  //---------------------------------------------------------------------------------
-  // Exception
-  //---------------------------------------------------------------------------------
-
-  private static class InitializationException extends Exception {
-
-    private final InitializationExceptionCause exceptionCause;
-
-    InitializationException(String message, InitializationExceptionCause exceptionCause) {
-      super(message);
-      this.exceptionCause = exceptionCause;
-    }
-  }
-
-  protected enum InitializationExceptionCause {
-    /**
-     * The component parameter {@link #FIELD_TYPE} defines an unknown field type.
-     */
-    UNKNOWN_FIELD_TYPE,
-    /**
-     * This component requires the schema to have a uniqueKeyField, which it does not have.
-     */
-    MISSING_UNIQUE_KEY_FIELD,
-    /**
-     * Missing component parameter {@link #CONFIG_FILE} - it has to define the path to the elevation configuration file (e.g. elevate.xml).
-     */
-    NO_CONFIG_FILE_DEFINED,
-    /**
-     * The elevation configuration file (e.g. elevate.xml) cannot be found, or is defined in both conf/ and data/ directories.
-     */
-    MISSING_CONFIG_FILE,
-    /**
-     * The elevation configuration file (e.g. elevate.xml) is empty.
-     */
-    EMPTY_CONFIG_FILE,
-    /**
-     * Unclassified exception cause.
-     */
-    OTHER,
-  }
-
-  //---------------------------------------------------------------------------------
-  // Elevation classes
-  //---------------------------------------------------------------------------------
-
-  /**
-   * Provides the elevations defined for queries.
-   */
-  protected interface ElevationProvider {
-    /**
-     * Gets the elevation associated with the provided query.
-     * <p>
-     * By contract and by design, only one elevation may be associated
-     * with a given query (this can be safely verified by an assertion).
-     *
-     * @param queryString The query string (not {@link #analyzeQuery(String) analyzed} yet,
-     *              this {@link ElevationProvider} is in charge of analyzing it).
-     * @return The elevation associated with the query; or <code>null</code> if none.
-     */
-    Elevation getElevationForQuery(String queryString);
-
-    /**
-     * Gets the number of query elevations in this {@link ElevationProvider}.
-     */
-    @VisibleForTesting
-    int size();
-  }
-
-  /**
-   * {@link ElevationProvider} that returns no elevation.
-   */
-  @SuppressWarnings("WeakerAccess")
-  protected static final ElevationProvider NO_OP_ELEVATION_PROVIDER = new ElevationProvider() {
-    @Override
-    public Elevation getElevationForQuery(String queryString) {
-      return null;
-    }
-
-    @Override
-    public int size() {
-      return 0;
-    }
-  };
-
-  /**
-   * Simple query exact match {@link ElevationProvider}.
-   * <p>
-   * It does not support subset matching (see {@link #parseMatchPolicy(String)}).
-   * <p>
-   * Immutable.
-   */
-  @SuppressWarnings("WeakerAccess")
-  protected class MapElevationProvider implements ElevationProvider {
-
-    private final Map<String, Elevation> elevationMap;
-
-    public MapElevationProvider(Map<ElevatingQuery, ElevationBuilder> elevationBuilderMap) {
-      elevationMap = buildElevationMap(elevationBuilderMap);
-    }
-
-    private Map<String, Elevation> buildElevationMap(Map<ElevatingQuery, ElevationBuilder> elevationBuilderMap) {
-      Map<String, Elevation> elevationMap = Maps.newHashMapWithExpectedSize(elevationBuilderMap.size());
-      for (Map.Entry<ElevatingQuery, ElevationBuilder> entry : elevationBuilderMap.entrySet()) {
-        ElevatingQuery elevatingQuery = entry.getKey();
-        if (elevatingQuery.subsetMatch) {
-          throw new UnsupportedOperationException("Subset matching is not supported by " + getClass().getName());
-        }
-        String analyzedQuery = analyzeQuery(elevatingQuery.queryString);
-        Elevation elevation = entry.getValue().build();
-        Elevation duplicateElevation = elevationMap.put(analyzedQuery, elevation);
-        if (duplicateElevation != null) {
-          throw new IllegalArgumentException("Duplicate elevation for query.  Analyzed: \"" + analyzedQuery + "\"" +
-              " Original: \"" + elevatingQuery.queryString + "\"");
-        }
-      }
-      return Collections.unmodifiableMap(elevationMap);
-    }
-
-    @Override
-    public Elevation getElevationForQuery(String queryString) {
-      String analyzedQuery = analyzeQuery(queryString);
-      return elevationMap.get(analyzedQuery);
-    }
-
-    @Override
-    public int size() {
-      return elevationMap.size();
-    }
-  }
-
-  /**
-   * Query triggering elevation.
-   */
-  @SuppressWarnings("WeakerAccess")
-  protected static class ElevatingQuery {
-
-    public final String queryString;
-    public final boolean subsetMatch;
-
-    /**
-     * @param queryString The query to elevate documents for (not the analyzed form).
-     * @param subsetMatch Whether to match a subset of query terms.
-     */
-    protected ElevatingQuery(String queryString, boolean subsetMatch) throws IOException {
-      this.queryString = queryString;
-      this.subsetMatch = subsetMatch;
-    }
-
-    @Override
-    public boolean equals(Object o) {
-      if (!(o instanceof ElevatingQuery)) {
-        return false;
-      }
-      ElevatingQuery eq = (ElevatingQuery) o;
-      return queryString.equals(eq.queryString) && subsetMatch == eq.subsetMatch;
-    }
-
-    @Override
-    public int hashCode() {
-      return queryString.hashCode() + (subsetMatch ? 1 : 0);
-    }
-  }
-
-  /**
-   * Builds an {@link Elevation}. This class is used to start defining query elevations, while allowing
-   * multiple elevations for the same query to be merged.
-   */
-  @SuppressWarnings("WeakerAccess")
-  public class ElevationBuilder {
-
-    /**
-     * The ids of the elevated documents that should appear on top of search results; can be <code>null</code>.
-     * The order is retained.
-     */
-    private LinkedHashSet<BytesRef> elevatedIds;
-    /**
-     * The ids of the excluded documents that should not appear in search results; can be <code>null</code>.
-     */
-    private Set<BytesRef> excludedIds;
-
-    // for temporary/transient use when adding an elevated or excluded ID
-    private final BytesRefBuilder scratch = new BytesRefBuilder();
-
-    public ElevationBuilder addElevatedIds(List<String> ids) {
-      if (elevatedIds == null) {
-        elevatedIds = new LinkedHashSet<>(Math.max(10, ids.size()));
-      }
-      for (String id : ids) {
-        elevatedIds.add(toBytesRef(id));
-      }
-      return this;
-    }
-
-    public ElevationBuilder addExcludedIds(Collection<String> ids) {
-      if (excludedIds == null) {
-        excludedIds = new LinkedHashSet<>(Math.max(10, ids.size()));
-      }
-      for (String id : ids) {
-        excludedIds.add(toBytesRef(id));
-      }
-      return this;
-    }
-
-    public BytesRef toBytesRef(String id) {
-      uniqueKeyField.getType().readableToIndexed(id, scratch);
-      return scratch.toBytesRef();
-    }
-
-    public ElevationBuilder merge(ElevationBuilder elevationBuilder) {
-      if (elevatedIds == null) {
-        elevatedIds = elevationBuilder.elevatedIds;
-      } else if (elevationBuilder.elevatedIds != null) {
-        elevatedIds.addAll(elevationBuilder.elevatedIds);
-      }
-      if (excludedIds == null) {
-        excludedIds = elevationBuilder.excludedIds;
-      } else if (elevationBuilder.excludedIds != null) {
-        excludedIds.addAll(elevationBuilder.excludedIds);
-      }
-      return this;
-    }
-
-    public Elevation build() {
-      return new Elevation(elevatedIds, excludedIds, uniqueKeyField.getName());
-    }
-
-  }
-
-  /**
-   * Elevation of some documents in search results, with potential exclusion of others.
-   */
-  protected static class Elevation {
-
-    private static final BooleanQuery EMPTY_QUERY = new BooleanQuery.Builder().build();
-
-    public final Set<BytesRef> elevatedIds; // in configured order; not null
-    public final BooleanQuery includeQuery; // not null
-    public final Set<BytesRef> excludedIds; // not null
-    // Just keep the term queries, because we will not always explicitly exclude the items (it depends on the markExcludes query-time param).
-    public final TermQuery[] excludeQueries; //may be null
-
-    /**
-     * Constructs an elevation.
-     * @param elevatedIds           The ids of the elevated documents that should appear on top of search results; can be <code>null</code>.
-     *                              In configured order.
-     * @param excludedIds           The ids of the excluded documents that should not appear in search results; can be <code>null</code>.
-     * @param queryFieldName        The field name to use to create query terms.
-     */
-    public Elevation(Set<BytesRef> elevatedIds, Set<BytesRef> excludedIds,
-                      String queryFieldName) {
-      if (elevatedIds == null || elevatedIds.isEmpty()) {
-        includeQuery = EMPTY_QUERY;
-        this.elevatedIds = Collections.emptySet();
-      } else {
-        this.elevatedIds = new LinkedHashSet<>(elevatedIds);
-        BooleanQuery.Builder includeQueryBuilder = new BooleanQuery.Builder();
-        for (BytesRef elevatedId : elevatedIds) {
-          includeQueryBuilder.add(new TermQuery(new Term(queryFieldName, elevatedId)), BooleanClause.Occur.SHOULD);
-        }
-        includeQuery = includeQueryBuilder.build();
-      }
-
-      if (excludedIds == null || excludedIds.isEmpty()) {
-        this.excludedIds = Collections.emptySet();
-        excludeQueries = null;
-      } else {
-        this.excludedIds = ImmutableSet.copyOf(excludedIds);
-        List<TermQuery> excludeQueriesBuilder = new ArrayList<>(excludedIds.size());
-        for (BytesRef excludedId : excludedIds) {
-          excludeQueriesBuilder.add(new TermQuery(new Term(queryFieldName, excludedId)));
-        }
-        excludeQueries = excludeQueriesBuilder.toArray(new TermQuery[excludeQueriesBuilder.size()]);
-      }
-    }
-
-    @Override
-    public String toString() {
-      return "{elevatedIds=" + Collections2.transform(elevatedIds, BytesRef::utf8ToString) +
-          ", excludedIds=" + Collections2.transform(excludedIds, BytesRef::utf8ToString) + "}";
-    }
-  }
-
-  /** Elevates certain docs to the top. */
-  private class ElevationComparatorSource extends FieldComparatorSource {
-
-    private final IntIntHashMap elevatedWithPriority;
-    private final boolean useConfiguredElevatedOrder;
-    private final int[] sortedElevatedDocIds;
-
-    private ElevationComparatorSource(IntIntHashMap elevatedWithPriority, boolean useConfiguredElevatedOrder) {
-      this.elevatedWithPriority = elevatedWithPriority;
-      this.useConfiguredElevatedOrder = useConfiguredElevatedOrder;
-
-      // copy elevatedWithPriority keys (doc IDs) into sortedElevatedDocIds, sorted
-      sortedElevatedDocIds = new int[elevatedWithPriority.size()];
-      final Iterator<IntIntCursor> iterator = elevatedWithPriority.iterator();
-      for (int i = 0; i < sortedElevatedDocIds.length; i++) {
-        IntIntCursor next = iterator.next();
-        sortedElevatedDocIds[i] = next.key;
-      }
-      assert iterator.hasNext() == false;
-      Arrays.sort(sortedElevatedDocIds);
-    }
-
-    @Override
-    public FieldComparator<Integer> newComparator(String fieldName, final int numHits, int sortPos, boolean reversed) {
-      return new SimpleFieldComparator<Integer>() {
-        final int[] values = new int[numHits];
-        int bottomVal;
-        int topVal;
-
-        int docBase;
-        boolean hasElevatedDocsThisSegment;
-
-        @Override
-        protected void doSetNextReader(LeafReaderContext context) throws IOException {
-          docBase = context.docBase;
-          // determine whether this segment contains any elevated docs
-          final int idx = Arrays.binarySearch(sortedElevatedDocIds, docBase);
-          if (idx < 0) {
-            //first doc in segment isn't elevated (typical).  Maybe another is?
-            int nextIdx = -idx - 1;
-            if (nextIdx < sortedElevatedDocIds.length) {
-              int nextElevatedDocId = sortedElevatedDocIds[nextIdx];
-              if (nextElevatedDocId > docBase + context.reader().maxDoc()) {
-                hasElevatedDocsThisSegment = false;
-                return;
-              }
-            }
-          }
-          hasElevatedDocsThisSegment = true;
-        }
-
-        @Override
-        public int compare(int slot1, int slot2) {
-          return values[slot1] - values[slot2];  // values will be small enough that there is no overflow concern
-        }
-
-        @Override
-        public void setBottom(int slot) {
-          bottomVal = values[slot];
-        }
-
-        @Override
-        public void setTopValue(Integer value) {
-          topVal = value;
-        }
-
-        private int docVal(int doc) {
-          if (!hasElevatedDocsThisSegment) {
-            assert elevatedWithPriority.containsKey(docBase + doc) == false;
-            return -1;
-          } else if (useConfiguredElevatedOrder) {
-            return elevatedWithPriority.getOrDefault(docBase + doc, -1);
-          } else {
-            return elevatedWithPriority.containsKey(docBase + doc) ? 1 : -1;
-          }
-        }
-
-        @Override
-        public int compareBottom(int doc) {
-          return bottomVal - docVal(doc);
-        }
-
-        @Override
-        public void copy(int slot, int doc) {
-          values[slot] = docVal(doc);
-        }
-
-        @Override
-        public Integer value(int slot) {
-          return values[slot];
-        }
-
-        @Override
-        public int compareTop(int doc) {
-          final int docValue = docVal(doc);
-          return topVal - docValue;  // values will be small enough that there is no overflow concern
-        }
-      };
-    }
-  }
-}
\ No newline at end of file
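For reference, the ordering logic in the deleted ElevationComparatorSource reduces to: map each doc to a priority (-1 for non-elevated docs, as in docVal() above) and sort descending. A minimal, self-contained sketch in plain Java (invented doc ids, not the Solr API):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class ElevationSortSketch {
      public static void main(String[] args) {
        // Configured elevation order: doc 7 first, then doc 3.
        List<Integer> configured = Arrays.asList(7, 3);

        // Higher priority sorts earlier; the first configured id gets the
        // largest value. Everything else maps to -1, as in docVal() above.
        Map<Integer, Integer> priority = new HashMap<>();
        for (int i = 0; i < configured.size(); i++) {
          priority.put(configured.get(i), configured.size() - i);
        }

        List<Integer> results = new ArrayList<>(Arrays.asList(1, 2, 3, 5, 7, 9));

        // List.sort is stable, so non-elevated docs (all -1) keep their
        // original relative order, mirroring a secondary sort field.
        results.sort((a, b) -> Integer.compare(
            priority.getOrDefault(b, -1), priority.getOrDefault(a, -1)));

        System.out.println(results); // [7, 3, 1, 2, 5, 9]
      }
    }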

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/RangeFacetProcessor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/RangeFacetProcessor.java b/solr/core/src/java/org/apache/solr/handler/component/RangeFacetProcessor.java
deleted file mode 100644
index 6f2fc26..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/RangeFacetProcessor.java
+++ /dev/null
@@ -1,276 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.lucene.search.Query;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.FacetParams.FacetRangeMethod;
-import org.apache.solr.common.params.FacetParams.FacetRangeOther;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.request.IntervalFacets;
-import org.apache.solr.request.SimpleFacets;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.schema.FieldType;
-import org.apache.solr.schema.IndexSchema;
-import org.apache.solr.schema.SchemaField;
-import org.apache.solr.schema.TrieField;
-import org.apache.solr.search.DocSet;
-import org.apache.solr.search.SyntaxError;
-
-/**
- * Processor for Range Facets
- */
-public class RangeFacetProcessor extends SimpleFacets {
-
-  public RangeFacetProcessor(SolrQueryRequest req, DocSet docs, SolrParams params, ResponseBuilder rb) {
-    super(req, docs, params, rb);
-  }
-
-  /**
-   * Returns a list of value constraints and the associated facet
-   * counts for each facet numerical field, range, and interval
-   * specified in the SolrParams
-   *
-   * @see org.apache.solr.common.params.FacetParams#FACET_RANGE
-   */
-  public NamedList<Object> getFacetRangeCounts() throws IOException, SyntaxError {
-    final NamedList<Object> resOuter = new SimpleOrderedMap<>();
-
-    List<RangeFacetRequest> rangeFacetRequests = Collections.emptyList();
-    try {
-      FacetComponent.FacetContext facetContext = FacetComponent.FacetContext.getFacetContext(req);
-      rangeFacetRequests = facetContext.getAllRangeFacetRequests();
-    } catch (IllegalStateException e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to compute facet ranges, facet context is not set");
-    }
-
-    if (rangeFacetRequests.isEmpty()) return resOuter;
-    for (RangeFacetRequest rangeFacetRequest : rangeFacetRequests) {
-      getFacetRangeCounts(rangeFacetRequest, resOuter);
-    }
-
-    return resOuter;
-  }
-
-  /**
-   * Returns a list of value constraints and the associated facet counts
-   * for each facet range specified by the given {@link RangeFacetRequest}
-   */
-  public void getFacetRangeCounts(RangeFacetRequest rangeFacetRequest, NamedList<Object> resOuter)
-      throws IOException, SyntaxError {
-
-    final IndexSchema schema = searcher.getSchema();
-
-    final String key = rangeFacetRequest.getKey();
-    final String f = rangeFacetRequest.facetOn;
-    FacetRangeMethod method = rangeFacetRequest.getMethod();
-
-    final SchemaField sf = schema.getField(f);
-    final FieldType ft = sf.getType();
-
-    if (method.equals(FacetRangeMethod.DV)) {
-      assert ft instanceof TrieField || ft.isPointField();
-      resOuter.add(key, getFacetRangeCountsDocValues(rangeFacetRequest));
-    } else {
-      resOuter.add(key, getFacetRangeCounts(rangeFacetRequest));
-    }
-  }
-
-  private <T extends Comparable<T>> NamedList getFacetRangeCounts(final RangeFacetRequest rfr)
-      throws IOException, SyntaxError {
-
-    final NamedList<Object> res = new SimpleOrderedMap<>();
-    final NamedList<Integer> counts = new NamedList<>();
-    res.add("counts", counts);
-
-    // explicitly return the gap.
-    res.add("gap", rfr.getGapObj());
-
-    DocSet docSet = computeDocSet(docsOrig, rfr.getExcludeTags());
-
-    for (RangeFacetRequest.FacetRange range : rfr.getFacetRanges()) {
-      if (range.other != null) {
-        // these are added to top-level NamedList
-        // and we always include them regardless of mincount
-        res.add(range.other.toString(), rangeCount(docSet, rfr, range));
-      } else {
-        final int count = rangeCount(docSet, rfr, range);
-        if (count >= rfr.getMinCount()) {
-          counts.add(range.lower, count);
-        }
-      }
-    }
-
-    // explicitly return the start and end so all the counts
-    // (including before/after/between) are meaningful - even if mincount
-    // has removed the neighboring ranges
-    res.add("start", rfr.getStartObj());
-    res.add("end", rfr.getEndObj());
-
-    return res;
-  }
-
-  private <T extends Comparable<T>> NamedList<Object> getFacetRangeCountsDocValues(RangeFacetRequest rfr)
-      throws IOException, SyntaxError {
-
-    SchemaField sf = rfr.getSchemaField();
-    final NamedList<Object> res = new SimpleOrderedMap<>();
-    final NamedList<Integer> counts = new NamedList<>();
-    res.add("counts", counts);
-
-    ArrayList<IntervalFacets.FacetInterval> intervals = new ArrayList<>();
-
-    // explicitly return the gap.  compute this early so we are more
-    // likely to catch parse errors before attempting math
-    res.add("gap", rfr.getGapObj());
-
-    final int minCount = rfr.getMinCount();
-
-    boolean includeBefore = false;
-    boolean includeBetween = false;
-    boolean includeAfter = false;
-
-    Set<FacetRangeOther> others = rfr.getOthers();
-    // Intervals must be in order (see IntervalFacets.getSortedIntervals), if "BEFORE" or
-    // "BETWEEN" are set, they must be added first
-    // no matter what other values are listed, we don't do
-    // anything if "none" is specified.
-    if (!others.contains(FacetRangeOther.NONE)) {
-      if (others.contains(FacetRangeOther.ALL) || others.contains(FacetRangeOther.BEFORE)) {
-        // We'll add an interval later in this position
-        intervals.add(null);
-        includeBefore = true;
-      }
-
-      if (others.contains(FacetRangeOther.ALL) || others.contains(FacetRangeOther.BETWEEN)) {
-        // We'll add an interval later in this position
-        intervals.add(null);
-        includeBetween = true;
-      }
-
-      if (others.contains(FacetRangeOther.ALL) || others.contains(FacetRangeOther.AFTER)) {
-        includeAfter = true;
-      }
-    }
-
-    IntervalFacets.FacetInterval after = null;
-
-    for (RangeFacetRequest.FacetRange range : rfr.getFacetRanges()) {
-      try {
-        FacetRangeOther other = FacetRangeOther.get(range.name);
-        if (other != null) {
-          switch (other) {
-            case BEFORE:
-              assert range.lower == null;
-              intervals.set(0, new IntervalFacets.FacetInterval(sf, "*", range.upper, range.includeLower,
-                  range.includeUpper, FacetRangeOther.BEFORE.toString()));
-              break;
-            case AFTER:
-              assert range.upper == null;
-              after = new IntervalFacets.FacetInterval(sf, range.lower, "*",
-                  range.includeLower, range.includeUpper, FacetRangeOther.AFTER.toString());
-              break;
-            case BETWEEN:
-              intervals.set(includeBefore ? 1 : 0, new IntervalFacets.FacetInterval(sf, range.lower, range.upper,
-                  range.includeLower, range.includeUpper, FacetRangeOther.BETWEEN.toString()));
-              break;
-            case ALL:
-            case NONE:
-              break;
-          }
-        }
-        continue;
-      } catch (SolrException e) {
-        // range.name is not a FacetRangeOther value; fall through and add it as a regular interval
-      }
-
-      intervals.add(new IntervalFacets.FacetInterval(sf, range.lower, range.upper, range.includeLower, range.includeUpper, range.lower));
-    }
-
-    if (includeAfter) {
-      assert after != null;
-      intervals.add(after);
-    }
-
-    IntervalFacets.FacetInterval[] intervalsArray = intervals.toArray(new IntervalFacets.FacetInterval[intervals.size()]);
-    // don't use the ArrayList anymore
-    intervals = null;
-
-    new IntervalFacets(sf, searcher, computeDocSet(docsOrig, rfr.getExcludeTags()), intervalsArray);
-
-    int intervalIndex = 0;
-    int lastIntervalIndex = intervalsArray.length - 1;
-    // if the user requested "BEFORE", it will be the first of the intervals. Needs to be added to the
-    // response named list instead of with the counts
-    if (includeBefore) {
-      res.add(intervalsArray[intervalIndex].getKey(), intervalsArray[intervalIndex].getCount());
-      intervalIndex++;
-    }
-
-    // if the user requested "BETWEEN", it will be the first or second of the intervals (depending on if
-    // "BEFORE" was also requested). Needs to be added to the response named list instead of with the counts
-    if (includeBetween) {
-      res.add(intervalsArray[intervalIndex].getKey(), intervalsArray[intervalIndex].getCount());
-      intervalIndex++;
-    }
-
-    // if the user requested "AFTER", it will be the last of the intervals.
-    // Needs to be added to the response named list instead of with the counts
-    if (includeAfter) {
-      res.add(intervalsArray[lastIntervalIndex].getKey(), intervalsArray[lastIntervalIndex].getCount());
-      lastIntervalIndex--;
-    }
-    // now add all other intervals to the counts NL
-    while (intervalIndex <= lastIntervalIndex) {
-      IntervalFacets.FacetInterval interval = intervalsArray[intervalIndex];
-      if (interval.getCount() >= minCount) {
-        counts.add(interval.getKey(), interval.getCount());
-      }
-      intervalIndex++;
-    }
-
-    res.add("start", rfr.getStartObj());
-    res.add("end", rfr.getEndObj());
-    return res;
-  }
-
-  /**
-   * Macro for getting the numDocs of range over docs
-   *
-   * @see org.apache.solr.search.SolrIndexSearcher#numDocs
-   * @see org.apache.lucene.search.TermRangeQuery
-   */
-  protected int rangeCount(DocSet subset, RangeFacetRequest rfr, RangeFacetRequest.FacetRange fr) throws IOException, SyntaxError {
-    SchemaField schemaField = rfr.getSchemaField();
-    Query rangeQ = schemaField.getType().getRangeQuery(null, schemaField, fr.lower, fr.upper, fr.includeLower, fr.includeUpper);
-    if (rfr.isGroupFacet()) {
-      return getGroupedFacetQueryCount(rangeQ, subset);
-    } else {
-      return searcher.numDocs(rangeQ, subset);
-    }
-  }
-
-}
-
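The core of the deleted range-facet computation is bucketing values into [start, end) ranges of width gap, plus optional before/between/after buckets. A rough standalone illustration (plain Java over an in-memory array; the data and the simplified before/after handling are invented, not the FacetRangeOther implementation):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class RangeFacetSketch {
      public static void main(String[] args) {
        double start = 0, end = 100, gap = 25;
        double[] values = {-5, 3, 10, 26, 50, 77, 99, 120};

        // counts keyed by each range's lower bound, like the "counts" NamedList
        Map<Double, Integer> counts = new LinkedHashMap<>();
        for (double lower = start; lower < end; lower += gap) {
          counts.put(lower, 0);
        }
        int before = 0, after = 0;

        for (double v : values) {
          if (v < start) {
            before++;                 // FacetRangeOther.BEFORE
          } else if (v >= end) {
            after++;                  // FacetRangeOther.AFTER
          } else {
            double lower = start + Math.floor((v - start) / gap) * gap;
            counts.merge(lower, 1, Integer::sum);
          }
        }

        // counts={0.0=2, 25.0=1, 50.0=1, 75.0=2} before=1 after=1
        System.out.println("counts=" + counts + " before=" + before + " after=" + after);
      }
    }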


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerValidationException.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerValidationException.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerValidationException.java
deleted file mode 100644
index 648e1e4..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerValidationException.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.autoscaling;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * This class represents errors found when validating trigger configuration.
- */
-public class TriggerValidationException extends Exception {
-  private final Map<String, String> details = new HashMap<>();
-  private final String name;
-
-  /**
-   * Create an exception.
-   * @param name name of the trigger / action / listener that caused the exception
-   * @param details details of invalid configuration - key is a property name,
-   *                value is an error message.
-   */
-  public TriggerValidationException(String name, Map<String, String> details) {
-    super();
-    this.name = name;
-    if (details != null) {
-      this.details.putAll(details);
-    }
-  }
-
-  /**
-   * Create an exception.
-   * @param name name of the trigger / action / listener that caused the exception
-   * @param keyValues zero or even number of arguments representing symbolic key
-   *                  (e.g. property name) and the corresponding validation error message.
-   */
-  public TriggerValidationException(String name, String... keyValues) {
-    super();
-    this.name = name;
-    if (keyValues == null || keyValues.length == 0) {
-      return;
-    }
-    if (keyValues.length % 2 != 0) {
-      throw new IllegalArgumentException("number of arguments representing key & value pairs must be even");
-    }
-    for (int i = 0; i < keyValues.length; i += 2) {
-      details.put(keyValues[i], keyValues[i + 1]);
-    }
-  }
-
-  public Map<String, String> getDetails() {
-    return details;
-  }
-
-  @Override
-  public String toString() {
-    return "TriggerValidationException{" +
-        "name=" + name +
-        ", details='" + details + '\'' +
-        '}';
-  }
-}
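A brief usage sketch for the varargs constructor above (the trigger name and property messages are hypothetical; assumes the class shown in this diff is on the classpath):

    import org.apache.solr.cloud.autoscaling.TriggerValidationException;

    public class TriggerValidationDemo {
      public static void main(String[] args) {
        // keyValues must come in pairs: property name, then its error message
        TriggerValidationException e = new TriggerValidationException(
            "node_lost_trigger",
            "waitFor", "must be a non-negative integer",
            "actions", "unknown action class");
        System.out.println(e.getDetails().size()); // 2
        System.out.println(e); // TriggerValidationException{name=node_lost_trigger, ...}
      }
    }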

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/package-info.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/package-info.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/package-info.java
deleted file mode 100644
index d3447aa..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package for classes related to autoscaling
- */
-package org.apache.solr.cloud.autoscaling;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java
deleted file mode 100644
index 80f2445..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.overseer;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.solr.client.solrj.cloud.DistribStateManager;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.ImplicitDocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-public class ClusterStateMutator {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  protected final SolrCloudManager dataProvider;
-  protected final DistribStateManager stateManager;
-
-  public ClusterStateMutator(SolrCloudManager dataProvider) {
-    this.dataProvider = dataProvider;
-    this.stateManager = dataProvider.getDistribStateManager();
-  }
-
-  public ZkWriteCommand createCollection(ClusterState clusterState, ZkNodeProps message) {
-    String cName = message.getStr(NAME);
-    log.debug("building a new cName: " + cName);
-    if (clusterState.hasCollection(cName)) {
-      log.warn("Collection {} already exists. exit", cName);
-      return ZkStateWriter.NO_OP;
-    }
-
-    Map<String, Object> routerSpec = DocRouter.getRouterSpec(message);
-    String routerName = routerSpec.get(NAME) == null ? DocRouter.DEFAULT_NAME : (String) routerSpec.get(NAME);
-    DocRouter router = DocRouter.getDocRouter(routerName);
-
-    Object messageShardsObj = message.get("shards");
-
-    Map<String, Slice> slices;
-    if (messageShardsObj instanceof Map) { // we are being explicitly told the slice data (e.g. coll restore)
-      slices = Slice.loadAllFromMap((Map<String, Object>)messageShardsObj);
-    } else {
-      List<String> shardNames = new ArrayList<>();
-
-      if (router instanceof ImplicitDocRouter) {
-        getShardNames(shardNames, message.getStr("shards", DocRouter.DEFAULT_NAME));
-      } else {
-        int numShards = message.getInt(ZkStateReader.NUM_SHARDS_PROP, -1);
-        if (numShards < 1)
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "numShards is a required parameter for 'compositeId' router");
-        getShardNames(numShards, shardNames);
-      }
-      List<DocRouter.Range> ranges = router.partitionRange(shardNames.size(), router.fullRange()); // may be null
-
-      slices = new LinkedHashMap<>();
-      for (int i = 0; i < shardNames.size(); i++) {
-        String sliceName = shardNames.get(i);
-
-        Map<String, Object> sliceProps = new LinkedHashMap<>(1);
-        sliceProps.put(Slice.RANGE, ranges == null ? null : ranges.get(i));
-
-        slices.put(sliceName, new Slice(sliceName, null, sliceProps));
-      }
-    }
-
-    Map<String, Object> collectionProps = new HashMap<>();
-
-    for (Map.Entry<String, Object> e : OverseerCollectionMessageHandler.COLLECTION_PROPS_AND_DEFAULTS.entrySet()) {
-      Object val = message.get(e.getKey());
-      if (val == null) {
-        val = OverseerCollectionMessageHandler.COLLECTION_PROPS_AND_DEFAULTS.get(e.getKey());
-      }
-      if (val != null) collectionProps.put(e.getKey(), val);
-    }
-    collectionProps.put(DocCollection.DOC_ROUTER, routerSpec);
-
-    if (message.getStr("fromApi") == null) {
-      collectionProps.put("autoCreated", "true");
-    }
-
-    //TODO default to 2; but need to debug why BasicDistributedZk2Test fails early on
-    String znode = message.getInt(DocCollection.STATE_FORMAT, 1) == 1 ? null
-        : ZkStateReader.getCollectionPath(cName);
-
-    DocCollection newCollection = new DocCollection(cName,
-        slices, collectionProps, router, -1, znode);
-
-    return new ZkWriteCommand(cName, newCollection);
-  }
-
-  public ZkWriteCommand deleteCollection(ClusterState clusterState, ZkNodeProps message) {
-    final String collection = message.getStr(NAME);
-    if (!CollectionMutator.checkKeyExistence(message, NAME)) return ZkStateWriter.NO_OP;
-    DocCollection coll = clusterState.getCollectionOrNull(collection);
-    if (coll == null) return ZkStateWriter.NO_OP;
-
-    return new ZkWriteCommand(coll.getName(), null);
-  }
-
-  public static ClusterState newState(ClusterState state, String name, DocCollection collection) {
-    ClusterState newClusterState = null;
-    if (collection == null) {
-      newClusterState = state.copyWith(name, null);
-    } else {
-      newClusterState = state.copyWith(name, collection);
-    }
-    return newClusterState;
-  }
-
-  public static void getShardNames(Integer numShards, List<String> shardNames) {
-    if (numShards == null)
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "numShards" + " is a required param");
-    for (int i = 0; i < numShards; i++) {
-      final String sliceName = "shard" + (i + 1);
-      shardNames.add(sliceName);
-    }
-
-  }
-
-  public static void getShardNames(List<String> shardNames, String shards) {
-    if (shards == null)
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "shards" + " is a required param");
-    for (String s : shards.split(",")) {
-      if (s == null || s.trim().isEmpty()) continue;
-      shardNames.add(s.trim());
-    }
-    if (shardNames.isEmpty())
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "shards" + " is a required param");
-  }
-
-  /*
-   * Return an already assigned id or null if not assigned
-   */
-  public static String getAssignedId(final DocCollection collection, final String nodeName) {
-    Collection<Slice> slices = collection != null ? collection.getSlices() : null;
-    if (slices != null) {
-      for (Slice slice : slices) {
-        if (slice.getReplicasMap().get(nodeName) != null) {
-          return slice.getName();
-        }
-      }
-    }
-    return null;
-  }
-
-  public static String getAssignedCoreNodeName(DocCollection collection, String forNodeName, String forCoreName) {
-    Collection<Slice> slices = collection != null ? collection.getSlices() : null;
-    if (slices != null) {
-      for (Slice slice : slices) {
-        for (Replica replica : slice.getReplicas()) {
-          String nodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP);
-          String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
-
-          if (nodeName.equals(forNodeName) && core.equals(forCoreName)) {
-            return replica.getName();
-          }
-        }
-      }
-    }
-    return null;
-  }
-
-  public ZkWriteCommand migrateStateFormat(ClusterState clusterState, ZkNodeProps message) {
-    final String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
-    if (!CollectionMutator.checkKeyExistence(message, ZkStateReader.COLLECTION_PROP)) return ZkStateWriter.NO_OP;
-    DocCollection coll = clusterState.getCollectionOrNull(collection);
-    if (coll == null || coll.getStateFormat() == 2) return ZkStateWriter.NO_OP;
-
-    return new ZkWriteCommand(coll.getName(),
-        new DocCollection(coll.getName(), coll.getSlicesMap(), coll.getProperties(), coll.getRouter(), 0,
-            ZkStateReader.getCollectionPath(collection)));
-  }
-}
-
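For orientation, the shard bookkeeping above pairs generated shard names with contiguous hash ranges. A simplified stand-in (the even 32-bit split is an approximation for illustration, not DocRouter.partitionRange itself):

    import java.util.ArrayList;
    import java.util.List;

    public class ShardLayoutSketch {
      public static void main(String[] args) {
        int numShards = 4;

        // same naming scheme as getShardNames(): shard1, shard2, ...
        List<String> names = new ArrayList<>();
        for (int i = 0; i < numShards; i++) {
          names.add("shard" + (i + 1));
        }

        // split the full signed 32-bit hash range into numShards even slices
        long min = Integer.MIN_VALUE;
        long span = 1L << 32;
        for (int i = 0; i < numShards; i++) {
          long lo = min + span * i / numShards;
          long hi = min + span * (i + 1) / numShards - 1;
          System.out.printf("%s -> [%08x, %08x]%n", names.get(i), (int) lo, (int) hi);
        }
      }
    }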

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/overseer/CollectionMutator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/CollectionMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/CollectionMutator.java
deleted file mode 100644
index 88e18e2..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/CollectionMutator.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.overseer;
-
-import java.lang.invoke.MethodHandles;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.Map;
-
-import org.apache.solr.client.solrj.cloud.DistribStateManager;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.ImplicitDocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.Utils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-public class CollectionMutator {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  protected final SolrCloudManager cloudManager;
-  protected final DistribStateManager stateManager;
-
-  public CollectionMutator(SolrCloudManager cloudManager) {
-    this.cloudManager = cloudManager;
-    this.stateManager = cloudManager.getDistribStateManager();
-  }
-
-  public ZkWriteCommand createShard(final ClusterState clusterState, ZkNodeProps message) {
-    String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
-    if (!checkCollectionKeyExistence(message)) return ZkStateWriter.NO_OP;
-    String shardId = message.getStr(ZkStateReader.SHARD_ID_PROP);
-    DocCollection collection = clusterState.getCollection(collectionName);
-    Slice slice = collection.getSlice(shardId);
-    if (slice == null) {
-      Map<String, Replica> replicas = Collections.emptyMap();
-      Map<String, Object> sliceProps = new HashMap<>();
-      String shardRange = message.getStr(ZkStateReader.SHARD_RANGE_PROP);
-      String shardState = message.getStr(ZkStateReader.SHARD_STATE_PROP);
-      String shardParent = message.getStr(ZkStateReader.SHARD_PARENT_PROP);
-      String shardParentZkSession = message.getStr("shard_parent_zk_session");
-      String shardParentNode = message.getStr("shard_parent_node");
-      sliceProps.put(Slice.RANGE, shardRange);
-      sliceProps.put(ZkStateReader.STATE_PROP, shardState);
-      if (shardParent != null) {
-        sliceProps.put(Slice.PARENT, shardParent);
-      }
-      if (shardParentZkSession != null) {
-        sliceProps.put("shard_parent_zk_session", shardParentZkSession);
-      }
-      if (shardParentNode != null)  {
-        sliceProps.put("shard_parent_node", shardParentNode);
-      }
-      collection = updateSlice(collectionName, collection, new Slice(shardId, replicas, sliceProps));
-      return new ZkWriteCommand(collectionName, collection);
-    } else {
-      log.error("Unable to create Shard: " + shardId + " because it already exists in collection: " + collectionName);
-      return ZkStateWriter.NO_OP;
-    }
-  }
-
-  public ZkWriteCommand deleteShard(final ClusterState clusterState, ZkNodeProps message) {
-    final String sliceId = message.getStr(ZkStateReader.SHARD_ID_PROP);
-    final String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
-    if (!checkCollectionKeyExistence(message)) return ZkStateWriter.NO_OP;
-
-    log.info("Removing collection: " + collection + " shard: " + sliceId + " from clusterstate");
-
-    DocCollection coll = clusterState.getCollection(collection);
-
-    Map<String, Slice> newSlices = new LinkedHashMap<>(coll.getSlicesMap());
-    newSlices.remove(sliceId);
-
-    DocCollection newCollection = coll.copyWithSlices(newSlices);
-    return new ZkWriteCommand(collection, newCollection);
-  }
-
-  public ZkWriteCommand modifyCollection(final ClusterState clusterState, ZkNodeProps message) {
-    if (!checkCollectionKeyExistence(message)) return ZkStateWriter.NO_OP;
-    DocCollection coll = clusterState.getCollection(message.getStr(COLLECTION_PROP));
-    Map<String, Object> m = coll.shallowCopy();
-    boolean hasAnyOps = false;
-    for (String prop : CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES) {
-      if (message.containsKey(prop)) {
-        hasAnyOps = true;
-        if (message.get(prop) == null)  {
-          m.remove(prop);
-        } else  {
-          m.put(prop, message.get(prop));
-        }
-        if (prop == REPLICATION_FACTOR) { //SOLR-11676 : keep NRT_REPLICAS and REPLICATION_FACTOR in sync
-          m.put(NRT_REPLICAS, message.get(REPLICATION_FACTOR));
-        }
-      }
-    }
-
-    if (!hasAnyOps) {
-      return ZkStateWriter.NO_OP;
-    }
-
-    return new ZkWriteCommand(coll.getName(),
-        new DocCollection(coll.getName(), coll.getSlicesMap(), m, coll.getRouter(), coll.getZNodeVersion(), coll.getZNode()));
-  }
-
-  public static DocCollection updateSlice(String collectionName, DocCollection collection, Slice slice) {
-    DocCollection newCollection = null;
-    Map<String, Slice> slices;
-
-    if (collection == null) {
-      // updateSlice is currently called on a collection that doesn't exist only when a core is publishing itself
-      // without explicitly creating a collection.  In that case, we assume custom sharding with an "implicit" router.
-      slices = new LinkedHashMap<>(1);
-      slices.put(slice.getName(), slice);
-      Map<String, Object> props = new HashMap<>(1);
-      props.put(DocCollection.DOC_ROUTER, Utils.makeMap(NAME, ImplicitDocRouter.NAME));
-      newCollection = new DocCollection(collectionName, slices, props, new ImplicitDocRouter());
-    } else {
-      slices = new LinkedHashMap<>(collection.getSlicesMap()); // make a shallow copy
-      slices.put(slice.getName(), slice);
-      newCollection = collection.copyWithSlices(slices);
-    }
-
-    return newCollection;
-  }
-
-  static boolean checkCollectionKeyExistence(ZkNodeProps message) {
-    return checkKeyExistence(message, ZkStateReader.COLLECTION_PROP);
-  }
-
-  static boolean checkKeyExistence(ZkNodeProps message, String key) {
-    String value = message.getStr(key);
-    if (value == null || value.trim().length() == 0) {
-      log.error("Skipping invalid Overseer message because it has no " + key + " specified: " + message);
-      return false;
-    }
-    return true;
-  }
-}
-
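The updateSlice() method above follows the usual copy-on-write pattern for cluster state: never mutate the current snapshot, copy it, apply the change, publish a new immutable snapshot. Reduced to plain maps (names are illustrative only):

    import java.util.Collections;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class CopyOnWriteSketch {

      static Map<String, String> updateSlice(Map<String, String> slices,
                                             String name, String state) {
        Map<String, String> copy = new LinkedHashMap<>(slices); // shallow copy
        copy.put(name, state);
        return Collections.unmodifiableMap(copy);               // new snapshot
      }

      public static void main(String[] args) {
        Map<String, String> base = new LinkedHashMap<>();
        base.put("shard1", "active");
        Map<String, String> v1 = Collections.unmodifiableMap(base);

        Map<String, String> v2 = updateSlice(v1, "shard2", "construction");
        System.out.println(v1); // {shard1=active} -- unchanged
        System.out.println(v2); // {shard1=active, shard2=construction}
      }
    }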

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/overseer/NodeMutator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/NodeMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/NodeMutator.java
deleted file mode 100644
index e7aa7b9..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/NodeMutator.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.overseer;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class NodeMutator {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public List<ZkWriteCommand> downNode(ClusterState clusterState, ZkNodeProps message) {
-    List<ZkWriteCommand> zkWriteCommands = new ArrayList<>();
-    String nodeName = message.getStr(ZkStateReader.NODE_NAME_PROP);
-
-    log.debug("DownNode state invoked for node: " + nodeName);
-
-    Map<String, DocCollection> collections = clusterState.getCollectionsMap();
-    for (Map.Entry<String, DocCollection> entry : collections.entrySet()) {
-      String collection = entry.getKey();
-      DocCollection docCollection = entry.getValue();
-
-      Map<String,Slice> slicesCopy = new LinkedHashMap<>(docCollection.getSlicesMap());
-
-      boolean needToUpdateCollection = false;
-      for (Entry<String, Slice> sliceEntry : slicesCopy.entrySet()) {
-        Slice slice = sliceEntry.getValue();
-        Map<String, Replica> newReplicas = slice.getReplicasCopy();
-
-        Collection<Replica> replicas = slice.getReplicas();
-        for (Replica replica : replicas) {
-          String rNodeName = replica.getNodeName();
-          if (rNodeName == null) {
-            throw new RuntimeException("Replica without node name! " + replica);
-          }
-          if (rNodeName.equals(nodeName)) {
-            log.debug("Update replica state for " + replica + " to " + Replica.State.DOWN.toString());
-            Map<String, Object> props = replica.shallowCopy();
-            props.put(ZkStateReader.STATE_PROP, Replica.State.DOWN.toString());
-            Replica newReplica = new Replica(replica.getName(), props);
-            newReplicas.put(replica.getName(), newReplica);
-            needToUpdateCollection = true;
-          }
-        }
-
-        Slice newSlice = new Slice(slice.getName(), newReplicas, slice.shallowCopy());
-        slicesCopy.put(slice.getName(), newSlice);
-      }
-
-      if (needToUpdateCollection) {
-        zkWriteCommands.add(new ZkWriteCommand(collection, docCollection.copyWithSlices(slicesCopy)));
-      }
-    }
-
-    return zkWriteCommands;
-  }
-}
-
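The downNode() loop above rewrites, via copied maps, the state of every replica hosted on the lost node. The same idea in plain Java (collection, replica, and node names are invented):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class DownNodeSketch {
      public static void main(String[] args) {
        // replica name -> the node hosting it
        Map<String, String> replicaNodes = new LinkedHashMap<>();
        replicaNodes.put("core_node1", "nodeA:8983_solr");
        replicaNodes.put("core_node2", "nodeB:8983_solr");
        replicaNodes.put("core_node3", "nodeA:8983_solr");

        String lostNode = "nodeA:8983_solr";

        // build a new state map rather than mutating shared state
        Map<String, String> states = new LinkedHashMap<>();
        for (Map.Entry<String, String> e : replicaNodes.entrySet()) {
          boolean down = e.getValue().equals(lostNode);
          states.put(e.getKey(), down ? "down" : "active");
        }
        // {core_node1=down, core_node2=active, core_node3=down}
        System.out.println(states);
      }
    }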

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/overseer/OverseerAction.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/OverseerAction.java b/solr/core/src/java/org/apache/solr/cloud/overseer/OverseerAction.java
deleted file mode 100644
index 3fefc8f..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/OverseerAction.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.overseer;
-
-import java.util.Locale;
-
-/**
- * Enum of actions supported by the overseer only.
- *
- * There are other actions supported which are public and defined
- * in {@link org.apache.solr.common.params.CollectionParams.CollectionAction}
- */
-public enum OverseerAction {
-  LEADER,
-  DELETECORE,
-  ADDROUTINGRULE,
-  REMOVEROUTINGRULE,
-  UPDATESHARDSTATE,
-  STATE,
-  QUIT,
-  DOWNNODE;
-
-  public static OverseerAction get(String p) {
-    if (p != null) {
-      try {
-        return OverseerAction.valueOf(p.toUpperCase(Locale.ROOT));
-      } catch (Exception ex) {
-        // not a recognized overseer action name; fall through and return null
-      }
-    }
-    return null;
-  }
-
-  public boolean isEqual(String s) {
-    return s != null && toString().equals(s.toUpperCase(Locale.ROOT));
-  }
-
-  public String toLower() {
-    return toString().toLowerCase(Locale.ROOT);
-  }
-}
-
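A quick usage sketch for the enum above (assumes the class shown in this diff is on the classpath):

    import org.apache.solr.cloud.overseer.OverseerAction;

    public class OverseerActionDemo {
      public static void main(String[] args) {
        // get() parses case-insensitively and returns null on unknown input
        System.out.println(OverseerAction.get("downnode"));    // DOWNNODE
        System.out.println(OverseerAction.get("no-such-op"));  // null
        System.out.println(OverseerAction.DOWNNODE.toLower()); // downnode
      }
    }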

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
deleted file mode 100644
index 68a42b9..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
+++ /dev/null
@@ -1,497 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.overseer;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.solr.client.solrj.cloud.DistribStateManager;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
-import org.apache.solr.cloud.CloudUtil;
-import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.api.collections.Assign;
-import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
-import org.apache.solr.cloud.api.collections.SplitShardCmd;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.util.TestInjection;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.overseer.CollectionMutator.checkCollectionKeyExistence;
-import static org.apache.solr.cloud.overseer.CollectionMutator.checkKeyExistence;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-public class ReplicaMutator {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  protected final SolrCloudManager cloudManager;
-  protected final DistribStateManager stateManager;
-
-  public ReplicaMutator(SolrCloudManager cloudManager) {
-    this.cloudManager = cloudManager;
-    this.stateManager = cloudManager.getDistribStateManager();
-  }
-
-  protected Replica setProperty(Replica replica, String key, String value) {
-    assert key != null;
-    assert value != null;
-
-    if (StringUtils.equalsIgnoreCase(replica.getStr(key), value))
-      return replica; // already the value we're going to set
-
-    Map<String, Object> replicaProps = new LinkedHashMap<>(replica.getProperties());
-    replicaProps.put(key, value);
-    return new Replica(replica.getName(), replicaProps);
-  }
-
-  protected Replica unsetProperty(Replica replica, String key) {
-    assert key != null;
-
-    if (!replica.containsKey(key)) return replica;
-    Map<String, Object> replicaProps = new LinkedHashMap<>(replica.getProperties());
-    replicaProps.remove(key);
-    return new Replica(replica.getName(), replicaProps);
-  }
-
-  protected Replica setLeader(Replica replica) {
-    return setProperty(replica, ZkStateReader.LEADER_PROP, "true");
-  }
-
-  protected Replica unsetLeader(Replica replica) {
-    return unsetProperty(replica, ZkStateReader.LEADER_PROP);
-  }
-
-  protected Replica setState(Replica replica, String state) {
-    assert state != null;
-
-    return setProperty(replica, ZkStateReader.STATE_PROP, state);
-  }
-
-  public ZkWriteCommand addReplicaProperty(ClusterState clusterState, ZkNodeProps message) {
-    if (!checkKeyExistence(message, ZkStateReader.COLLECTION_PROP) ||
-        !checkKeyExistence(message, ZkStateReader.SHARD_ID_PROP) ||
-        !checkKeyExistence(message, ZkStateReader.REPLICA_PROP) ||
-        !checkKeyExistence(message, ZkStateReader.PROPERTY_PROP) ||
-        !checkKeyExistence(message, ZkStateReader.PROPERTY_VALUE_PROP)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "Overseer ADDREPLICAPROP requires " +
-              ZkStateReader.COLLECTION_PROP + " and " + ZkStateReader.SHARD_ID_PROP + " and " +
-              ZkStateReader.REPLICA_PROP + " and " + ZkStateReader.PROPERTY_PROP + " and " +
-              ZkStateReader.PROPERTY_VALUE_PROP + " no action taken.");
-    }
-
-    String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
-    String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
-    String replicaName = message.getStr(ZkStateReader.REPLICA_PROP);
-    String property = message.getStr(ZkStateReader.PROPERTY_PROP).toLowerCase(Locale.ROOT);
-    if (StringUtils.startsWith(property, OverseerCollectionMessageHandler.COLL_PROP_PREFIX) == false) {
-      property = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + property;
-    }
-    property = property.toLowerCase(Locale.ROOT);
-    String propVal = message.getStr(ZkStateReader.PROPERTY_VALUE_PROP);
-    String shardUnique = message.getStr(OverseerCollectionMessageHandler.SHARD_UNIQUE);
-
-    boolean isUnique = false;
-
-    if (SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(property)) {
-      if (StringUtils.isNotBlank(shardUnique) && Boolean.parseBoolean(shardUnique) == false) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Overseer ADDREPLICAPROP for " +
-            property + " cannot have " + OverseerCollectionMessageHandler.SHARD_UNIQUE + " set to anything other than " +
-            "'true'. No action taken");
-      }
-      isUnique = true;
-    } else {
-      isUnique = Boolean.parseBoolean(shardUnique);
-    }
-
-    DocCollection collection = clusterState.getCollection(collectionName);
-    Replica replica = collection.getReplica(replicaName);
-
-    if (replica == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Could not find collection/slice/replica " +
-          collectionName + "/" + sliceName + "/" + replicaName + " no action taken.");
-    }
-    log.info("Setting property {} with value {} for collection {}", property, propVal, collectionName);
-    log.debug("Full message: {}", message);
-    if (StringUtils.equalsIgnoreCase(replica.getStr(property), propVal)) return ZkStateWriter.NO_OP; // already the value we're going to set
-
-    // OK, there's no way we won't change the cluster state now
-    Map<String,Replica> replicas = collection.getSlice(sliceName).getReplicasCopy();
-    if (isUnique == false) {
-      replicas.get(replicaName).getProperties().put(property, propVal);
-    } else { // Set prop for this replica, but remove it for all others.
-      for (Replica rep : replicas.values()) {
-        if (rep.getName().equalsIgnoreCase(replicaName)) {
-          rep.getProperties().put(property, propVal);
-        } else {
-          rep.getProperties().remove(property);
-        }
-      }
-    }
-    Slice newSlice = new Slice(sliceName, replicas, collection.getSlice(sliceName).shallowCopy());
-    DocCollection newCollection = CollectionMutator.updateSlice(collectionName, collection,
-        newSlice);
-    return new ZkWriteCommand(collectionName, newCollection);
-  }
-
-  public ZkWriteCommand deleteReplicaProperty(ClusterState clusterState, ZkNodeProps message) {
-    if (checkKeyExistence(message, ZkStateReader.COLLECTION_PROP) == false ||
-        checkKeyExistence(message, ZkStateReader.SHARD_ID_PROP) == false ||
-        checkKeyExistence(message, ZkStateReader.REPLICA_PROP) == false ||
-        checkKeyExistence(message, ZkStateReader.PROPERTY_PROP) == false) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "Overseer DELETEREPLICAPROP requires " +
-              ZkStateReader.COLLECTION_PROP + " and " + ZkStateReader.SHARD_ID_PROP + " and " +
-              ZkStateReader.REPLICA_PROP + " and " + ZkStateReader.PROPERTY_PROP + " no action taken.");
-    }
-    String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
-    String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
-    String replicaName = message.getStr(ZkStateReader.REPLICA_PROP);
-    String property = message.getStr(ZkStateReader.PROPERTY_PROP).toLowerCase(Locale.ROOT);
-    if (StringUtils.startsWith(property, OverseerCollectionMessageHandler.COLL_PROP_PREFIX) == false) {
-      property = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + property;
-    }
-
-    DocCollection collection = clusterState.getCollection(collectionName);
-    Replica replica = collection.getReplica(replicaName);
-
-    if (replica == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Could not find collection/slice/replica " +
-          collectionName + "/" + sliceName + "/" + replicaName + " no action taken.");
-    }
-
-    log.info("Deleting property {} for collection: {} slice: {} replica: {}", property, collectionName, sliceName, replicaName);
-    log.debug("Full message: {}", message);
-    String curProp = replica.getStr(property);
-    if (curProp == null) return ZkStateWriter.NO_OP; // not there anyway, nothing to do.
-
-    Slice slice = collection.getSlice(sliceName);
-    DocCollection newCollection = SliceMutator.updateReplica(collection,
-        slice, replicaName, unsetProperty(replica, property));
-    return new ZkWriteCommand(collectionName, newCollection);
-  }
-
-  public ZkWriteCommand setState(ClusterState clusterState, ZkNodeProps message) {
-    if (Overseer.isLegacy(cloudManager.getClusterStateProvider())) {
-      return updateState(clusterState, message);
-    } else {
-      return updateStateNew(clusterState, message);
-    }
-  }
-
-  protected ZkWriteCommand updateState(final ClusterState prevState, ZkNodeProps message) {
-    final String cName = message.getStr(ZkStateReader.COLLECTION_PROP);
-    if (!checkCollectionKeyExistence(message)) return ZkStateWriter.NO_OP;
-    Integer numShards = message.getInt(ZkStateReader.NUM_SHARDS_PROP, null);
-    log.debug("Update state numShards={} message={}", numShards, message);
-
-    List<String> shardNames = new ArrayList<>();
-
-    ZkWriteCommand writeCommand = null;
-    ClusterState newState = null;
-
-    //collection does not yet exist, create placeholders if num shards is specified
-    boolean collectionExists = prevState.hasCollection(cName);
-    if (!collectionExists && numShards != null) {
-      ClusterStateMutator.getShardNames(numShards, shardNames);
-      Map<String, Object> createMsg = Utils.makeMap(NAME, cName);
-      createMsg.putAll(message.getProperties());
-      writeCommand = new ClusterStateMutator(cloudManager).createCollection(prevState, new ZkNodeProps(createMsg));
-      DocCollection collection = writeCommand.collection;
-      newState = ClusterStateMutator.newState(prevState, cName, collection);
-    }
-    return updateState(newState != null ? newState : prevState,
-        message, cName, numShards, collectionExists);
-  }
-
-  private ZkWriteCommand updateState(final ClusterState prevState, ZkNodeProps message, String collectionName, Integer numShards, boolean collectionExists) {
-    String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
-    String coreNodeName = message.getStr(ZkStateReader.CORE_NODE_NAME_PROP);
-    boolean forceSetState = message.getBool(ZkStateReader.FORCE_SET_STATE_PROP, true);
-
-    DocCollection collection = prevState.getCollectionOrNull(collectionName);
-    if (!forceSetState && !CloudUtil.replicaExists(prevState, collectionName, sliceName, coreNodeName)) {
-      log.info("Failed to update state because the replica does not exist, {}", message);
-      return ZkStateWriter.NO_OP;
-    }
-
-    if (coreNodeName == null) {
-      coreNodeName = ClusterStateMutator.getAssignedCoreNodeName(collection,
-          message.getStr(ZkStateReader.NODE_NAME_PROP), message.getStr(ZkStateReader.CORE_NAME_PROP));
-      if (coreNodeName != null) {
-        log.debug("node=" + coreNodeName + " is already registered");
-      } else {
-        if (!forceSetState) {
-          log.info("Failed to update state because the replica does not exist, {}", message);
-          return ZkStateWriter.NO_OP;
-        }
-        // if coreNodeName is null, auto assign one
-        coreNodeName = Assign.assignCoreNodeName(stateManager, collection);
-      }
-      message.getProperties().put(ZkStateReader.CORE_NODE_NAME_PROP,
-          coreNodeName);
-    }
-
-    // use the provided shardId if present; otherwise look it up or assign a new one
-    if (sliceName == null) {
-      //get shardId from ClusterState
-      sliceName = ClusterStateMutator.getAssignedId(collection, coreNodeName);
-      if (sliceName != null) {
-        log.debug("shard=" + sliceName + " is already registered");
-      }
-    }
-    if (sliceName == null) {
-      //request new shardId
-      if (collectionExists) {
-        // use existing numShards
-        numShards = collection.getSlices().size();
-        log.debug("Collection already exists with " + ZkStateReader.NUM_SHARDS_PROP + "=" + numShards);
-      }
-      sliceName = Assign.assignShard(collection, numShards);
-      log.info("Assigning new node to shard shard=" + sliceName);
-    }
-
-    Slice slice = collection != null ?  collection.getSlice(sliceName) : null;
-
-    Map<String, Object> replicaProps = new LinkedHashMap<>(message.getProperties());
-    if (slice != null) {
-      Replica oldReplica = slice.getReplica(coreNodeName);
-      if (oldReplica != null) {
-        if (oldReplica.containsKey(ZkStateReader.LEADER_PROP)) {
-          replicaProps.put(ZkStateReader.LEADER_PROP, oldReplica.get(ZkStateReader.LEADER_PROP));
-        }
-        replicaProps.put(ZkStateReader.REPLICA_TYPE, oldReplica.getType().toString());
-        // Move custom props over.
-        for (Map.Entry<String, Object> ent : oldReplica.getProperties().entrySet()) {
-          if (ent.getKey().startsWith(OverseerCollectionMessageHandler.COLL_PROP_PREFIX)) {
-            replicaProps.put(ent.getKey(), ent.getValue());
-          }
-        }
-      }
-    }
-
-    // we don't put these in the clusterstate
-    replicaProps.remove(ZkStateReader.NUM_SHARDS_PROP);
-    replicaProps.remove(ZkStateReader.CORE_NODE_NAME_PROP);
-    replicaProps.remove(ZkStateReader.SHARD_ID_PROP);
-    replicaProps.remove(ZkStateReader.COLLECTION_PROP);
-    replicaProps.remove(Overseer.QUEUE_OPERATION);
-
-    // remove any props with null values
-    Set<Map.Entry<String, Object>> entrySet = replicaProps.entrySet();
-    List<String> removeKeys = new ArrayList<>();
-    for (Map.Entry<String, Object> entry : entrySet) {
-      if (entry.getValue() == null) {
-        removeKeys.add(entry.getKey());
-      }
-    }
-    for (String removeKey : removeKeys) {
-      replicaProps.remove(removeKey);
-    }
-    // remove shard specific properties
-    String shardRange = (String) replicaProps.remove(ZkStateReader.SHARD_RANGE_PROP);
-    String shardState = (String) replicaProps.remove(ZkStateReader.SHARD_STATE_PROP);
-    String shardParent = (String) replicaProps.remove(ZkStateReader.SHARD_PARENT_PROP);
-
-    Replica replica = new Replica(coreNodeName, replicaProps);
-    
-    log.debug("Will update state for replica: {}", replica);
-
-    Map<String, Object> sliceProps = null;
-    Map<String, Replica> replicas;
-
-    if (slice != null) {
-      collection = checkAndCompleteShardSplit(prevState, collection, coreNodeName, sliceName, replica);
-      // get the current slice again because it may have been updated due to checkAndCompleteShardSplit method
-      slice = collection.getSlice(sliceName);
-      sliceProps = slice.getProperties();
-      replicas = slice.getReplicasCopy();
-    } else {
-      replicas = new HashMap<>(1);
-      sliceProps = new HashMap<>();
-      sliceProps.put(Slice.RANGE, shardRange);
-      sliceProps.put(ZkStateReader.STATE_PROP, shardState);
-      sliceProps.put(Slice.PARENT, shardParent);
-    }
-    replicas.put(replica.getName(), replica);
-    slice = new Slice(sliceName, replicas, sliceProps);
-
-    DocCollection newCollection = CollectionMutator.updateSlice(collectionName, collection, slice);
-    log.debug("Collection is now: {}", newCollection);
-    return new ZkWriteCommand(collectionName, newCollection);
-  }
-
-  /**
-   * Handles non-legacy state updates
-   */
-  protected ZkWriteCommand updateStateNew(ClusterState clusterState, final ZkNodeProps message) {
-    String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
-    if (!checkCollectionKeyExistence(message)) return ZkStateWriter.NO_OP;
-    String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
-
-    if (collectionName == null || sliceName == null) {
-      log.error("Invalid collection and slice {}", message);
-      return ZkStateWriter.NO_OP;
-    }
-    DocCollection collection = clusterState.getCollectionOrNull(collectionName);
-    Slice slice = collection != null ? collection.getSlice(sliceName) : null;
-    if (slice == null) {
-      log.error("No such slice exists {}", message);
-      return ZkStateWriter.NO_OP;
-    }
-
-    return updateState(clusterState, message);
-  }
-
-  private DocCollection checkAndCompleteShardSplit(ClusterState prevState, DocCollection collection, String coreNodeName, String sliceName, Replica replica) {
-    Slice slice = collection.getSlice(sliceName);
-    Map<String, Object> sliceProps = slice.getProperties();
-    if (slice.getState() == Slice.State.RECOVERY) {
-      log.info("Shard: {} is in recovery state", sliceName);
-      // is this replica active?
-      if (replica.getState() == Replica.State.ACTIVE) {
-        log.info("Shard: {} is in recovery state and coreNodeName: {} is active", sliceName, coreNodeName);
-        // are all other replicas also active?
-        boolean allActive = true;
-        for (Map.Entry<String, Replica> entry : slice.getReplicasMap().entrySet()) {
-          if (coreNodeName.equals(entry.getKey())) continue;
-          if (entry.getValue().getState() != Replica.State.ACTIVE) {
-            allActive = false;
-            break;
-          }
-        }
-        if (allActive) {
-          log.info("Shard: {} - all replicas are active. Finding status of fellow sub-shards", sliceName);
-          // find out about other sub shards
-          Map<String, Slice> allSlicesCopy = new HashMap<>(collection.getSlicesMap());
-          List<Slice> subShardSlices = new ArrayList<>();
-          outer:
-          for (Map.Entry<String, Slice> entry : allSlicesCopy.entrySet()) {
-            if (sliceName.equals(entry.getKey()))
-              continue;
-            Slice otherSlice = entry.getValue();
-            if (otherSlice.getState() == Slice.State.RECOVERY) {
-              if (slice.getParent() != null && slice.getParent().equals(otherSlice.getParent())) {
-                log.info("Shard: {} - Fellow sub-shard: {} found", sliceName, otherSlice.getName());
-                // this is a fellow sub shard so check if all replicas are active
-                for (Map.Entry<String, Replica> sliceEntry : otherSlice.getReplicasMap().entrySet()) {
-                  if (sliceEntry.getValue().getState() != Replica.State.ACTIVE) {
-                    allActive = false;
-                    break outer;
-                  }
-                }
-                log.info("Shard: {} - Fellow sub-shard: {} has all replicas active", sliceName, otherSlice.getName());
-                subShardSlices.add(otherSlice);
-              }
-            }
-          }
-          if (allActive) {
-            // hurray, all sub shard replicas are active
-            log.info("Shard: {} - All replicas across all fellow sub-shards are now ACTIVE.", sliceName);
-            String parentSliceName = (String) sliceProps.remove(Slice.PARENT);
-            // now lets see if the parent leader is still the same or else there's a chance of data loss
-            // see SOLR-9438 for details
-            String shardParentZkSession = (String) sliceProps.remove("shard_parent_zk_session");
-            String shardParentNode = (String) sliceProps.remove("shard_parent_node");
-            boolean isLeaderSame = true;
-            if (shardParentNode != null && shardParentZkSession != null)  {
-              log.info("Checking whether sub-shard leader node is still the same one at {} with ZK session id {}", shardParentNode, shardParentZkSession);
-              try {
-                VersionedData leaderZnode = null;
-                try {
-                  leaderZnode = stateManager.getData(ZkStateReader.LIVE_NODES_ZKNODE
-                      + "/" + shardParentNode, null);
-                } catch (NoSuchElementException e) {
-                  // ignore
-                }
-                if (leaderZnode == null)  {
-                  log.error("The shard leader node: {} is not live anymore!", shardParentNode);
-                  isLeaderSame = false;
-                } else if (!shardParentZkSession.equals(leaderZnode.getOwner())) {
-                  log.error("The zk session id for shard leader node: {} has changed from {} to {}",
-                      shardParentNode, shardParentZkSession, leaderZnode.getOwner());
-                  isLeaderSame = false;
-                }
-              } catch (Exception e) {
-                log.warn("Error occurred while checking if parent shard node is still live with the same zk session id. " +
-                    "We cannot switch shard states at this time.", e);
-                return collection; // we aren't going to make any changes right now
-              }
-            }
-
-            Map<String, Object> propMap = new HashMap<>();
-            propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-            propMap.put(ZkStateReader.COLLECTION_PROP, collection.getName());
-            if (isLeaderSame) {
-              log.info("Sub-shard leader node is still the same one at {} with ZK session id {}. Preparing to switch shard states.", shardParentNode, shardParentZkSession);
-              propMap.put(parentSliceName, Slice.State.INACTIVE.toString());
-              propMap.put(sliceName, Slice.State.ACTIVE.toString());
-              long now = cloudManager.getTimeSource().getEpochTimeNs();
-              for (Slice subShardSlice : subShardSlices) {
-                propMap.put(subShardSlice.getName(), Slice.State.ACTIVE.toString());
-                String lastTimeStr = subShardSlice.getStr(ZkStateReader.STATE_TIMESTAMP_PROP);
-                if (lastTimeStr != null) {
-                  long start = Long.parseLong(lastTimeStr);
-                  log.info("TIMINGS: Sub-shard " + subShardSlice.getName() + " recovered in " +
-                      TimeUnit.MILLISECONDS.convert(now - start, TimeUnit.NANOSECONDS) + " ms");
-                } else {
-                  log.info("TIMINGS Sub-shard " + subShardSlice.getName() + " not available: " + subShardSlice);
-                }
-              }
-            } else  {
-              // we must mark the shard split as failed by switching sub-shards to recovery_failed state
-              propMap.put(sliceName, Slice.State.RECOVERY_FAILED.toString());
-              for (Slice subShardSlice : subShardSlices) {
-                propMap.put(subShardSlice.getName(), Slice.State.RECOVERY_FAILED.toString());
-              }
-            }
-            TestInjection.injectSplitLatch();
-            try {
-              SplitShardCmd.unlockForSplit(cloudManager, collection.getName(), parentSliceName);
-            } catch (Exception e) {
-              log.warn("Failed to unlock shard after " + (isLeaderSame ? "" : "un") + "successful split: {} / {}",
-                  collection.getName(), parentSliceName);
-            }
-            ZkNodeProps m = new ZkNodeProps(propMap);
-            return new SliceMutator(cloudManager).updateShardState(prevState, m).collection;
-          }
-        }
-      }
-    }
-    return collection;
-  }
-}
-

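For context on the ReplicaMutator file deleted above: setState() reads the well-known ZkStateReader keys out of an Overseer message, resolves or assigns the coreNodeName and shard, and returns a ZkWriteCommand carrying the rebuilt DocCollection. Below is a minimal sketch of that call path, assuming an Overseer-style context that already provides cloudManager, clusterState and a zkStateWriter; the collection and node names are placeholders and error handling is elided.

    // Sketch only: turn a replica state-change message into a buffered ZK write.
    ZkNodeProps message = new ZkNodeProps(
        Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
        ZkStateReader.COLLECTION_PROP, "techproducts",                    // placeholder
        ZkStateReader.SHARD_ID_PROP, "shard1",                            // placeholder
        ZkStateReader.CORE_NAME_PROP, "techproducts_shard1_replica_n1",   // placeholder
        ZkStateReader.NODE_NAME_PROP, "127.0.0.1:8983_solr",              // placeholder
        ZkStateReader.STATE_PROP, Replica.State.ACTIVE.toString());

    ZkWriteCommand cmd = new ReplicaMutator(cloudManager).setState(clusterState, message);
    if (cmd != ZkStateWriter.NO_OP) {
      // ZkStateWriter buffers the command and flushes to ZooKeeper when needed
      clusterState = zkStateWriter.enqueueUpdate(clusterState, Collections.singletonList(cmd), null);
    }
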
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
deleted file mode 100644
index c0a8a7b..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.overseer;
-
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.Set;
-
-import com.google.common.collect.ImmutableSet;
-import org.apache.solr.client.solrj.cloud.DistribStateManager;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.api.collections.Assign;
-import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.RoutingRule;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.overseer.CollectionMutator.checkCollectionKeyExistence;
-import static org.apache.solr.common.util.Utils.makeMap;
-
-public class SliceMutator {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String PREFERRED_LEADER_PROP = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + "preferredleader";
-
-  public static final Set<String> SLICE_UNIQUE_BOOLEAN_PROPERTIES = ImmutableSet.of(PREFERRED_LEADER_PROP);
-
-  protected final SolrCloudManager cloudManager;
-  protected final DistribStateManager stateManager;
-
-  public SliceMutator(SolrCloudManager cloudManager) {
-    this.cloudManager = cloudManager;
-    this.stateManager = cloudManager.getDistribStateManager();
-  }
-
-  public ZkWriteCommand addReplica(ClusterState clusterState, ZkNodeProps message) {
-    log.info("createReplica() {} ", message);
-    String coll = message.getStr(ZkStateReader.COLLECTION_PROP);
-    if (!checkCollectionKeyExistence(message)) return ZkStateWriter.NO_OP;
-    String slice = message.getStr(ZkStateReader.SHARD_ID_PROP);
-    DocCollection collection = clusterState.getCollection(coll);
-    Slice sl = collection.getSlice(slice);
-    if (sl == null) {
-      log.error("Invalid Collection/Slice {}/{} ", coll, slice);
-      return ZkStateWriter.NO_OP;
-    }
-    String coreNodeName;
-    if (message.getStr(ZkStateReader.CORE_NODE_NAME_PROP) != null) {
-      coreNodeName = message.getStr(ZkStateReader.CORE_NODE_NAME_PROP);
-    } else {
-      coreNodeName = Assign.assignCoreNodeName(stateManager, collection);
-    }
-    Replica replica = new Replica(coreNodeName,
-        makeMap(
-            ZkStateReader.CORE_NAME_PROP, message.getStr(ZkStateReader.CORE_NAME_PROP),
-            ZkStateReader.BASE_URL_PROP, message.getStr(ZkStateReader.BASE_URL_PROP),
-            ZkStateReader.STATE_PROP, message.getStr(ZkStateReader.STATE_PROP),
-            ZkStateReader.NODE_NAME_PROP, message.getStr(ZkStateReader.NODE_NAME_PROP), 
-            ZkStateReader.REPLICA_TYPE, message.get(ZkStateReader.REPLICA_TYPE)));
-    return new ZkWriteCommand(coll, updateReplica(collection, sl, replica.getName(), replica));
-  }
-
-  public ZkWriteCommand removeReplica(ClusterState clusterState, ZkNodeProps message) {
-    final String cnn = message.getStr(ZkStateReader.CORE_NODE_NAME_PROP);
-    final String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
-    final String baseUrl = message.getStr(ZkStateReader.BASE_URL_PROP);
-    if (!checkCollectionKeyExistence(message)) return ZkStateWriter.NO_OP;
-
-    DocCollection coll = clusterState.getCollectionOrNull(collection);
-    if (coll == null) {
-      // make sure we delete the zk nodes for this collection just to be safe
-      return new ZkWriteCommand(collection, null);
-    }
-
-    Map<String, Slice> newSlices = new LinkedHashMap<>();
-
-    for (Slice slice : coll.getSlices()) {
-      Replica replica = slice.getReplica(cnn);
-      if (replica != null && (baseUrl == null || baseUrl.equals(replica.getBaseUrl()))) {
-        Map<String, Replica> newReplicas = slice.getReplicasCopy();
-        newReplicas.remove(cnn);
-        slice = new Slice(slice.getName(), newReplicas, slice.getProperties());
-      }
-      newSlices.put(slice.getName(), slice);
-    }
-
-    return new ZkWriteCommand(collection, coll.copyWithSlices(newSlices));
-  }
-
-  public ZkWriteCommand setShardLeader(ClusterState clusterState, ZkNodeProps message) {
-    StringBuilder sb = new StringBuilder();
-    String baseUrl = message.getStr(ZkStateReader.BASE_URL_PROP);
-    String coreName = message.getStr(ZkStateReader.CORE_NAME_PROP);
-    sb.append(baseUrl);
-    if (baseUrl != null && !baseUrl.endsWith("/")) sb.append("/");
-    sb.append(coreName == null ? "" : coreName);
-    if (!(sb.substring(sb.length() - 1).equals("/"))) sb.append("/");
-    String leaderUrl = sb.length() > 0 ? sb.toString() : null;
-
-    String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
-    String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
-    DocCollection coll = clusterState.getCollectionOrNull(collectionName);
-
-    if (coll == null) {
-      log.error("Could not mark shard leader for non existing collection:" + collectionName);
-      return ZkStateWriter.NO_OP;
-    }
-
-    Map<String, Slice> slices = coll.getSlicesMap();
-    Slice slice = slices.get(sliceName);
-
-    Replica oldLeader = slice.getLeader();
-    final Map<String, Replica> newReplicas = new LinkedHashMap<>();
-    for (Replica replica : slice.getReplicas()) {
-      // TODO: this should only be calculated once and cached somewhere?
-      String coreURL = ZkCoreNodeProps.getCoreUrl(replica.getStr(ZkStateReader.BASE_URL_PROP), replica.getStr(ZkStateReader.CORE_NAME_PROP));
-
-      if (replica == oldLeader && !coreURL.equals(leaderUrl)) {
-        replica = new ReplicaMutator(cloudManager).unsetLeader(replica);
-      } else if (coreURL.equals(leaderUrl)) {
-        replica = new ReplicaMutator(cloudManager).setLeader(replica);
-      }
-
-      newReplicas.put(replica.getName(), replica);
-    }
-
-    Map<String, Object> newSliceProps = slice.shallowCopy();
-    newSliceProps.put(Slice.REPLICAS, newReplicas);
-    slice = new Slice(slice.getName(), newReplicas, slice.getProperties());
-    return new ZkWriteCommand(collectionName, CollectionMutator.updateSlice(collectionName, coll, slice));
-  }
-
-  public ZkWriteCommand updateShardState(ClusterState clusterState, ZkNodeProps message) {
-    String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
-    if (!checkCollectionKeyExistence(message)) return ZkStateWriter.NO_OP;
-    log.info("Update shard state invoked for collection: " + collectionName + " with message: " + message);
-
-    DocCollection collection = clusterState.getCollection(collectionName);
-    Map<String, Slice> slicesCopy = new LinkedHashMap<>(collection.getSlicesMap());
-    for (String key : message.keySet()) {
-      if (ZkStateReader.COLLECTION_PROP.equals(key)) continue;
-      if (Overseer.QUEUE_OPERATION.equals(key)) continue;
-
-      Slice slice = collection.getSlice(key);
-      if (slice == null) {
-        throw new RuntimeException("Overseer.updateShardState unknown collection: " + collectionName + " slice: " + key);
-      }
-      log.info("Update shard state " + key + " to " + message.getStr(key));
-      Map<String, Object> props = slice.shallowCopy();
-      
-      if (Slice.State.getState(message.getStr(key)) == Slice.State.ACTIVE) {
-        props.remove(Slice.PARENT);
-        props.remove("shard_parent_node");
-        props.remove("shard_parent_zk_session");
-      }
-      props.put(ZkStateReader.STATE_PROP, message.getStr(key));
-      // we need to use epoch time so that it's comparable across Overseer restarts
-      props.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(cloudManager.getTimeSource().getEpochTimeNs()));
-      Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props);
-      slicesCopy.put(slice.getName(), newSlice);
-    }
-
-    return new ZkWriteCommand(collectionName, collection.copyWithSlices(slicesCopy));
-  }
-
-  public ZkWriteCommand addRoutingRule(final ClusterState clusterState, ZkNodeProps message) {
-    String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
-    if (!checkCollectionKeyExistence(message)) return ZkStateWriter.NO_OP;
-    String shard = message.getStr(ZkStateReader.SHARD_ID_PROP);
-    String routeKey = message.getStr("routeKey");
-    String range = message.getStr("range");
-    String targetCollection = message.getStr("targetCollection");
-    String expireAt = message.getStr("expireAt");
-
-    DocCollection collection = clusterState.getCollection(collectionName);
-    Slice slice = collection.getSlice(shard);
-    if (slice == null) {
-      throw new RuntimeException("Overseer.addRoutingRule unknown collection: " + collectionName + " slice:" + shard);
-    }
-
-    Map<String, RoutingRule> routingRules = slice.getRoutingRules();
-    if (routingRules == null)
-      routingRules = new HashMap<>();
-    RoutingRule r = routingRules.get(routeKey);
-    if (r == null) {
-      Map<String, Object> map = new HashMap<>();
-      map.put("routeRanges", range);
-      map.put("targetCollection", targetCollection);
-      map.put("expireAt", expireAt);
-      RoutingRule rule = new RoutingRule(routeKey, map);
-      routingRules.put(routeKey, rule);
-    } else {
-      // add this range
-      Map<String, Object> map = r.shallowCopy();
-      map.put("routeRanges", map.get("routeRanges") + "," + range);
-      map.put("expireAt", expireAt);
-      routingRules.put(routeKey, new RoutingRule(routeKey, map));
-    }
-
-    Map<String, Object> props = slice.shallowCopy();
-    props.put("routingRules", routingRules);
-
-    Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props);
-    return new ZkWriteCommand(collectionName,
-        CollectionMutator.updateSlice(collectionName, collection, newSlice));
-  }
-
-  public ZkWriteCommand removeRoutingRule(final ClusterState clusterState, ZkNodeProps message) {
-    String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
-    if (!checkCollectionKeyExistence(message)) return ZkStateWriter.NO_OP;
-    String shard = message.getStr(ZkStateReader.SHARD_ID_PROP);
-    String routeKeyStr = message.getStr("routeKey");
-
-    log.info("Overseer.removeRoutingRule invoked for collection: " + collectionName
-        + " shard: " + shard + " routeKey: " + routeKeyStr);
-
-    DocCollection collection = clusterState.getCollection(collectionName);
-    Slice slice = collection.getSlice(shard);
-    if (slice == null) {
-      log.warn("Unknown collection: " + collectionName + " shard: " + shard);
-      return ZkStateWriter.NO_OP;
-    }
-    Map<String, RoutingRule> routingRules = slice.getRoutingRules();
-    if (routingRules != null) {
-      routingRules.remove(routeKeyStr); // drop the rule for this route key, if present
-      Map<String, Object> props = slice.shallowCopy();
-      props.put("routingRules", routingRules);
-      Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props);
-      return new ZkWriteCommand(collectionName,
-          CollectionMutator.updateSlice(collectionName, collection, newSlice));
-    }
-
-    return ZkStateWriter.NO_OP;
-  }
-
-  public static DocCollection updateReplica(DocCollection collection, final Slice slice, String coreNodeName, final Replica replica) {
-    Map<String, Replica> replicasCopy = slice.getReplicasCopy();
-    if (replica == null) {
-      replicasCopy.remove(coreNodeName);
-    } else {
-      replicasCopy.put(replica.getName(), replica);
-    }
-    Slice newSlice = new Slice(slice.getName(), replicasCopy, slice.getProperties());
-    log.debug("Old Slice: {}", slice);
-    log.debug("New Slice: {}", newSlice);
-    return CollectionMutator.updateSlice(collection.getName(), collection, newSlice);
-  }
-}
-

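Nearly every mutation in the SliceMutator file deleted above funnels into the static updateReplica() helper at its end: the slice's replica map is copied, one entry is put or removed, and a new immutable Slice and DocCollection are built. A two-line sketch of both shapes of that call, assuming collection, slice and replica objects are already in hand ("core_node3" is a hypothetical name):

    // Sketch: replicas are never mutated in place; passing null removes the entry.
    DocCollection added   = SliceMutator.updateReplica(collection, slice, replica.getName(), replica);
    DocCollection removed = SliceMutator.updateReplica(collection, slice, "core_node3", null);
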
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/overseer/ZkStateWriter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ZkStateWriter.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ZkStateWriter.java
deleted file mode 100644
index 0a5b2c1..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/ZkStateWriter.java
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.overseer;
-
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-import com.codahale.metrics.Timer;
-import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.Stats;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.Utils;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static java.util.Collections.singletonMap;
-
-/**
- * ZkStateWriter is responsible for writing updates to the cluster state stored in ZooKeeper for
- * both stateFormat=1 collections (stored in the shared /clusterstate.json in ZK) and stateFormat=2 collections,
- * each of which gets its own individual state.json in ZK.
- *
- * Updates to the cluster state are specified using the
- * {@link #enqueueUpdate(ClusterState, List, ZkWriteCallback)} method. The class buffers updates
- * to reduce the number of writes to ZK. The buffered updates are flushed during <code>enqueueUpdate</code>
- * automatically if necessary. The {@link #writePendingUpdates()} method can be used to force a flush of any pending updates.
- *
- * If either {@link #enqueueUpdate(ClusterState, List, ZkWriteCallback)} or {@link #writePendingUpdates()}
- * throws a {@link org.apache.zookeeper.KeeperException.BadVersionException} then the internal buffered state of the
- * class is suspect and the current instance of the class should be discarded and a new instance should be created
- * and used for any future updates.
- */
-public class ZkStateWriter {
-  private static final long MAX_FLUSH_INTERVAL = TimeUnit.NANOSECONDS.convert(Overseer.STATE_UPDATE_DELAY, TimeUnit.MILLISECONDS);
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  /**
-   * Represents a no-op {@link ZkWriteCommand} which will result in no modification to cluster state
-   */
-  public static ZkWriteCommand NO_OP = ZkWriteCommand.noop();
-
-  protected final ZkStateReader reader;
-  protected final Stats stats;
-
-  protected Map<String, DocCollection> updates = new HashMap<>();
-  private int numUpdates = 0;
-  protected ClusterState clusterState = null;
-  protected boolean isClusterStateModified = false;
-  protected long lastUpdatedTime = 0;
-
-  /**
-   * Set to true if we ever get a BadVersionException so that we can disallow future operations
-   * with this instance
-   */
-  protected boolean invalidState = false;
-
-  public ZkStateWriter(ZkStateReader zkStateReader, Stats stats) {
-    assert zkStateReader != null;
-
-    this.reader = zkStateReader;
-    this.stats = stats;
-    this.clusterState = zkStateReader.getClusterState();
-  }
-
-  /**
-   * Applies the given {@link ZkWriteCommand} on the <code>prevState</code>. The modified
-   * {@link ClusterState} is returned and it is expected that the caller will use the returned
-   * cluster state for the subsequent invocation of this method.
-   * <p>
-   * The modified state may be buffered or flushed to ZooKeeper depending on the internal buffering
-   * logic of this class. The {@link #hasPendingUpdates()} method may be used to determine if the
-   * last enqueue operation resulted in buffered state. The method {@link #writePendingUpdates()} can
-   * be used to force an immediate flush of pending cluster state changes.
-   *
-   * @param prevState the cluster state information on which the given <code>cmds</code> are applied
-   * @param cmds      the list of {@link ZkWriteCommand}s which specifies the changes to be applied to the cluster state atomically
-   * @param callback  a {@link org.apache.solr.cloud.overseer.ZkStateWriter.ZkWriteCallback} object to be used
-   *                  for any callbacks
-   * @return modified cluster state created after applying <code>cmds</code> to <code>prevState</code>. If
-   * all the commands are no-ops ({@link #NO_OP}) then the <code>prevState</code> is returned unmodified.
-   * @throws IllegalStateException if the current instance is no longer usable. The current instance must be
-   *                               discarded.
-   * @throws Exception             on an error in ZK operations or callback. If a flush to ZooKeeper results
-   *                               in a {@link org.apache.zookeeper.KeeperException.BadVersionException} this instance becomes unusable and
-   *                               must be discarded
-   */
-  public ClusterState enqueueUpdate(ClusterState prevState, List<ZkWriteCommand> cmds, ZkWriteCallback callback) throws IllegalStateException, Exception {
-    if (invalidState) {
-      throw new IllegalStateException("ZkStateWriter has seen a tragic error, this instance can no longer be used");
-    }
-    if (cmds.isEmpty()) return prevState;
-    if (isNoOps(cmds)) return prevState;
-
-    for (ZkWriteCommand cmd : cmds) {
-      if (cmd == NO_OP) continue;
-      if (!isClusterStateModified && clusterStateGetModifiedWith(cmd, prevState)) {
-        isClusterStateModified = true;
-      }
-      prevState = prevState.copyWith(cmd.name, cmd.collection);
-      if (cmd.collection == null || cmd.collection.getStateFormat() != 1) {
-        updates.put(cmd.name, cmd.collection);
-        numUpdates++;
-      }
-    }
-    clusterState = prevState;
-
-    if (maybeFlushAfter()) {
-      ClusterState state = writePendingUpdates();
-      if (callback != null) {
-        callback.onWrite();
-      }
-      return state;
-    }
-
-    return clusterState;
-  }
-
-  private boolean isNoOps(List<ZkWriteCommand> cmds) {
-    for (ZkWriteCommand cmd : cmds) {
-      if (cmd != NO_OP) return false;
-    }
-    return true;
-  }
-
-  /**
-   * Check whether {@value ZkStateReader#CLUSTER_STATE} (for stateFormat = 1) gets changed by the given command
-   */
-  private boolean clusterStateGetModifiedWith(ZkWriteCommand command, ClusterState state) {
-    DocCollection previousCollection = state.getCollectionOrNull(command.name);
-    boolean wasPreviouslyStateFormat1 = previousCollection != null && previousCollection.getStateFormat() == 1;
-    boolean isCurrentlyStateFormat1 = command.collection != null && command.collection.getStateFormat() == 1;
-    return wasPreviouslyStateFormat1 || isCurrentlyStateFormat1;
-  }
-
-  /**
-   * Logic to decide a flush after processing a list of ZkWriteCommand
-   *
-   * @return true if a flush to ZK is required, false otherwise
-   */
-  private boolean maybeFlushAfter() {
-    return System.nanoTime() - lastUpdatedTime > MAX_FLUSH_INTERVAL || numUpdates > Overseer.STATE_UPDATE_BATCH_SIZE;
-  }
-
-  public boolean hasPendingUpdates() {
-    return numUpdates != 0 || isClusterStateModified;
-  }
-
-  /**
-   * Writes all pending updates to ZooKeeper and returns the modified cluster state
-   *
-   * @return the modified cluster state
-   * @throws IllegalStateException if the current instance is no longer usable and must be discarded
-   * @throws KeeperException       if any ZooKeeper operation results in an error
-   * @throws InterruptedException  if the current thread is interrupted
-   */
-  public ClusterState writePendingUpdates() throws IllegalStateException, KeeperException, InterruptedException {
-    if (invalidState) {
-      throw new IllegalStateException("ZkStateWriter has seen a tragic error, this instance can no longer be used");
-    }
-    if (!hasPendingUpdates()) return clusterState;
-    Timer.Context timerContext = stats.time("update_state");
-    boolean success = false;
-    try {
-      if (!updates.isEmpty()) {
-        for (Map.Entry<String, DocCollection> entry : updates.entrySet()) {
-          String name = entry.getKey();
-          String path = ZkStateReader.getCollectionPath(name);
-          DocCollection c = entry.getValue();
-
-          if (c == null) {
-            // let's clean up the state.json of this collection only, the rest should be clean by delete collection cmd
-            log.debug("going to delete state.json {}", path);
-            reader.getZkClient().clean(path);
-          } else if (c.getStateFormat() > 1) {
-            byte[] data = Utils.toJSON(singletonMap(c.getName(), c));
-            if (reader.getZkClient().exists(path, true)) {
-              log.debug("going to update_collection {} version: {}", path, c.getZNodeVersion());
-              Stat stat = reader.getZkClient().setData(path, data, c.getZNodeVersion(), true);
-              DocCollection newCollection = new DocCollection(name, c.getSlicesMap(), c.getProperties(), c.getRouter(), stat.getVersion(), path);
-              clusterState = clusterState.copyWith(name, newCollection);
-            } else {
-              log.debug("going to create_collection {}", path);
-              reader.getZkClient().create(path, data, CreateMode.PERSISTENT, true);
-              DocCollection newCollection = new DocCollection(name, c.getSlicesMap(), c.getProperties(), c.getRouter(), 0, path);
-              clusterState = clusterState.copyWith(name, newCollection);
-            }
-          } else if (c.getStateFormat() == 1) {
-            isClusterStateModified = true;
-          }
-        }
-
-        updates.clear();
-        numUpdates = 0;
-      }
-
-      if (isClusterStateModified) {
-        assert clusterState.getZkClusterStateVersion() >= 0;
-        byte[] data = Utils.toJSON(clusterState);
-        Stat stat = reader.getZkClient().setData(ZkStateReader.CLUSTER_STATE, data, clusterState.getZkClusterStateVersion(), true);
-        Map<String, DocCollection> collections = clusterState.getCollectionsMap();
-        // use the reader's live nodes because our cluster state's live nodes may be stale
-        clusterState = new ClusterState(stat.getVersion(), reader.getClusterState().getLiveNodes(), collections);
-        isClusterStateModified = false;
-      }
-      lastUpdatedTime = System.nanoTime();
-      success = true;
-    } catch (KeeperException.BadVersionException bve) {
-      // this is a tragic error, we must disallow usage of this instance
-      invalidState = true;
-      throw bve;
-    } finally {
-      timerContext.stop();
-      if (success) {
-        stats.success("update_state");
-      } else {
-        stats.error("update_state");
-      }
-    }
-
-    log.trace("New Cluster State is: {}", clusterState);
-    return clusterState;
-  }
-
-  /**
-   * @return the most up-to-date cluster state as of the last enqueueUpdate operation
-   */
-  public ClusterState getClusterState() {
-    return clusterState;
-  }
-
-  public interface ZkWriteCallback {
-    /**
-     * Called by ZkStateWriter if state is flushed to ZK
-     */
-    void onWrite() throws Exception;
-  }
-}
-

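The class-level javadoc of the deleted ZkStateWriter spells out its contract: enqueueUpdate() buffers commands and may flush on its own schedule, writePendingUpdates() forces a flush, and a BadVersionException permanently invalidates the instance. A hedged sketch of that lifecycle follows; it is not taken verbatim from Overseer, zkStateReader and cmd are assumed to exist, and the enclosing method is assumed to declare throws Exception:

    ZkStateWriter writer = new ZkStateWriter(zkStateReader, new Stats());
    ClusterState state = zkStateReader.getClusterState();
    try {
      // may or may not flush, depending on batch size and elapsed time
      state = writer.enqueueUpdate(state, Collections.singletonList(cmd), null);
      if (writer.hasPendingUpdates()) {
        state = writer.writePendingUpdates(); // force the flush instead of waiting
      }
    } catch (KeeperException.BadVersionException e) {
      // the buffered state is now suspect: discard this writer and build a new one
      writer = new ZkStateWriter(zkStateReader, new Stats());
    }
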
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/overseer/ZkWriteCommand.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ZkWriteCommand.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ZkWriteCommand.java
deleted file mode 100644
index d464863..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/ZkWriteCommand.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.overseer;
-
-import org.apache.solr.common.cloud.DocCollection;
-
-public class ZkWriteCommand {
-  public final String name;
-  public final DocCollection collection;
-  public final boolean noop;
-
-  public ZkWriteCommand(String name, DocCollection collection) {
-    this.name = name;
-    this.collection = collection;
-    this.noop = false;
-  }
-
-  /**
-   * Returns a no-op
-   */
-  protected ZkWriteCommand() {
-    this.noop = true;
-    this.name = null;
-    this.collection = null;
-  }
-
-  public static ZkWriteCommand noop() {
-    return new ZkWriteCommand();
-  }
-
-  @Override
-  public String toString() {
-    return getClass().getSimpleName() + ": " + (noop ? "no-op" : name + "=" + collection);
-  }
-}
-

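ZkWriteCommand, deleted above, is just an immutable (name, collection, noop) triple, and the overseer code distinguishes three shapes of it. Illustrative only, with "techproducts" and newState as placeholders:

    ZkWriteCommand update = new ZkWriteCommand("techproducts", newState);  // write/overwrite the collection's state
    ZkWriteCommand delete = new ZkWriteCommand("techproducts", null);      // a null collection deletes its state (see SliceMutator.removeReplica)
    ZkWriteCommand noop   = ZkWriteCommand.noop();                         // leaves cluster state untouched
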
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/overseer/package-info.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/package-info.java b/solr/core/src/java/org/apache/solr/cloud/overseer/package-info.java
deleted file mode 100644
index dbd3b1d6..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- 
-/** 
- * Classes for updating cluster state in <a href="http://wiki.apache.org/solr/SolrCloud">SolrCloud</a> mode.
- */
-package org.apache.solr.cloud.overseer;
-
-

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/package-info.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/package-info.java b/solr/core/src/java/org/apache/solr/cloud/package-info.java
deleted file mode 100644
index 096d6fa..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- 
-/** 
- * Classes for dealing with ZooKeeper when operating in <a href="http://wiki.apache.org/solr/SolrCloud">SolrCloud</a> mode.
- */
-package org.apache.solr.cloud;
-
-

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/rule/ImplicitSnitch.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/rule/ImplicitSnitch.java b/solr/core/src/java/org/apache/solr/cloud/rule/ImplicitSnitch.java
deleted file mode 100644
index a4e998d..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/rule/ImplicitSnitch.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.rule;
-
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.handler.admin.CoreAdminHandler;
-import org.apache.solr.request.SolrQueryRequest;
-
-import static org.apache.solr.common.cloud.rule.ImplicitSnitch.CORES;
-import static org.apache.solr.common.cloud.rule.ImplicitSnitch.DISK;
-import static org.apache.solr.common.cloud.rule.ImplicitSnitch.SYSPROP;
-
-//this is the server-side component which provides the tag values
-public class ImplicitSnitch implements CoreAdminHandler.Invocable {
-
-  static long getUsableSpaceInGB(Path path) throws IOException {
-    long space = Files.getFileStore(path).getUsableSpace();
-    long spaceInGB = space / 1024 / 1024 / 1024;
-    return spaceInGB;
-  }
-
-  @Override
-  public Map<String, Object> invoke(SolrQueryRequest req) {
-    Map<String, Object> result = new HashMap<>();
-    CoreContainer cc = (CoreContainer) req.getContext().get(CoreContainer.class.getName());
-    if (req.getParams().getInt(CORES, -1) == 1) {
-      result.put(CORES, cc.getLoadedCoreNames().size());
-    }
-    if (req.getParams().getInt(DISK, -1) == 1) {
-      try {
-        final long spaceInGB = getUsableSpaceInGB(cc.getCoreRootDirectory());
-        result.put(DISK, spaceInGB);
-      } catch (IOException e) {
-        // ignore: if usable disk space cannot be determined, omit the DISK tag
-      }
-    }
-    String[] sysProps = req.getParams().getParams(SYSPROP);
-    if (sysProps != null && sysProps.length > 0) {
-      for (String prop : sysProps) result.put(SYSPROP + prop, System.getProperty(prop));
-    }
-    return result;
-  }
-
-}

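The DISK tag in the deleted ImplicitSnitch is simply the usable space of the FileStore behind the core root, integer-divided down to gigabytes. Below is a standalone sketch of the same arithmetic; the path is an assumed example, not something read from Solr configuration:

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class UsableSpaceSketch {
      public static void main(String[] args) throws Exception {
        Path coreRoot = Paths.get("/var/solr/data"); // assumed location
        long bytes = Files.getFileStore(coreRoot).getUsableSpace();
        long gb = bytes / 1024 / 1024 / 1024;        // same truncating division as getUsableSpaceInGB
        System.out.println("disk tag would be: " + gb);
      }
    }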

[40/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/MigrateCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/MigrateCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/MigrateCmd.java
deleted file mode 100644
index 59b7218..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/MigrateCmd.java
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.api.collections;
-
-import java.lang.invoke.MethodHandles;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.client.solrj.request.CoreAdminRequest;
-import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.CompositeIdRouter;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.RoutingRule;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.handler.component.ShardHandlerFactory;
-import org.apache.solr.update.SolrIndexSplitter;
-import org.apache.solr.util.TimeOut;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.common.util.Utils.makeMap;
-
-public class MigrateCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-  private final TimeSource timeSource;
-
-  public MigrateCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-    this.timeSource = ocmh.cloudManager.getTimeSource();
-  }
-
-
-  @Override
-  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    String sourceCollectionName = message.getStr("collection");
-    String splitKey = message.getStr("split.key");
-    String targetCollectionName = message.getStr("target.collection");
-    int timeout = message.getInt("forward.timeout", 10 * 60) * 1000;
-
-    DocCollection sourceCollection = clusterState.getCollection(sourceCollectionName);
-    if (sourceCollection == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown source collection: " + sourceCollectionName);
-    }
-    DocCollection targetCollection = clusterState.getCollection(targetCollectionName);
-    if (targetCollection == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown target collection: " + sourceCollectionName);
-    }
-    if (!(sourceCollection.getRouter() instanceof CompositeIdRouter)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Source collection must use a compositeId router");
-    }
-    if (!(targetCollection.getRouter() instanceof CompositeIdRouter)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Target collection must use a compositeId router");
-    }
-
-    if (splitKey == null || splitKey.trim().length() == 0) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The split.key cannot be null or empty");
-    }
-
-    CompositeIdRouter sourceRouter = (CompositeIdRouter) sourceCollection.getRouter();
-    CompositeIdRouter targetRouter = (CompositeIdRouter) targetCollection.getRouter();
-    Collection<Slice> sourceSlices = sourceRouter.getSearchSlicesSingle(splitKey, null, sourceCollection);
-    if (sourceSlices.isEmpty()) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "No active slices available in source collection: " + sourceCollection + "for given split.key: " + splitKey);
-    }
-    Collection<Slice> targetSlices = targetRouter.getSearchSlicesSingle(splitKey, null, targetCollection);
-    if (targetSlices.isEmpty()) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "No active slices available in target collection: " + targetCollection + "for given split.key: " + splitKey);
-    }
-
-    String asyncId = null;
-    if (message.containsKey(ASYNC) && message.get(ASYNC) != null)
-      asyncId = message.getStr(ASYNC);
-
-    for (Slice sourceSlice : sourceSlices) {
-      for (Slice targetSlice : targetSlices) {
-        log.info("Migrating source shard: {} to target shard: {} for split.key = " + splitKey, sourceSlice, targetSlice);
-        migrateKey(clusterState, sourceCollection, sourceSlice, targetCollection, targetSlice, splitKey,
-            timeout, results, asyncId, message);
-      }
-    }
-  }
-
-  private void migrateKey(ClusterState clusterState, DocCollection sourceCollection, Slice sourceSlice,
-                          DocCollection targetCollection, Slice targetSlice,
-                          String splitKey, int timeout,
-                          NamedList results, String asyncId, ZkNodeProps message) throws Exception {
-    String tempSourceCollectionName = "split_" + sourceSlice.getName() + "_temp_" + targetSlice.getName();
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    if (clusterState.hasCollection(tempSourceCollectionName)) {
-      log.info("Deleting temporary collection: " + tempSourceCollectionName);
-      Map<String, Object> props = makeMap(
-          Overseer.QUEUE_OPERATION, DELETE.toLower(),
-          NAME, tempSourceCollectionName);
-
-      try {
-        ocmh.commandMap.get(DELETE).call(zkStateReader.getClusterState(), new ZkNodeProps(props), results);
-        clusterState = zkStateReader.getClusterState();
-      } catch (Exception e) {
-        log.warn("Unable to clean up existing temporary collection: " + tempSourceCollectionName, e);
-      }
-    }
-
-    CompositeIdRouter sourceRouter = (CompositeIdRouter) sourceCollection.getRouter();
-    DocRouter.Range keyHashRange = sourceRouter.keyHashRange(splitKey);
-
-    ShardHandlerFactory shardHandlerFactory = ocmh.shardHandlerFactory;
-    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
-
-    log.info("Hash range for split.key: {} is: {}", splitKey, keyHashRange);
-    // intersect source range, keyHashRange and target range
-    // this is the range that has to be split from source and transferred to target
-    DocRouter.Range splitRange = ocmh.intersect(targetSlice.getRange(), ocmh.intersect(sourceSlice.getRange(), keyHashRange));
-    if (splitRange == null) {
-      log.info("No common hashes between source shard: {} and target shard: {}", sourceSlice.getName(), targetSlice.getName());
-      return;
-    }
-    log.info("Common hash range between source shard: {} and target shard: {} = " + splitRange, sourceSlice.getName(), targetSlice.getName());
-
-    Replica targetLeader = zkStateReader.getLeaderRetry(targetCollection.getName(), targetSlice.getName(), 10000);
-    // For tracking async calls.
-    Map<String, String> requestMap = new HashMap<>();
-
-    log.info("Asking target leader node: " + targetLeader.getNodeName() + " core: "
-        + targetLeader.getStr("core") + " to buffer updates");
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTBUFFERUPDATES.toString());
-    params.set(CoreAdminParams.NAME, targetLeader.getStr("core"));
-
-    ocmh.sendShardRequest(targetLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
-
-    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to request node to buffer updates", asyncId, requestMap);
-
-    ZkNodeProps m = new ZkNodeProps(
-        Overseer.QUEUE_OPERATION, OverseerAction.ADDROUTINGRULE.toLower(),
-        COLLECTION_PROP, sourceCollection.getName(),
-        SHARD_ID_PROP, sourceSlice.getName(),
-        "routeKey", SolrIndexSplitter.getRouteKey(splitKey) + "!",
-        "range", splitRange.toString(),
-        "targetCollection", targetCollection.getName(),
-        "expireAt", RoutingRule.makeExpiryAt(timeout));
-    log.info("Adding routing rule: " + m);
-    Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
-
-    // wait for a while until we see the new rule
-    log.info("Waiting to see routing rule updated in clusterstate");
-    TimeOut waitUntil = new TimeOut(60, TimeUnit.SECONDS, timeSource);
-    boolean added = false;
-    while (!waitUntil.hasTimedOut()) {
-      waitUntil.sleep(100);
-      sourceCollection = zkStateReader.getClusterState().getCollection(sourceCollection.getName());
-      sourceSlice = sourceCollection.getSlice(sourceSlice.getName());
-      Map<String, RoutingRule> rules = sourceSlice.getRoutingRules();
-      if (rules != null) {
-        RoutingRule rule = rules.get(SolrIndexSplitter.getRouteKey(splitKey) + "!");
-        if (rule != null && rule.getRouteRanges().contains(splitRange)) {
-          added = true;
-          break;
-        }
-      }
-    }
-    if (!added) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not add routing rule: " + m);
-    }
-
-    log.info("Routing rule added successfully");
-
-    // Create temp core on source shard
-    Replica sourceLeader = zkStateReader.getLeaderRetry(sourceCollection.getName(), sourceSlice.getName(), 10000);
-
-    // create a temporary collection with just one node on the shard leader
-    String configName = zkStateReader.readConfigName(sourceCollection.getName());
-    Map<String, Object> props = makeMap(
-        Overseer.QUEUE_OPERATION, CREATE.toLower(),
-        NAME, tempSourceCollectionName,
-        NRT_REPLICAS, 1,
-        OverseerCollectionMessageHandler.NUM_SLICES, 1,
-        CollectionAdminParams.COLL_CONF, configName,
-        OverseerCollectionMessageHandler.CREATE_NODE_SET, sourceLeader.getNodeName());
-    if (asyncId != null) {
-      String internalAsyncId = asyncId + Math.abs(System.nanoTime());
-      props.put(ASYNC, internalAsyncId);
-    }
-
-    log.info("Creating temporary collection: " + props);
-    ocmh.commandMap.get(CREATE).call(clusterState, new ZkNodeProps(props), results);
-    // refresh cluster state
-    clusterState = zkStateReader.getClusterState();
-    Slice tempSourceSlice = clusterState.getCollection(tempSourceCollectionName).getSlices().iterator().next();
-    Replica tempSourceLeader = zkStateReader.getLeaderRetry(tempSourceCollectionName, tempSourceSlice.getName(), 120000);
-
-    String tempCollectionReplica1 = tempSourceLeader.getCoreName();
-    String coreNodeName = ocmh.waitForCoreNodeName(tempSourceCollectionName,
-        sourceLeader.getNodeName(), tempCollectionReplica1);
-    // wait for the replicas to be seen as active on temp source leader
-    log.info("Asking source leader to wait for: " + tempCollectionReplica1 + " to be alive on: " + sourceLeader.getNodeName());
-    CoreAdminRequest.WaitForState cmd = new CoreAdminRequest.WaitForState();
-    cmd.setCoreName(tempCollectionReplica1);
-    cmd.setNodeName(sourceLeader.getNodeName());
-    cmd.setCoreNodeName(coreNodeName);
-    cmd.setState(Replica.State.ACTIVE);
-    cmd.setCheckLive(true);
-    cmd.setOnlyIfLeader(true);
-    // we don't want this to happen asynchronously
-    ocmh.sendShardRequest(tempSourceLeader.getNodeName(), new ModifiableSolrParams(cmd.getParams()), shardHandler, null, null);
-
-    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to create temp collection leader" +
-        " or timed out waiting for it to come up", asyncId, requestMap);
-
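-    // Split the source index so that only documents whose hash falls within splitRange
-    // are written to the temporary collection's core.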
-    log.info("Asking source leader to split index");
-    params = new ModifiableSolrParams();
-    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.SPLIT.toString());
-    params.set(CoreAdminParams.CORE, sourceLeader.getStr("core"));
-    params.add(CoreAdminParams.TARGET_CORE, tempSourceLeader.getStr("core"));
-    params.set(CoreAdminParams.RANGES, splitRange.toString());
-    params.set("split.key", splitKey);
-
-    String tempNodeName = sourceLeader.getNodeName();
-
-    ocmh.sendShardRequest(tempNodeName, params, shardHandler, asyncId, requestMap);
-    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to invoke SPLIT core admin command", asyncId, requestMap);
-
-    log.info("Creating a replica of temporary collection: {} on the target leader node: {}",
-        tempSourceCollectionName, targetLeader.getNodeName());
-    String tempCollectionReplica2 = Assign.buildSolrCoreName(ocmh.overseer.getSolrCloudManager().getDistribStateManager(),
-        zkStateReader.getClusterState().getCollection(tempSourceCollectionName), tempSourceSlice.getName(), Replica.Type.NRT);
-    props = new HashMap<>();
-    props.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
-    props.put(COLLECTION_PROP, tempSourceCollectionName);
-    props.put(SHARD_ID_PROP, tempSourceSlice.getName());
-    props.put("node", targetLeader.getNodeName());
-    props.put(CoreAdminParams.NAME, tempCollectionReplica2);
-    // copy over property params:
-    for (String key : message.keySet()) {
-      if (key.startsWith(OverseerCollectionMessageHandler.COLL_PROP_PREFIX)) {
-        props.put(key, message.getStr(key));
-      }
-    }
-    // add async param
-    if (asyncId != null) {
-      props.put(ASYNC, asyncId);
-    }
-    ((AddReplicaCmd)ocmh.commandMap.get(ADDREPLICA)).addReplica(clusterState, new ZkNodeProps(props), results, null);
-
-    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to create replica of " +
-        "temporary collection in target leader node.", asyncId, requestMap);
-
-    coreNodeName = ocmh.waitForCoreNodeName(tempSourceCollectionName,
-        targetLeader.getNodeName(), tempCollectionReplica2);
-    // wait for the replicas to be seen as active on temp source leader
-    log.info("Asking temp source leader to wait for: " + tempCollectionReplica2 + " to be alive on: " + targetLeader.getNodeName());
-    cmd = new CoreAdminRequest.WaitForState();
-    cmd.setCoreName(tempSourceLeader.getStr("core"));
-    cmd.setNodeName(targetLeader.getNodeName());
-    cmd.setCoreNodeName(coreNodeName);
-    cmd.setState(Replica.State.ACTIVE);
-    cmd.setCheckLive(true);
-    cmd.setOnlyIfLeader(true);
-    params = new ModifiableSolrParams(cmd.getParams());
-
-    ocmh.sendShardRequest(tempSourceLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
-
-    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to create temp collection" +
-        " replica or timed out waiting for them to come up", asyncId, requestMap);
-
-    log.info("Successfully created replica of temp source collection on target leader node");
-
-    log.info("Requesting merge of temp source collection replica to target leader");
-    params = new ModifiableSolrParams();
-    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.MERGEINDEXES.toString());
-    params.set(CoreAdminParams.CORE, targetLeader.getStr("core"));
-    params.set(CoreAdminParams.SRC_CORE, tempCollectionReplica2);
-
-    ocmh.sendShardRequest(targetLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
-    String msg = "MIGRATE failed to merge " + tempCollectionReplica2 + " to "
-        + targetLeader.getStr("core") + " on node: " + targetLeader.getNodeName();
-    ocmh.processResponses(results, shardHandler, true, msg, asyncId, requestMap);
-
-    log.info("Asking target leader to apply buffered updates");
-    params = new ModifiableSolrParams();
-    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTAPPLYUPDATES.toString());
-    params.set(CoreAdminParams.NAME, targetLeader.getStr("core"));
-
-    ocmh.sendShardRequest(targetLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
-    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to request node to apply buffered updates",
-        asyncId, requestMap);
-
-    try {
-      log.info("Deleting temporary collection: " + tempSourceCollectionName);
-      props = makeMap(
-          Overseer.QUEUE_OPERATION, DELETE.toLower(),
-          NAME, tempSourceCollectionName);
-      ocmh.commandMap.get(DELETE).call(zkStateReader.getClusterState(), new ZkNodeProps(props), results);
-    } catch (Exception e) {
-      log.error("Unable to delete temporary collection: " + tempSourceCollectionName
-          + ". Please remove it manually", e);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java
deleted file mode 100644
index 6071b1b..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.api.collections;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Locale;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.cloud.ActiveReplicaWatcher;
-import org.apache.solr.common.SolrCloseableLatch;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.update.UpdateLog;
-import org.apache.solr.util.TimeOut;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SKIP_CREATE_REPLICA_IN_CLUSTER_STATE;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonAdminParams.IN_PLACE_MOVE;
-import static org.apache.solr.common.params.CommonAdminParams.TIMEOUT;
-import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
-
-public class MoveReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-  private final TimeSource timeSource;
-
-  public MoveReplicaCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-    this.timeSource = ocmh.cloudManager.getTimeSource();
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    moveReplica(ocmh.zkStateReader.getClusterState(), message, results);
-  }
-
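-  // Resolves the replica to move (either named explicitly or picked at random from the
-  // source node's replicas of the given shard), enforces co-location constraints, then
-  // delegates to an in-place HDFS move or a normal copy-then-delete move.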
-  private void moveReplica(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    log.debug("moveReplica() : {}", Utils.toJSONString(message));
-    ocmh.checkRequired(message, COLLECTION_PROP, CollectionParams.TARGET_NODE);
-    String collection = message.getStr(COLLECTION_PROP);
-    String targetNode = message.getStr(CollectionParams.TARGET_NODE);
-    boolean waitForFinalState = message.getBool(WAIT_FOR_FINAL_STATE, false);
-    boolean inPlaceMove = message.getBool(IN_PLACE_MOVE, true);
-    int timeout = message.getInt(TIMEOUT, 10 * 60); // 10 minutes
-
-    String async = message.getStr(ASYNC);
-
-    DocCollection coll = clusterState.getCollection(collection);
-    if (coll == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection: " + collection + " does not exist");
-    }
-    if (!clusterState.getLiveNodes().contains(targetNode)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Target node: " + targetNode + " not in live nodes: " + clusterState.getLiveNodes());
-    }
-    Replica replica = null;
-    if (message.containsKey(REPLICA_PROP)) {
-      String replicaName = message.getStr(REPLICA_PROP);
-      replica = coll.getReplica(replicaName);
-      if (replica == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "Collection: " + collection + " replica: " + replicaName + " does not exist");
-      }
-    } else {
-      String sourceNode = message.getStr(CollectionParams.SOURCE_NODE, message.getStr(CollectionParams.FROM_NODE));
-      if (sourceNode == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'" + CollectionParams.SOURCE_NODE +
-            " or '" + CollectionParams.FROM_NODE + "' is a required param");
-      }
-      String shardId = message.getStr(SHARD_ID_PROP);
-      if (shardId == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'" + SHARD_ID_PROP + "' is a required param");
-      }
-      Slice slice = coll.getSlice(shardId);
-      List<Replica> sliceReplicas = new ArrayList<>(slice.getReplicas(r -> sourceNode.equals(r.getNodeName())));
-      if (sliceReplicas.isEmpty()) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "Collection: " + collection + " node: " + sourceNode + " does not have any replica belonging to shard: " + shardId);
-      }
-      Collections.shuffle(sliceReplicas, OverseerCollectionMessageHandler.RANDOM);
-      replica = sliceReplicas.iterator().next();
-    }
-
-    if (coll.getStr(CollectionAdminParams.COLOCATED_WITH) != null) {
-      // we must ensure that moving this replica does not cause the co-location to break
-      String sourceNode = replica.getNodeName();
-      String colocatedCollectionName = coll.getStr(CollectionAdminParams.COLOCATED_WITH);
-      DocCollection colocatedCollection = clusterState.getCollectionOrNull(colocatedCollectionName);
-      if (colocatedCollection != null) {
-        if (colocatedCollection.getReplica((s, r) -> sourceNode.equals(r.getNodeName())) != null) {
-          // check if we have at least two replicas of the collection on the source node
-          // only then it is okay to move one out to another node
-          List<Replica> replicasOnSourceNode = coll.getReplicas(replica.getNodeName());
-          if (replicasOnSourceNode == null || replicasOnSourceNode.size() < 2) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-                "Collection: " + collection + " is co-located with collection: " + colocatedCollectionName
-                    + " and has a single replica: " + replica.getName() + " on node: " + replica.getNodeName()
-                    + " so it is not possible to move it to another node");
-          }
-        }
-      }
-    }
-
-    log.info("Replica will be moved to node {}: {}", targetNode, replica);
-    Slice slice = null;
-    for (Slice s : coll.getSlices()) {
-      if (s.getReplicas().contains(replica)) {
-        slice = s;
-      }
-    }
-    assert slice != null;
-    Object dataDir = replica.get("dataDir");
-    boolean isSharedFS = replica.getBool(ZkStateReader.SHARED_STORAGE_PROP, false) && dataDir != null;
-
-    if (isSharedFS && inPlaceMove) {
-      log.debug("-- moveHdfsReplica");
-      moveHdfsReplica(clusterState, results, dataDir.toString(), targetNode, async, coll, replica, slice, timeout, waitForFinalState);
-    } else {
-      log.debug("-- moveNormalReplica (inPlaceMove=" + inPlaceMove + ", isSharedFS=" + isSharedFS);
-      moveNormalReplica(clusterState, results, targetNode, async, coll, replica, slice, timeout, waitForFinalState);
-    }
-  }
-
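-  // Moves a replica whose data lives on shared storage: if the source node is live, the
-  // old core is deleted first (keeping the data and index directories), then a new core
-  // is created on the target node pointing at the same dataDir and ulogDir.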
-  private void moveHdfsReplica(ClusterState clusterState, NamedList results, String dataDir, String targetNode, String async,
-                                 DocCollection coll, Replica replica, Slice slice, int timeout, boolean waitForFinalState) throws Exception {
-    String skipCreateReplicaInClusterState = "true";
-    if (clusterState.getLiveNodes().contains(replica.getNodeName())) {
-      skipCreateReplicaInClusterState = "false";
-      ZkNodeProps removeReplicasProps = new ZkNodeProps(
-          COLLECTION_PROP, coll.getName(),
-          SHARD_ID_PROP, slice.getName(),
-          REPLICA_PROP, replica.getName()
-      );
-      removeReplicasProps.getProperties().put(CoreAdminParams.DELETE_DATA_DIR, false);
-      removeReplicasProps.getProperties().put(CoreAdminParams.DELETE_INDEX, false);
-      if (async != null) removeReplicasProps.getProperties().put(ASYNC, async);
-      NamedList deleteResult = new NamedList();
-      try {
-        ocmh.deleteReplica(clusterState, removeReplicasProps, deleteResult, null);
-      } catch (SolrException e) {
-        // assume this failed completely so there's nothing to roll back
-        deleteResult.add("failure", e.toString());
-      }
-      if (deleteResult.get("failure") != null) {
-        String errorString = String.format(Locale.ROOT, "Failed to cleanup replica collection=%s shard=%s name=%s, failure=%s",
-            coll.getName(), slice.getName(), replica.getName(), deleteResult.get("failure"));
-        log.warn(errorString);
-        results.add("failure", errorString);
-        return;
-      }
-
-      TimeOut timeOut = new TimeOut(20L, TimeUnit.SECONDS, timeSource);
-      while (!timeOut.hasTimedOut()) {
-        coll = ocmh.zkStateReader.getClusterState().getCollection(coll.getName());
-        if (coll.getReplica(replica.getName()) != null) {
-          timeOut.sleep(100);
-        } else {
-          break;
-        }
-      }
-      if (timeOut.hasTimedOut()) {
-        results.add("failure", "Still see deleted replica in clusterstate!");
-        return;
-      }
-
-    }
-
-    String ulogDir = replica.getStr(CoreAdminParams.ULOG_DIR);
-    ZkNodeProps addReplicasProps = new ZkNodeProps(
-        COLLECTION_PROP, coll.getName(),
-        SHARD_ID_PROP, slice.getName(),
-        CoreAdminParams.NODE, targetNode,
-        CoreAdminParams.CORE_NODE_NAME, replica.getName(),
-        CoreAdminParams.NAME, replica.getCoreName(),
-        WAIT_FOR_FINAL_STATE, String.valueOf(waitForFinalState),
-        SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, skipCreateReplicaInClusterState,
-        CoreAdminParams.ULOG_DIR, ulogDir.substring(0, ulogDir.lastIndexOf(UpdateLog.TLOG_NAME)),
-        CoreAdminParams.DATA_DIR, dataDir);
-    if (async != null) addReplicasProps.getProperties().put(ASYNC, async);
-    NamedList addResult = new NamedList();
-    try {
-      ocmh.addReplica(ocmh.zkStateReader.getClusterState(), addReplicasProps, addResult, null);
-    } catch (Exception e) {
-      // fatal error - try rolling back
-      String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
-          " on node=%s, failure=%s", coll.getName(), slice.getName(), targetNode, e.toString());
-      results.add("failure", errorString);
-      log.warn("Error adding replica " + addReplicasProps + " - trying to roll back...", e);
-      addReplicasProps = addReplicasProps.plus(CoreAdminParams.NODE, replica.getNodeName());
-      NamedList rollback = new NamedList();
-      ocmh.addReplica(ocmh.zkStateReader.getClusterState(), addReplicasProps, rollback, null);
-      if (rollback.get("failure") != null) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Fatal error during MOVEREPLICA of " + replica
-            + ", collection may be inconsistent: " + rollback.get("failure"));
-      }
-      return;
-    }
-    if (addResult.get("failure") != null) {
-      String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
-          " on node=%s, failure=%s", coll.getName(), slice.getName(), targetNode, addResult.get("failure"));
-      log.warn(errorString);
-      results.add("failure", errorString);
-      log.debug("--- trying to roll back...");
-      // try to roll back
-      addReplicasProps = addReplicasProps.plus(CoreAdminParams.NODE, replica.getNodeName());
-      NamedList rollback = new NamedList();
-      try {
-        ocmh.addReplica(ocmh.zkStateReader.getClusterState(), addReplicasProps, rollback, null);
-      } catch (Exception e) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Fatal error during MOVEREPLICA of " + replica
-            + ", collection may be inconsistent!", e);
-      }
-      if (rollback.get("failure") != null) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Fatal error during MOVEREPLICA of " + replica
-            + ", collection may be inconsistent! Failure: " + rollback.get("failure"));
-      }
-      return;
-    } else {
-      String successString = String.format(Locale.ROOT, "MOVEREPLICA action completed successfully, moved replica=%s at node=%s " +
-          "to replica=%s at node=%s", replica.getCoreName(), replica.getNodeName(), replica.getCoreName(), targetNode);
-      results.add("success", successString);
-    }
-  }
-
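-  // Moves a replica by first adding a new replica on the target node, waiting for it to
-  // become active when the source is the shard leader (or waitForFinalState is set), and
-  // only then deleting the source replica.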
-  private void moveNormalReplica(ClusterState clusterState, NamedList results, String targetNode, String async,
-                                 DocCollection coll, Replica replica, Slice slice, int timeout, boolean waitForFinalState) throws Exception {
-    String newCoreName = Assign.buildSolrCoreName(ocmh.overseer.getSolrCloudManager().getDistribStateManager(), coll, slice.getName(), replica.getType());
-    ZkNodeProps addReplicasProps = new ZkNodeProps(
-        COLLECTION_PROP, coll.getName(),
-        SHARD_ID_PROP, slice.getName(),
-        CoreAdminParams.NODE, targetNode,
-        CoreAdminParams.NAME, newCoreName);
-    if (async != null) addReplicasProps.getProperties().put(ASYNC, async);
-    NamedList addResult = new NamedList();
-    SolrCloseableLatch countDownLatch = new SolrCloseableLatch(1, ocmh);
-    ActiveReplicaWatcher watcher = null;
-    ZkNodeProps props = ocmh.addReplica(clusterState, addReplicasProps, addResult, null).get(0);
-    log.debug("props " + props);
-    if (replica.equals(slice.getLeader()) || waitForFinalState) {
-      watcher = new ActiveReplicaWatcher(coll.getName(), null, Collections.singletonList(newCoreName), countDownLatch);
-      log.debug("-- registered watcher " + watcher);
-      ocmh.zkStateReader.registerCollectionStateWatcher(coll.getName(), watcher);
-    }
-    if (addResult.get("failure") != null) {
-      String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
-          " on node=%s, failure=%s", coll.getName(), slice.getName(), targetNode, addResult.get("failure"));
-      log.warn(errorString);
-      results.add("failure", errorString);
-      if (watcher != null) { // unregister
-        ocmh.zkStateReader.removeCollectionStateWatcher(coll.getName(), watcher);
-      }
-      return;
-    }
-    // wait for the other replica to be active if the source replica was a leader
-    if (watcher != null) {
-      try {
-        log.debug("Waiting for leader's replica to recover.");
-        if (!countDownLatch.await(timeout, TimeUnit.SECONDS)) {
-          String errorString = String.format(Locale.ROOT, "Timed out waiting for leader's replica to recover, collection=%s shard=%s" +
-              " on node=%s", coll.getName(), slice.getName(), targetNode);
-          log.warn(errorString);
-          results.add("failure", errorString);
-          return;
-        } else {
-          log.debug("Replica " + watcher.getActiveReplicas() + " is active - deleting the source...");
-        }
-      } finally {
-        ocmh.zkStateReader.removeCollectionStateWatcher(coll.getName(), watcher);
-      }
-    }
-
-    ZkNodeProps removeReplicasProps = new ZkNodeProps(
-        COLLECTION_PROP, coll.getName(),
-        SHARD_ID_PROP, slice.getName(),
-        REPLICA_PROP, replica.getName());
-    if (async != null) removeReplicasProps.getProperties().put(ASYNC, async);
-    NamedList deleteResult = new NamedList();
-    try {
-      ocmh.deleteReplica(clusterState, removeReplicasProps, deleteResult, null);
-    } catch (SolrException e) {
-      deleteResult.add("failure", e.toString());
-    }
-    if (deleteResult.get("failure") != null) {
-      String errorString = String.format(Locale.ROOT, "Failed to cleanup replica collection=%s shard=%s name=%s, failure=%s",
-          coll.getName(), slice.getName(), replica.getName(), deleteResult.get("failure"));
-      log.warn(errorString);
-      results.add("failure", errorString);
-    } else {
-      String successString = String.format(Locale.ROOT, "MOVEREPLICA action completed successfully, moved replica=%s at node=%s " +
-          "to replica=%s at node=%s", replica.getCoreName(), replica.getNodeName(), newCoreName, targetNode);
-      results.add("success", successString);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
deleted file mode 100644
index a724bc7..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
+++ /dev/null
@@ -1,1003 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.api.collections;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.TimeUnit;
-
-import com.google.common.collect.ImmutableMap;
-import org.apache.commons.lang.StringUtils;
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.cloud.DistribStateManager;
-import org.apache.solr.client.solrj.cloud.DistributedQueue;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
-import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException;
-import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.client.solrj.response.UpdateResponse;
-import org.apache.solr.cloud.LockTree;
-import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.OverseerMessageHandler;
-import org.apache.solr.cloud.OverseerNodePrioritizer;
-import org.apache.solr.cloud.OverseerSolrResponse;
-import org.apache.solr.cloud.OverseerTaskProcessor;
-import org.apache.solr.cloud.Stats;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.common.SolrCloseable;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkConfigManager;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CollectionParams.CollectionAction;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.SuppressForbidden;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.handler.component.ShardHandlerFactory;
-import org.apache.solr.handler.component.ShardRequest;
-import org.apache.solr.handler.component.ShardResponse;
-import org.apache.solr.logging.MDCLoggingContext;
-import org.apache.solr.util.DefaultSolrThreadFactory;
-import org.apache.solr.util.RTimer;
-import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.POLICY;
-import static org.apache.solr.common.cloud.DocCollection.SNITCH;
-import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NODE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.ELECTION_NODE_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_VALUE_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REJOIN_AT_HEAD_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionAdminParams.COLLECTION;
-import static org.apache.solr.common.params.CollectionAdminParams.COLOCATED_WITH;
-import static org.apache.solr.common.params.CollectionAdminParams.WITH_COLLECTION;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.*;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.common.util.Utils.makeMap;
-
-/**
- * A {@link OverseerMessageHandler} that handles Collections API related
- * overseer messages.
- */
-public class OverseerCollectionMessageHandler implements OverseerMessageHandler, SolrCloseable {
-
-  public static final String NUM_SLICES = "numShards";
-
-  public static final boolean CREATE_NODE_SET_SHUFFLE_DEFAULT = true;
-  public static final String CREATE_NODE_SET_SHUFFLE = CollectionAdminParams.CREATE_NODE_SET_SHUFFLE_PARAM;
-  public static final String CREATE_NODE_SET_EMPTY = "EMPTY";
-  public static final String CREATE_NODE_SET = CollectionAdminParams.CREATE_NODE_SET_PARAM;
-
-  public static final String ROUTER = "router";
-
-  public static final String SHARDS_PROP = "shards";
-
-  public static final String REQUESTID = "requestid";
-
-  public static final String COLL_PROP_PREFIX = "property.";
-
-  public static final String ONLY_IF_DOWN = "onlyIfDown";
-
-  public static final String SHARD_UNIQUE = "shardUnique";
-
-  public static final String ONLY_ACTIVE_NODES = "onlyactivenodes";
-
-  static final String SKIP_CREATE_REPLICA_IN_CLUSTER_STATE = "skipCreateReplicaInClusterState";
-
-  public static final Map<String, Object> COLLECTION_PROPS_AND_DEFAULTS = Collections.unmodifiableMap(makeMap(
-      ROUTER, DocRouter.DEFAULT_NAME,
-      ZkStateReader.REPLICATION_FACTOR, "1",
-      ZkStateReader.NRT_REPLICAS, "1",
-      ZkStateReader.TLOG_REPLICAS, "0",
-      ZkStateReader.PULL_REPLICAS, "0",
-      ZkStateReader.MAX_SHARDS_PER_NODE, "1",
-      ZkStateReader.AUTO_ADD_REPLICAS, "false",
-      DocCollection.RULE, null,
-      POLICY, null,
-      SNITCH, null,
-      WITH_COLLECTION, null,
-      COLOCATED_WITH, null));
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  Overseer overseer;
-  ShardHandlerFactory shardHandlerFactory;
-  String adminPath;
-  ZkStateReader zkStateReader;
-  SolrCloudManager cloudManager;
-  String myId;
-  Stats stats;
-  TimeSource timeSource;
-
-  // Lock tree that tracks collections currently being processed by a running task.
-  // This is used for handling mutual exclusion of the tasks.
-  private final LockTree lockTree = new LockTree();
-  ExecutorService tpe = new ExecutorUtil.MDCAwareThreadPoolExecutor(5, 10, 0L, TimeUnit.MILLISECONDS,
-      new SynchronousQueue<>(),
-      new DefaultSolrThreadFactory("OverseerCollectionMessageHandlerThreadFactory"));
-
-  protected static final Random RANDOM;
-  static {
-    // We try to make things reproducible in the context of our tests by initializing the random instance
-    // based on the current seed
-    String seed = System.getProperty("tests.seed");
-    if (seed == null) {
-      RANDOM = new Random();
-    } else {
-      RANDOM = new Random(seed.hashCode());
-    }
-  }
-
-  final Map<CollectionAction, Cmd> commandMap;
-
-  private volatile boolean isClosed;
-
-  public OverseerCollectionMessageHandler(ZkStateReader zkStateReader, String myId,
-                                        final ShardHandlerFactory shardHandlerFactory,
-                                        String adminPath,
-                                        Stats stats,
-                                        Overseer overseer,
-                                        OverseerNodePrioritizer overseerPrioritizer) {
-    this.zkStateReader = zkStateReader;
-    this.shardHandlerFactory = shardHandlerFactory;
-    this.adminPath = adminPath;
-    this.myId = myId;
-    this.stats = stats;
-    this.overseer = overseer;
-    this.cloudManager = overseer.getSolrCloudManager();
-    this.timeSource = cloudManager.getTimeSource();
-    this.isClosed = false;
-    commandMap = new ImmutableMap.Builder<CollectionAction, Cmd>()
-        .put(REPLACENODE, new ReplaceNodeCmd(this))
-        .put(DELETENODE, new DeleteNodeCmd(this))
-        .put(BACKUP, new BackupCmd(this))
-        .put(RESTORE, new RestoreCmd(this))
-        .put(CREATESNAPSHOT, new CreateSnapshotCmd(this))
-        .put(DELETESNAPSHOT, new DeleteSnapshotCmd(this))
-        .put(SPLITSHARD, new SplitShardCmd(this))
-        .put(ADDROLE, new OverseerRoleCmd(this, ADDROLE, overseerPrioritizer))
-        .put(REMOVEROLE, new OverseerRoleCmd(this, REMOVEROLE, overseerPrioritizer))
-        .put(MOCK_COLL_TASK, this::mockOperation)
-        .put(MOCK_SHARD_TASK, this::mockOperation)
-        .put(MOCK_REPLICA_TASK, this::mockOperation)
-        .put(MIGRATESTATEFORMAT, this::migrateStateFormat)
-        .put(CREATESHARD, new CreateShardCmd(this))
-        .put(MIGRATE, new MigrateCmd(this))
-        .put(CREATE, new CreateCollectionCmd(this))
-        .put(MODIFYCOLLECTION, this::modifyCollection)
-        .put(ADDREPLICAPROP, this::processReplicaAddPropertyCommand)
-        .put(DELETEREPLICAPROP, this::processReplicaDeletePropertyCommand)
-        .put(BALANCESHARDUNIQUE, this::balanceProperty)
-        .put(REBALANCELEADERS, this::processRebalanceLeaders)
-        .put(RELOAD, this::reloadCollection)
-        .put(DELETE, new DeleteCollectionCmd(this))
-        .put(CREATEALIAS, new CreateAliasCmd(this))
-        .put(DELETEALIAS, new DeleteAliasCmd(this))
-        .put(ALIASPROP, new SetAliasPropCmd(this))
-        .put(MAINTAINROUTEDALIAS, new MaintainRoutedAliasCmd(this))
-        .put(OVERSEERSTATUS, new OverseerStatusCmd(this))
-        .put(DELETESHARD, new DeleteShardCmd(this))
-        .put(DELETEREPLICA, new DeleteReplicaCmd(this))
-        .put(ADDREPLICA, new AddReplicaCmd(this))
-        .put(MOVEREPLICA, new MoveReplicaCmd(this))
-        .put(UTILIZENODE, new UtilizeNodeCmd(this))
-        .build();
-  }
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public SolrResponse processMessage(ZkNodeProps message, String operation) {
-    MDCLoggingContext.setCollection(message.getStr(COLLECTION));
-    MDCLoggingContext.setShard(message.getStr(SHARD_ID_PROP));
-    MDCLoggingContext.setReplica(message.getStr(REPLICA_PROP));
-    log.debug("OverseerCollectionMessageHandler.processMessage : {} , {}", operation, message);
-
-    NamedList results = new NamedList();
-    try {
-      CollectionAction action = getCollectionAction(operation);
-      Cmd command = commandMap.get(action);
-      if (command != null) {
-        command.call(cloudManager.getClusterStateProvider().getClusterState(), message, results);
-      } else {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown operation:"
-            + operation);
-      }
-    } catch (Exception e) {
-      String collName = message.getStr("collection");
-      if (collName == null) collName = message.getStr(NAME);
-
-      if (collName == null) {
-        SolrException.log(log, "Operation " + operation + " failed", e);
-      } else  {
-        SolrException.log(log, "Collection: " + collName + " operation: " + operation
-            + " failed", e);
-      }
-
-      results.add("Operation " + operation + " caused exception:", e);
-      SimpleOrderedMap nl = new SimpleOrderedMap();
-      nl.add("msg", e.getMessage());
-      nl.add("rspCode", e instanceof SolrException ? ((SolrException)e).code() : -1);
-      results.add("exception", nl);
-    }
-    return new OverseerSolrResponse(results);
-  }
-
-  @SuppressForbidden(reason = "Needs currentTimeMillis for mock requests")
-  private void mockOperation(ClusterState state, ZkNodeProps message, NamedList results) throws InterruptedException {
-    //only for test purposes
-    Thread.sleep(message.getInt("sleep", 1));
-    log.info("MOCK_TASK_EXECUTED time {} data {}", System.currentTimeMillis(), Utils.toJSONString(message));
-    results.add("MOCK_FINISHED", System.currentTimeMillis());
-  }
-
-  private CollectionAction getCollectionAction(String operation) {
-    CollectionAction action = CollectionAction.get(operation);
-    if (action == null) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown operation:" + operation);
-    }
-    return action;
-  }
-
-  private void reloadCollection(ClusterState clusterState, ZkNodeProps message, NamedList results) {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(CoreAdminParams.ACTION, CoreAdminAction.RELOAD.toString());
-
-    String asyncId = message.getStr(ASYNC);
-    Map<String, String> requestMap = null;
-    if (asyncId != null) {
-      requestMap = new HashMap<>();
-    }
-    collectionCmd(message, params, results, Replica.State.ACTIVE, asyncId, requestMap);
-  }
-
-  @SuppressWarnings("unchecked")
-  private void processRebalanceLeaders(ClusterState clusterState, ZkNodeProps message, NamedList results)
-      throws Exception {
-    checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, CORE_NAME_PROP, ELECTION_NODE_PROP,
-        CORE_NODE_NAME_PROP, BASE_URL_PROP, REJOIN_AT_HEAD_PROP);
-
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(COLLECTION_PROP, message.getStr(COLLECTION_PROP));
-    params.set(SHARD_ID_PROP, message.getStr(SHARD_ID_PROP));
-    params.set(REJOIN_AT_HEAD_PROP, message.getStr(REJOIN_AT_HEAD_PROP));
-    params.set(CoreAdminParams.ACTION, CoreAdminAction.REJOINLEADERELECTION.toString());
-    params.set(CORE_NAME_PROP, message.getStr(CORE_NAME_PROP));
-    params.set(CORE_NODE_NAME_PROP, message.getStr(CORE_NODE_NAME_PROP));
-    params.set(ELECTION_NODE_PROP, message.getStr(ELECTION_NODE_PROP));
-    params.set(BASE_URL_PROP, message.getStr(BASE_URL_PROP));
-
-    String baseUrl = message.getStr(BASE_URL_PROP);
-    ShardRequest sreq = new ShardRequest();
-    sreq.nodeName = message.getStr(ZkStateReader.CORE_NAME_PROP);
-    // yes, they must use same admin handler path everywhere...
-    params.set("qt", adminPath);
-    sreq.purpose = ShardRequest.PURPOSE_PRIVATE;
-    sreq.shards = new String[] {baseUrl};
-    sreq.actualShards = sreq.shards;
-    sreq.params = params;
-    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
-    shardHandler.submit(sreq, baseUrl, sreq.params);
-  }
-
-  @SuppressWarnings("unchecked")
-  private void processReplicaAddPropertyCommand(ClusterState clusterState, ZkNodeProps message, NamedList results)
-      throws Exception {
-    checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP, PROPERTY_PROP, PROPERTY_VALUE_PROP);
-    SolrZkClient zkClient = zkStateReader.getZkClient();
-    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkClient);
-    Map<String, Object> propMap = new HashMap<>();
-    propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICAPROP.toLower());
-    propMap.putAll(message.getProperties());
-    ZkNodeProps m = new ZkNodeProps(propMap);
-    inQueue.offer(Utils.toJSON(m));
-  }
-
-  private void processReplicaDeletePropertyCommand(ClusterState clusterState, ZkNodeProps message, NamedList results)
-      throws Exception {
-    checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP, PROPERTY_PROP);
-    SolrZkClient zkClient = zkStateReader.getZkClient();
-    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkClient);
-    Map<String, Object> propMap = new HashMap<>();
-    propMap.put(Overseer.QUEUE_OPERATION, DELETEREPLICAPROP.toLower());
-    propMap.putAll(message.getProperties());
-    ZkNodeProps m = new ZkNodeProps(propMap);
-    inQueue.offer(Utils.toJSON(m));
-  }
-
-  private void balanceProperty(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    if (StringUtils.isBlank(message.getStr(COLLECTION_PROP)) || StringUtils.isBlank(message.getStr(PROPERTY_PROP))) {
-      throw new SolrException(ErrorCode.BAD_REQUEST,
-          "The '" + COLLECTION_PROP + "' and '" + PROPERTY_PROP +
-              "' parameters are required for the BALANCESHARDUNIQUE operation, no action taken");
-    }
-    SolrZkClient zkClient = zkStateReader.getZkClient();
-    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkClient);
-    Map<String, Object> propMap = new HashMap<>();
-    propMap.put(Overseer.QUEUE_OPERATION, BALANCESHARDUNIQUE.toLower());
-    propMap.putAll(message.getProperties());
-    inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
-  }
-
-  /**
-   * Get collection status from cluster state.
-   * Can optionally return the status of only the requested shards.
-   *
-   * @param collection collection map parsed from JSON-serialized {@link ClusterState}
-   * @param name  collection name
-   * @param requestedShards a set of shards to be returned in the status.
-   *                        An empty or null value indicates <b>all</b> shards.
-   * @return map of collection properties
-   */
-  @SuppressWarnings("unchecked")
-  private Map<String, Object> getCollectionStatus(Map<String, Object> collection, String name, Set<String> requestedShards) {
-    if (collection == null)  {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Collection: " + name + " not found");
-    }
-    if (requestedShards == null || requestedShards.isEmpty()) {
-      return collection;
-    } else {
-      Map<String, Object> shards = (Map<String, Object>) collection.get("shards");
-      Map<String, Object>  selected = new HashMap<>();
-      for (String selectedShard : requestedShards) {
-        if (!shards.containsKey(selectedShard)) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "Collection: " + name + " shard: " + selectedShard + " not found");
-        }
-        selected.put(selectedShard, shards.get(selectedShard));
-      }
-      collection.put("shards", selected);
-      return collection;
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  void deleteReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
-      throws Exception {
-    ((DeleteReplicaCmd) commandMap.get(DELETEREPLICA)).deleteReplica(clusterState, message, results, onComplete);
-  }
-
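-  // Polls the cluster state until the given replica disappears, returning true if it is
-  // gone (or the whole collection was deleted) and false if the timeout elapses first.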
-  boolean waitForCoreNodeGone(String collectionName, String shard, String replicaName, int timeoutms) throws InterruptedException {
-    TimeOut timeout = new TimeOut(timeoutms, TimeUnit.MILLISECONDS, timeSource);
-    while (! timeout.hasTimedOut()) {
-      timeout.sleep(100);
-      DocCollection docCollection = zkStateReader.getClusterState().getCollection(collectionName);
-      if (docCollection == null) { // someone already deleted the collection
-        return true;
-      }
-      Slice slice = docCollection.getSlice(shard);
-      if (slice == null || slice.getReplica(replicaName) == null) {
-        return true;
-      }
-    }
-    // replica still exists after the timeout
-    return false;
-  }
-
-  void deleteCoreNode(String collectionName, String replicaName, Replica replica, String core) throws Exception {
-    ZkNodeProps m = new ZkNodeProps(
-        Overseer.QUEUE_OPERATION, OverseerAction.DELETECORE.toLower(),
-        ZkStateReader.CORE_NAME_PROP, core,
-        ZkStateReader.NODE_NAME_PROP, replica.getStr(ZkStateReader.NODE_NAME_PROP),
-        ZkStateReader.COLLECTION_PROP, collectionName,
-        ZkStateReader.CORE_NODE_NAME_PROP, replicaName,
-        ZkStateReader.BASE_URL_PROP, replica.getStr(ZkStateReader.BASE_URL_PROP));
-    Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
-  }
-
-  void checkRequired(ZkNodeProps message, String... props) {
-    for (String prop : props) {
-      if (message.get(prop) == null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, StrUtils.join(Arrays.asList(props), ',') + " are required params");
-      }
-    }
-  }
-
-  // TODO: should this be removed in the next release?
-  private void migrateStateFormat(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    final String collectionName = message.getStr(COLLECTION_PROP);
-
-    boolean firstLoop = true;
-    // wait for a while until the state format changes
-    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
-    while (! timeout.hasTimedOut()) {
-      DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
-      if (collection == null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Collection: " + collectionName + " not found");
-      }
-      if (collection.getStateFormat() == 2) {
-        // Done.
-        results.add("success", new SimpleOrderedMap<>());
-        return;
-      }
-
-      if (firstLoop) {
-        // Actually queue the migration command.
-        firstLoop = false;
-        ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, MIGRATESTATEFORMAT.toLower(), COLLECTION_PROP, collectionName);
-        Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
-      }
-      timeout.sleep(100);
-    }
-    throw new SolrException(ErrorCode.SERVER_ERROR, "Could not migrate state format for collection: " + collectionName);
-  }
-
-  void commit(NamedList results, String slice, Replica parentShardLeader) {
-    log.debug("Calling soft commit to make sub shard updates visible");
-    String coreUrl = new ZkCoreNodeProps(parentShardLeader).getCoreUrl();
-    // HttpShardHandler is hard coded to send a QueryRequest hence we go direct
-    // and we force open a searcher so that we have documents to show upon switching states
-    UpdateResponse updateResponse = null;
-    try {
-      updateResponse = softCommit(coreUrl);
-      processResponse(results, null, coreUrl, updateResponse, slice, Collections.emptySet());
-    } catch (Exception e) {
-      processResponse(results, e, coreUrl, updateResponse, slice, Collections.emptySet());
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to call distrib softCommit on: " + coreUrl, e);
-    }
-  }
-
-
-  static UpdateResponse softCommit(String url) throws SolrServerException, IOException {
-
-    try (HttpSolrClient client = new HttpSolrClient.Builder(url)
-        .withConnectionTimeout(30000)
-        .withSocketTimeout(120000)
-        .build()) {
-      UpdateRequest ureq = new UpdateRequest();
-      ureq.setParams(new ModifiableSolrParams());
-      ureq.setAction(AbstractUpdateRequest.ACTION.COMMIT, false, true, true);
-      return ureq.process(client);
-    }
-  }
-
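-  // Polls the cluster state until a replica matching the given node name and core name
-  // appears, returning its coreNodeName.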
-  String waitForCoreNodeName(String collectionName, String msgNodeName, String msgCore) {
-    int retryCount = 320;
-    while (retryCount-- > 0) {
-      final DocCollection docCollection = zkStateReader.getClusterState().getCollectionOrNull(collectionName);
-      if (docCollection != null && docCollection.getSlicesMap() != null) {
-        Map<String,Slice> slicesMap = docCollection.getSlicesMap();
-        for (Slice slice : slicesMap.values()) {
-          for (Replica replica : slice.getReplicas()) {
-            // TODO: for really large clusters, we could 'index' on this
-
-            String nodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP);
-            String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
-
-            if (nodeName.equals(msgNodeName) && core.equals(msgCore)) {
-              return replica.getName();
-            }
-          }
-        }
-      }
-      try {
-        Thread.sleep(1000);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-      }
-    }
-    throw new SolrException(ErrorCode.SERVER_ERROR, "Could not find coreNodeName");
-  }
-
-  void waitForNewShard(String collectionName, String sliceName) throws KeeperException, InterruptedException {
-    log.debug("Waiting for slice {} of collection {} to be available", sliceName, collectionName);
-    RTimer timer = new RTimer();
-    int retryCount = 320;
-    while (retryCount-- > 0) {
-      DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
-      if (collection == null) {
-        throw new SolrException(ErrorCode.SERVER_ERROR,
-            "Unable to find collection: " + collectionName + " in clusterstate");
-      }
-      Slice slice = collection.getSlice(sliceName);
-      if (slice != null) {
-        log.debug("Waited for {}ms for slice {} of collection {} to be available",
-            timer.getTime(), sliceName, collectionName);
-        return;
-      }
-      Thread.sleep(1000);
-    }
-    throw new SolrException(ErrorCode.SERVER_ERROR,
-        "Could not find new slice " + sliceName + " in collection " + collectionName
-            + " even after waiting for " + timer.getTime() + "ms"
-    );
-  }
-
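-  // Returns the intersection of two hash ranges: null if they do not overlap, the smaller
-  // range if one contains the other, and otherwise the overlapping sub-range.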
-  DocRouter.Range intersect(DocRouter.Range a, DocRouter.Range b) {
-    if (a == null || b == null || !a.overlaps(b)) {
-      return null;
-    } else if (a.isSubsetOf(b)) {
-      return a;
-    } else if (b.isSubsetOf(a)) {
-      return b;
-    } else if (b.includes(a.max)) {
-      return new DocRouter.Range(b.min, a.max);
-    } else {
-      return new DocRouter.Range(a.min, b.max);
-    }
-  }
-
-  void sendShardRequest(String nodeName, ModifiableSolrParams params,
-                        ShardHandler shardHandler, String asyncId,
-                        Map<String, String> requestMap) {
-    sendShardRequest(nodeName, params, shardHandler, asyncId, requestMap, adminPath, zkStateReader);
-  }
-
-  public static void sendShardRequest(String nodeName, ModifiableSolrParams params, ShardHandler shardHandler,
-                                      String asyncId, Map<String, String> requestMap, String adminPath,
-                                      ZkStateReader zkStateReader) {
-    if (asyncId != null) {
-      String coreAdminAsyncId = asyncId + Math.abs(System.nanoTime());
-      params.set(ASYNC, coreAdminAsyncId);
-      requestMap.put(nodeName, coreAdminAsyncId);
-    }
-
-    ShardRequest sreq = new ShardRequest();
-    params.set("qt", adminPath);
-    sreq.purpose = ShardRequest.PURPOSE_PRIVATE;
-    String replica = zkStateReader.getBaseUrlForNodeName(nodeName);
-    sreq.shards = new String[]{replica};
-    sreq.actualShards = sreq.shards;
-    sreq.nodeName = nodeName;
-    sreq.params = params;
-
-    shardHandler.submit(sreq, replica, sreq.params);
-  }
-
-  void addPropertyParams(ZkNodeProps message, ModifiableSolrParams params) {
-    // Now add the property.key=value pairs
-    for (String key : message.keySet()) {
-      if (key.startsWith(COLL_PROP_PREFIX)) {
-        params.set(key, message.getStr(key));
-      }
-    }
-  }
-
-  void addPropertyParams(ZkNodeProps message, Map<String, Object> map) {
-    // Now add the property.key=value pairs
-    for (String key : message.keySet()) {
-      if (key.startsWith(COLL_PROP_PREFIX)) {
-        map.put(key, message.getStr(key));
-      }
-    }
-  }
-
-
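-  // Applies collection property changes: optionally switches the config set (and reloads
-  // the collection), queues the modification with the Overseer, then polls the cluster
-  // state until the changes become visible or a timeout is reached.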
-  private void modifyCollection(ClusterState clusterState, ZkNodeProps message, NamedList results)
-      throws Exception {
-    
-    final String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
-    //the rest of the processing is based on writing cluster state properties
-    //remove the property here to avoid any errors down the pipeline due to this property appearing
-    String configName = (String) message.getProperties().remove(CollectionAdminParams.COLL_CONF);
-    
-    if (configName != null) {
-      validateConfigOrThrowSolrException(configName);
-
-      boolean isLegacyCloud = Overseer.isLegacy(zkStateReader);
-      createConfNode(cloudManager.getDistribStateManager(), configName, collectionName, isLegacyCloud);
-      reloadCollection(null, new ZkNodeProps(NAME, collectionName), results);
-    }
-    
-    Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
-
-    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
-    boolean areChangesVisible = true;
-    while (!timeout.hasTimedOut()) {
-      DocCollection collection = cloudManager.getClusterStateProvider().getClusterState().getCollection(collectionName);
-      areChangesVisible = true;
-      for (Map.Entry<String,Object> updateEntry : message.getProperties().entrySet()) {
-        String updateKey = updateEntry.getKey();
-
-        if (!updateKey.equals(ZkStateReader.COLLECTION_PROP)
-            && !updateKey.equals(Overseer.QUEUE_OPERATION)
-            && updateEntry.getValue() != null // handled below in a separate conditional
-            && !collection.get(updateKey).equals(updateEntry.getValue())){
-          areChangesVisible = false;
-          break;
-        }
-
-        if (updateEntry.getValue() == null && collection.containsKey(updateKey)) {
-          areChangesVisible = false;
-          break;
-        }
-      }
-      if (areChangesVisible) break;
-      timeout.sleep(100);
-    }
-
-    if (!areChangesVisible) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not modify collection " + message);
-    }
-  }
-
-  void cleanupCollection(String collectionName, NamedList results) throws Exception {
-    log.error("Cleaning up collection [" + collectionName + "]." );
-    Map<String, Object> props = makeMap(
-        Overseer.QUEUE_OPERATION, DELETE.toLower(),
-        NAME, collectionName);
-    commandMap.get(DELETE).call(zkStateReader.getClusterState(), new ZkNodeProps(props), results);
-  }
-
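-  // Blocks until every given core name shows up as a replica in the collection's cluster
-  // state, failing with a SERVER_ERROR if the timeout expires first.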
-  Map<String, Replica> waitToSeeReplicasInState(String collectionName, Collection<String> coreNames) throws InterruptedException {
-    Map<String, Replica> result = new HashMap<>();
-    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
-    while (true) {
-      DocCollection coll = zkStateReader.getClusterState().getCollection(collectionName);
-      for (String coreName : coreNames) {
-        if (result.containsKey(coreName)) continue;
-        for (Slice slice : coll.getSlices()) {
-          for (Replica replica : slice.getReplicas()) {
-            if (coreName.equals(replica.getStr(ZkStateReader.CORE_NAME_PROP))) {
-              result.put(coreName, replica);
-              break;
-            }
-          }
-        }
-      }
-      
-      if (result.size() == coreNames.size()) {
-        return result;
-      } else {
-        log.debug("Expecting {} cores but found {}", coreNames, result);
-      }
-      if (timeout.hasTimedOut()) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Timed out waiting to see all replicas: " + coreNames + " in cluster state. Last state: " + coll);
-      }
-      
-      Thread.sleep(100);
-    }
-  }
-
-  List<ZkNodeProps> addReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
-      throws Exception {
-
-    return ((AddReplicaCmd) commandMap.get(ADDREPLICA)).addReplica(clusterState, message, results, onComplete);
-  }
-
-  void processResponses(NamedList results, ShardHandler shardHandler, boolean abortOnError, String msgOnError,
-                        String asyncId, Map<String, String> requestMap) {
-    processResponses(results, shardHandler, abortOnError, msgOnError, asyncId, requestMap, Collections.emptySet());
-  }
-
-  void processResponses(NamedList results, ShardHandler shardHandler, boolean abortOnError, String msgOnError,
-                                String asyncId, Map<String, String> requestMap, Set<String> okayExceptions) {
-    // Process all shard responses
-    ShardResponse srsp;
-    do {
-      srsp = shardHandler.takeCompletedOrError();
-      if (srsp != null) {
-        processResponse(results, srsp, okayExceptions);
-        Throwable exception = srsp.getException();
-        if (abortOnError && exception != null)  {
-          // drain pending requests
-          while (srsp != null)  {
-            srsp = shardHandler.takeCompletedOrError();
-          }
-          throw new SolrException(ErrorCode.SERVER_ERROR, msgOnError, exception);
-        }
-      }
-    } while (srsp != null);
-
-    // If the request is async, wait for the core admin calls to complete before returning
-    if (asyncId != null) {
-      waitForAsyncCallsToComplete(requestMap, results);
-      requestMap.clear();
-    }
-  }
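// A condensed sketch of the abort path in processResponses() above: once one shard
// response carries an exception and abortOnError is set, the remaining in-flight
// responses are drained (so the ShardHandler is not left holding dangling work)
// before the error is rethrown.
ShardResponse pending;
while ((pending = shardHandler.takeCompletedOrError()) != null) {
  // discard: the whole operation is being failed anyway
}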
-
-
-  void validateConfigOrThrowSolrException(String configName) throws IOException, KeeperException, InterruptedException {
-    boolean isValid = cloudManager.getDistribStateManager().hasData(ZkConfigManager.CONFIGS_ZKNODE + "/" + configName);
-    if (!isValid) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Cannot find the specified config set: " + configName);
-    }
-  }
-
-  /**
-   * This doesn't validate the config (path) itself and is just responsible for creating the confNode.
-   * That check should be done before the config node is created.
-   */
-  public static void createConfNode(DistribStateManager stateManager, String configName, String coll, boolean isLegacyCloud) throws IOException, AlreadyExistsException, BadVersionException, KeeperException, InterruptedException {
-    
-    if (configName != null) {
-      String collDir = ZkStateReader.COLLECTIONS_ZKNODE + "/" + coll;
-      log.debug("creating collections conf node {} ", collDir);
-      byte[] data = Utils.toJSON(makeMap(ZkController.CONFIGNAME_PROP, configName));
-      if (stateManager.hasData(collDir)) {
-        stateManager.setData(collDir, data, -1);
-      } else {
-        stateManager.makePath(collDir, data, CreateMode.PERSISTENT, false);
-      }
-    } else {
-      if (isLegacyCloud) {
-        log.warn("Could not obtain config name");
-      } else {
-        throw new SolrException(ErrorCode.BAD_REQUEST,"Unable to get config name");
-      }
-    }
-  }
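// The set-or-create ("upsert") idiom from createConfNode() above, isolated as a
// sketch; the parameter types and calls mirror the method itself.
static void upsertNode(DistribStateManager stateManager, String path, byte[] data)
    throws IOException, AlreadyExistsException, BadVersionException,
           KeeperException, InterruptedException {
  if (stateManager.hasData(path)) {
    stateManager.setData(path, data, -1);                        // -1: any version
  } else {
    stateManager.makePath(path, data, CreateMode.PERSISTENT, false);
  }
}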
-  
-  private List<Replica> collectionCmd(ZkNodeProps message, ModifiableSolrParams params,
-                             NamedList results, Replica.State stateMatcher, String asyncId, Map<String, String> requestMap) {
-    return collectionCmd( message, params, results, stateMatcher, asyncId, requestMap, Collections.emptySet());
-  }
-
-  /**
-   * Send a request to all replicas of a collection.
-   * @return List of replicas that were not live to receive the request
-   */
-  List<Replica> collectionCmd(ZkNodeProps message, ModifiableSolrParams params,
-                     NamedList results, Replica.State stateMatcher, String asyncId, Map<String, String> requestMap, Set<String> okayExceptions) {
-    log.info("Executing Collection Cmd={}, asyncId={}", params, asyncId);
-    String collectionName = message.getStr(NAME);
-    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
-
-    ClusterState clusterState = zkStateReader.getClusterState();
-    DocCollection coll = clusterState.getCollection(collectionName);
-    List<Replica> notLivesReplicas = new ArrayList<>();
-    for (Slice slice : coll.getSlices()) {
-      notLivesReplicas.addAll(sliceCmd(clusterState, params, stateMatcher, slice, shardHandler, asyncId, requestMap));
-    }
-
-    processResponses(results, shardHandler, false, null, asyncId, requestMap, okayExceptions);
-    return notLivesReplicas;
-  }
-
-  /**
-   * Send a request to all replicas of a slice.
-   * @return List of replicas that were not live to receive the request
-   */
-  List<Replica> sliceCmd(ClusterState clusterState, ModifiableSolrParams params, Replica.State stateMatcher,
-                Slice slice, ShardHandler shardHandler, String asyncId, Map<String, String> requestMap) {
-    List<Replica> notLiveReplicas = new ArrayList<>();
-    for (Replica replica : slice.getReplicas()) {
-      if ((stateMatcher == null || Replica.State.getState(replica.getStr(ZkStateReader.STATE_PROP)) == stateMatcher)) {
-        if (clusterState.liveNodesContain(replica.getStr(ZkStateReader.NODE_NAME_PROP))) {
-          // For thread safety, make a simple clone of the ModifiableSolrParams
-          ModifiableSolrParams cloneParams = new ModifiableSolrParams();
-          cloneParams.add(params);
-          cloneParams.set(CoreAdminParams.CORE, replica.getStr(ZkStateReader.CORE_NAME_PROP));
-
-          sendShardRequest(replica.getStr(ZkStateReader.NODE_NAME_PROP), cloneParams, shardHandler, asyncId, requestMap);
-        } else {
-          notLiveReplicas.add(replica);
-        }
-      }
-    }
-    return notLiveReplicas;
-  }
-  
-  private void processResponse(NamedList results, ShardResponse srsp, Set<String> okayExceptions) {
-    Throwable e = srsp.getException();
-    String nodeName = srsp.getNodeName();
-    SolrResponse solrResponse = srsp.getSolrResponse();
-    String shard = srsp.getShard();
-
-    processResponse(results, e, nodeName, solrResponse, shard, okayExceptions);
-  }
-
-  @SuppressWarnings("unchecked")
-  private void processResponse(NamedList results, Throwable e, String nodeName, SolrResponse solrResponse, String shard, Set<String> okayExceptions) {
-    String rootThrowable = null;
-    if (e instanceof RemoteSolrException) {
-      rootThrowable = ((RemoteSolrException) e).getRootThrowable();
-    }
-
-    if (e != null && (rootThrowable == null || !okayExceptions.contains(rootThrowable))) {
-      log.error("Error from shard: " + shard, e);
-
-      SimpleOrderedMap failure = (SimpleOrderedMap) results.get("failure");
-      if (failure == null) {
-        failure = new SimpleOrderedMap();
-        results.add("failure", failure);
-      }
-
-      failure.add(nodeName, e.getClass().getName() + ":" + e.getMessage());
-
-    } else {
-
-      SimpleOrderedMap success = (SimpleOrderedMap) results.get("success");
-      if (success == null) {
-        success = new SimpleOrderedMap();
-        results.add("success", success);
-      }
-
-      success.add(nodeName, solrResponse.getResponse());
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  private void waitForAsyncCallsToComplete(Map<String, String> requestMap, NamedList results) {
-    for (String k:requestMap.keySet()) {
-      log.debug("I am Waiting for :{}/{}", k, requestMap.get(k));
-      results.add(requestMap.get(k), waitForCoreAdminAsyncCallToComplete(k, requestMap.get(k)));
-    }
-  }
-
-  private NamedList waitForCoreAdminAsyncCallToComplete(String nodeName, String requestId) {
-    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(CoreAdminParams.ACTION, CoreAdminAction.REQUESTSTATUS.toString());
-    params.set(CoreAdminParams.REQUESTID, requestId);
-    int counter = 0;
-    ShardRequest sreq;
-    do {
-      sreq = new ShardRequest();
-      params.set("qt", adminPath);
-      sreq.purpose = 1;
-      String replica = zkStateReader.getBaseUrlForNodeName(nodeName);
-      sreq.shards = new String[] {replica};
-      sreq.actualShards = sreq.shards;
-      sreq.params = params;
-
-      shardHandler.submit(sreq, replica, sreq.params);
-
-      ShardResponse srsp;
-      do {
-        srsp = shardHandler.takeCompletedOrError();
-        if (srsp != null) {
-          NamedList results = new NamedList();
-          processResponse(results, srsp, Collections.emptySet());
-          if (srsp.getSolrResponse().getResponse() == null) {
-            NamedList response = new NamedList();
-            response.add("STATUS", "failed");
-            return response;
-          }
-          
-          String r = (String) srsp.getSolrResponse().getResponse().get("STATUS");
-          if (r.equals("running")) {
-            log.debug("The task is still RUNNING, continuing to wait.");
-            try {
-              Thread.sleep(1000);
-            } catch (InterruptedException e) {
-              Thread.currentThread().interrupt();
-            }
-            continue;
-
-          } else if (r.equals("completed")) {
-            log.debug("The task is COMPLETED, returning");
-            return srsp.getSolrResponse().getResponse();
-          } else if (r.equals("failed")) {
-            // TODO: Improve this. Get more information.
-            log.debug("The task is FAILED, returning");
-            return srsp.getSolrResponse().getResponse();
-          } else if (r.equals("notfound")) {
-            log.debug("The task is notfound, retry");
-            if (counter++ < 5) {
-              try {
-                Thread.sleep(1000);
-              } catch (InterruptedException e) {
-                Thread.currentThread().interrupt();
-              }
-              break;
-            }
-            throw new SolrException(ErrorCode.BAD_REQUEST, "Invalid status reported for requestId: " + requestId + ": "
-                + srsp.getSolrResponse().getResponse().get("STATUS") + ", retried " + counter + " times");
-          } else {
-            throw new SolrException(ErrorCode.BAD_REQUEST, "Invalid status request " + srsp.getSolrResponse().getResponse().get("STATUS"));
-          }
-        }
-      } while (srsp != null);
-    } while(true);
-  }
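// For comparison, a hedged SolrJ sketch of the same REQUESTSTATUS polling done
// from the client side; the SolrClient ("client") is assumed, and the 1s sleep
// simply mirrors the server-side loop above.
static RequestStatusState pollUntilDone(SolrClient client, String asyncId)
    throws SolrServerException, IOException, InterruptedException {
  while (true) {
    RequestStatusState state = CollectionAdminRequest.requestStatus(asyncId)
        .process(client).getRequestStatus();
    if (state != RequestStatusState.RUNNING && state != RequestStatusState.SUBMITTED) {
      return state; // COMPLETED, FAILED, or NOT_FOUND
    }
    Thread.sleep(1000); // same cadence as waitForCoreAdminAsyncCallToComplete()
  }
}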
-
-  @Override
-  public String getName() {
-    return "Overseer Collection Message Handler";
-  }
-
-  @Override
-  public String getTimerName(String operation) {
-    return "collection_" + operation;
-  }
-
-  @Override
-  public String getTaskKey(ZkNodeProps message) {
-    return message.containsKey(COLLECTION_PROP) ?
-      message.getStr(COLLECTION_PROP) : message.getStr(NAME);
-  }
-
-
-  private long sessionId = -1;
-  private LockTree.Session lockSession;
-
-  @Override
-  public Lock lockTask(ZkNodeProps message, OverseerTaskProcessor.TaskBatch taskBatch) {
-    if (lockSession == null || sessionId != taskBatch.getId()) {
-      // This is always called in the same thread, and each batch is supposed to
-      // have a new taskBatch, so if the taskBatch changes we must create a new Session.
-      // Also check whether the running tasks are empty; if so, clear the lockTree
-      // to ensure that locks are not 'leaked'.
-      if (taskBatch.getRunningTasks() == 0) lockTree.clear();
-      lockSession = lockTree.getSession();
-    }
-    return lockSession.lock(getCollectionAction(message.getStr(Overseer.QUEUE_OPERATION)),
-        Arrays.asList(
-            getTaskKey(message),
-            message.getStr(ZkStateReader.SHARD_ID_PROP),
-            message.getStr(ZkStateReader.REPLICA_PROP))
-
-    );
-  }
-
-
-  @Override
-  public void close() throws IOException {
-    this.isClosed = true;
-    if (tpe != null) {
-      if (!tpe.isShutdown()) {
-        ExecutorUtil.shutdownAndAwaitTermination(tpe);
-      }
-    }
-  }
-
-  @Override
-  public boolean isClosed() {
-    return isClosed;
-  }
-
-  protected interface Cmd {
-    void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception;
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerRoleCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerRoleCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerRoleCmd.java
deleted file mode 100644
index 16f9327..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerRoleCmd.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.api.collections;
-
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.solr.cloud.OverseerNodePrioritizer;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams.CollectionAction;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.Utils;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDROLE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.REMOVEROLE;
-
-public class OverseerRoleCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-  private final CollectionAction operation;
-  private final OverseerNodePrioritizer overseerPrioritizer;
-
-
-
-  public OverseerRoleCmd(OverseerCollectionMessageHandler ocmh, CollectionAction operation, OverseerNodePrioritizer prioritizer) {
-    this.ocmh = ocmh;
-    this.operation = operation;
-    this.overseerPrioritizer = prioritizer;
-  }
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    SolrZkClient zkClient = zkStateReader.getZkClient();
-    Map roles = null;
-    String node = message.getStr("node");
-
-    String roleName = message.getStr("role");
-    boolean nodeExists = zkClient.exists(ZkStateReader.ROLES, true);
-    if (nodeExists) {
-      roles = (Map) Utils.fromJSON(zkClient.getData(ZkStateReader.ROLES, null, new Stat(), true));
-    } else {
-      roles = new LinkedHashMap(1);
-    }
-
-    List nodeList = (List) roles.get(roleName);
-    if (nodeList == null) {
-      nodeList = new ArrayList();
-      roles.put(roleName, nodeList);
-    }
-    if (ADDROLE == operation) {
-      log.info("Overseer role added to {}", node);
-      if (!nodeList.contains(node)) nodeList.add(node);
-    } else if (REMOVEROLE == operation) {
-      log.info("Overseer role removed from {}", node);
-      nodeList.remove(node);
-    }
-
-    if (nodeExists) {
-      zkClient.setData(ZkStateReader.ROLES, Utils.toJSON(roles), true);
-    } else {
-      zkClient.create(ZkStateReader.ROLES, Utils.toJSON(roles), CreateMode.PERSISTENT, true);
-    }
-    // If there are many nodes this command may time out, and dedicated overseers are
-    // most likely in use when there are many nodes, so do this operation in a separate thread.
-    new Thread(() -> {
-      try {
-        overseerPrioritizer.prioritizeOverseerNodes(ocmh.myId);
-      } catch (Exception e) {
-        log.error("Error in prioritizing Overseer", e);
-      }
-
-    }).start();
-
-  }
-
-}
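// A hedged sketch of how this Cmd is typically wired into the overseer's command
// map; the actual map construction lives in OverseerCollectionMessageHandler, and
// "commandMap", "ocmh", and "overseerPrioritizer" stand in for its fields.
Map<CollectionAction, OverseerCollectionMessageHandler.Cmd> commandMap = new HashMap<>();
commandMap.put(ADDROLE, new OverseerRoleCmd(ocmh, ADDROLE, overseerPrioritizer));
commandMap.put(REMOVEROLE, new OverseerRoleCmd(ocmh, REMOVEROLE, overseerPrioritizer));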


[07/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/PhrasesIdentificationComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/PhrasesIdentificationComponent.java b/solr/core/src/java/org/apache/solr/handler/component/PhrasesIdentificationComponent.java
deleted file mode 100644
index bac5a4c..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/PhrasesIdentificationComponent.java
+++ /dev/null
@@ -1,1129 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.Arrays;
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-import java.util.LongSummaryStatistics;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.stream.Collectors;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.shingle.ShingleFilter;
-import org.apache.lucene.analysis.shingle.ShingleFilterFactory;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
-import org.apache.lucene.analysis.util.TokenFilterFactory;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.CharsRefBuilder;
-
-import org.apache.solr.analysis.TokenizerChain;
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.schema.FieldType;
-import org.apache.solr.schema.SchemaField;
-import org.apache.solr.util.SolrPluginUtils;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-/**
- * A component that can be used in isolation, or in conjunction with {@link QueryComponent} to identify 
- * &amp; score "phrases" found in the input string, based on shingles in indexed fields.
- *
- * <p>
- * The most common way to use this component is in conjunction with fields that use 
- * {@link ShingleFilterFactory} on both the <code>index</code> and <code>query</code> analyzers.  
- * An example field type configuration would be something like this...
- * </p>
- * <pre class="prettyprint">
- * &lt;fieldType name="phrases" class="solr.TextField" positionIncrementGap="100"&gt;
- *   &lt;analyzer type="index"&gt;
- *     &lt;tokenizer class="solr.StandardTokenizerFactory"/&gt;
- *     &lt;filter class="solr.LowerCaseFilterFactory"/&gt;
- *     &lt;filter class="solr.ShingleFilterFactory" minShingleSize="2" maxShingleSize="3" outputUnigrams="true"/&gt;
- *   &lt;/analyzer&gt;
- *   &lt;analyzer type="query"&gt;
- *     &lt;tokenizer class="solr.StandardTokenizerFactory"/&gt;
- *     &lt;filter class="solr.LowerCaseFilterFactory"/&gt;
- *     &lt;filter class="solr.ShingleFilterFactory" minShingleSize="2" maxShingleSize="7" outputUnigramsIfNoShingles="true" outputUnigrams="true"/&gt;
- *   &lt;/analyzer&gt;
- * &lt;/fieldType&gt;
- * </pre>
- * <p>
- * ...where the <code>query</code> analyzer's <code>maxShingleSize="7"</code> determines the maximum 
- * possible phrase length that can be heuristically deduced, and the <code>index</code> analyzer's 
- * <code>maxShingleSize="3"</code> determines the accuracy of the phrases identified.  The larger the 
- * indexed <code>maxShingleSize</code>, the higher the accuracy.  Both analyzers must include 
- * <code>minShingleSize="2" outputUnigrams="true"</code>.
- * </p>
- * <p>
- * With a field type like this, one or more fields can be specified (with weights) via a 
- * <code>phrases.fields</code> param to request that this component identify possible phrases in the 
- * input <code>q</code> param, or an alternative <code>phrases.q</code> override param.  The identified
- * phrases will include their scores relative to each field specified, as well as an overall weighted score based
- * on the field weights provided by the client.  Higher score values indicate a greater confidence in the 
- * Phrase.
- * </p>
- * 
- * <p>
- * <b>NOTE:</b> In a distributed request, this component uses a single phase (piggy backing on the 
- * {@link ShardRequest#PURPOSE_GET_TOP_IDS} generated by {@link QueryComponent} if it is in use) to 
- * collect all field &amp; shingle stats.  No "refinement" requests are used.
- * </p>
- *
- * @lucene.experimental
- */
-public class PhrasesIdentificationComponent extends SearchComponent {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  /** The only shard purpose that will cause this component to do work &amp; return data during shard req */
-  public static final int SHARD_PURPOSE = ShardRequest.PURPOSE_GET_TOP_IDS;
-  
-  /** Name, also used as a request param to identify whether the user query concerns this component */
-  public static final String COMPONENT_NAME = "phrases";
-
-  // TODO: ideally these should live in a commons.params class?
-  public static final String PHRASE_INPUT = "phrases.q";
-  public static final String PHRASE_FIELDS = "phrases.fields";
-  public static final String PHRASE_ANALYSIS_FIELD = "phrases.analysis.field";
-  public static final String PHRASE_SUMMARY_PRE = "phrases.pre";
-  public static final String PHRASE_SUMMARY_POST = "phrases.post";
-  public static final String PHRASE_INDEX_MAXLEN = "phrases.maxlength.index";
-  public static final String PHRASE_QUERY_MAXLEN = "phrases.maxlength.query";
-
-  @Override
-  public void prepare(ResponseBuilder rb) throws IOException {
-    final SolrParams params = rb.req.getParams();
-    if (!params.getBool(COMPONENT_NAME, false)) {
-      return;
-    }
-    if (params.getBool(ShardParams.IS_SHARD, false)) {
-      // only one stage/purpose where we should do any work on a shard
-      if (0 == (SHARD_PURPOSE & params.getInt(ShardParams.SHARDS_PURPOSE, 0))) {
-        return;
-      }
-    }
-
-    // if we're still here, then we should parse & validate our input, 
-    // putting it in the request context so our process method knows it should do work
-    rb.req.getContext().put(this.getClass(), PhrasesContextData.parseAndValidateRequest(rb.req));
-  }
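// A hedged SolrJ sketch of a request exercising the params parsed above; the
// collection name ("mycollection") and field names ("title_phrases",
// "body_phrases") are assumptions, and the boost syntax follows parseFieldBoosts().
SolrQuery query = new SolrQuery("the quick brown fox");
query.set(COMPONENT_NAME, true);                           // phrases=true
query.set(PHRASE_FIELDS, "title_phrases^2 body_phrases");  // weighted field list
QueryResponse rsp = client.query("mycollection", query);
NamedList<?> phrases = (NamedList<?>) rsp.getResponse().get("phrases");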
-
-  @Override
-  public int distributedProcess(ResponseBuilder rb) {
-    final PhrasesContextData contextData = (PhrasesContextData) rb.req.getContext().get(this.getClass());
-    if (null == contextData) {
-      // if prepare didn't give us anything to work with, then we should do nothing
-      return ResponseBuilder.STAGE_DONE;
-    }
-
-    if (rb.stage < ResponseBuilder.STAGE_EXECUTE_QUERY) {
-      return ResponseBuilder.STAGE_EXECUTE_QUERY;
-  
-    } else if (rb.stage == ResponseBuilder.STAGE_EXECUTE_QUERY) {
-      // if we're being used in conjunction with QueryComponent, it should have already created
-      // (in this stage) the only ShardRequest we need...
-      for (ShardRequest sreq : rb.outgoing) {
-        if (0 != (SHARD_PURPOSE & sreq.purpose) ) {
-          return ResponseBuilder.STAGE_GET_FIELDS;
-        }
-      }
-      // ...if we can't find it, then evidently we're being used in isolation,
-      // and we need to create our own ShardRequest...
-      ShardRequest sreq = new ShardRequest();
-      sreq.purpose = SHARD_PURPOSE;
-      sreq.params = new ModifiableSolrParams(rb.req.getParams());
-      sreq.params.remove(ShardParams.SHARDS);
-      rb.addRequest(this, sreq);
-      return ResponseBuilder.STAGE_GET_FIELDS;
-      
-    } else if (rb.stage == ResponseBuilder.STAGE_GET_FIELDS) {
-      // NOTE: we don't do any actual work in this stage, but we need to ensure that even if
-      // we are being used in isolation w/o QueryComponent that SearchHandler "tracks" a STAGE_GET_FIELDS
-      // so that finishStage(STAGE_GET_FIELDS) is called on us and we can add our merged results
-      // (w/o needing extra code paths for merging phrase results when QueryComponent is/is not used)
-      return ResponseBuilder.STAGE_DONE;
-    }
-
-    return ResponseBuilder.STAGE_DONE;
-  }
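// Condensed view of the stage machine implemented above:
//   stage < EXECUTE_QUERY  : fast-forward to EXECUTE_QUERY
//   stage == EXECUTE_QUERY : reuse QueryComponent's PURPOSE_GET_TOP_IDS shard
//                            request when present, else create our own; then
//                            advance to GET_FIELDS
//   stage == GET_FIELDS    : no work here, but keeps SearchHandler tracking the
//                            stage so finishStage() runs and merges the results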
-  
-  @Override
-  public void finishStage(ResponseBuilder rb) {
-    // NOTE: we don't do this after STAGE_EXECUTE_QUERY because if we're also being used with
-    // QueryComponent, we don't want to add our results to the response until *after*
-    // QueryComponent adds the main DocList
-    
-    final PhrasesContextData contextData = (PhrasesContextData) rb.req.getContext().get(this.getClass());
-    if (null == contextData || rb.stage != ResponseBuilder.STAGE_GET_FIELDS) {
-      // if prepare didn't give us anything to work with, or this isn't our stage, then do nothing
-      return;
-    }
-      
-    // sanity check: the shard requests we use/piggy-back on should only happen once per shard,
-    // but let's future proof ourselves against the possibility that some shards might get/respond
-    // to the same request "purpose" multiple times...
-    final BitSet shardsHandled = new BitSet(rb.shards.length);
-    
-    // Collect Shard responses
-    for (ShardRequest sreq : rb.finished) {
-      if (0 != (sreq.purpose & SHARD_PURPOSE)) {
-        for (ShardResponse shardRsp : sreq.responses) {
-          final int shardNum = rb.getShardNum(shardRsp.getShard());
-          if (! shardsHandled.get(shardNum)) {
-            shardsHandled.set(shardNum);
-            // shards.tolerant=true can cause nulls on exceptions/errors
-            // if we don't get phrases/stats from a shard, just ignore that shard
-            final SolrResponse rsp = shardRsp.getSolrResponse();
-            if (null == rsp) continue;
-            final NamedList<Object> top = rsp.getResponse();
-            if (null == top) continue;
-            final NamedList<Object> phrasesWrapper = (NamedList<Object>) top.get("phrases");
-            if (null == phrasesWrapper) continue;
-            final List<NamedList<Object>> shardPhrases = (List<NamedList<Object>>) phrasesWrapper.get("_all");
-            if (null == shardPhrases) continue;
-            
-            Phrase.populateStats(contextData.allPhrases, shardPhrases);
-          }
-        }
-      }
-    }
-    scoreAndAddResultsToResponse(rb, contextData);
-  }
-
-  
-  @Override
-  public void process(ResponseBuilder rb) throws IOException {
-    final PhrasesContextData contextData = (PhrasesContextData) rb.req.getContext().get(this.getClass());
-    if (null == contextData) {
-      // if prepare didn't give us anything to work with, then we should do nothing
-      return;
-    }
-
-    // regardless of single node / shard, we need local stats...
-    Phrase.populateStats(contextData.allPhrases, contextData.fieldWeights.keySet(), rb.req.getSearcher());
-
-    if ( rb.req.getParams().getBool(ShardParams.IS_SHARD, false) ) {
-      // shard request, return stats for all phrases (in original order)
-      SimpleOrderedMap<Object> output = new SimpleOrderedMap<>();
-      output.add("_all", Phrase.formatShardResponse(contextData.allPhrases));
-      // TODO: might want to add numDocs() & getSumTotalTermFreq(f)/getDocCount(f) stats from each field...
-      // so that we can sum/merge them for use in scoring?
-      rb.rsp.add("phrases", output);
-    } else {
-      // full single node request...
-      scoreAndAddResultsToResponse(rb, contextData);
-    }
-  }
-
-  /** 
-   * Helper method (suitable for both single node &amp; distributed coordinator node) to 
-   * score, sort, and format the end user response once all phrases have been populated with stats.
-   */
-  private void scoreAndAddResultsToResponse(final ResponseBuilder rb, final PhrasesContextData contextData) {
-    assert null != contextData : "Should not be called if no phrase data to use";
-    if (null == contextData) {
-      // if prepare didn't give us anything to work with, then we should do nothing
-      return;
-    }
-    
-    SimpleOrderedMap<Object> output = new SimpleOrderedMap<>();
-    rb.rsp.add("phrases", output);
-    output.add("input", contextData.rawInput);
-
-    if (0 == contextData.allPhrases.size()) {
-      // w/o any phrases, the summary is just the input again...
-      output.add("summary", contextData.rawInput);
-      output.add("details", Collections.<Object>emptyList());
-      return;
-    }
-    
-    Phrase.populateScores(contextData);
-    final int maxPosition = contextData.allPhrases.get(contextData.allPhrases.size()-1).getPositionEnd();
-    
-    final List<Phrase> validScoringPhrasesSorted = contextData.allPhrases.stream()
-      // TODO: ideally this cut off of "0.0" should be a request option...
-      // so users can tune how aggressive/conservative they want to be in finding phrases
-      // but for that to be useful, we need:
-      //  - more hard & fast documentation about the "range" of scores that may be returned
-      //  - "useful" scores for single words
-      .filter(p -> 0.0D < p.getTotalScore())
-      .sorted(Comparator.comparing((p -> p.getTotalScore()), Collections.reverseOrder()))
-      .collect(Collectors.toList());
-
-    // we want to return only high scoring phrases that don't overlap w/higher scoring phrase
-    final BitSet positionsCovered = new BitSet(maxPosition+1);
-    final List<Phrase> results = new ArrayList<>(maxPosition);
-    for (Phrase phrase : validScoringPhrasesSorted) {
-      final BitSet phrasePositions = phrase.getPositionsBitSet();
-      
-      if (! phrasePositions.intersects(positionsCovered)) {
-        // we can use this phrase, record it...
-        positionsCovered.or(phrasePositions);
-        results.add(phrase);
-      } // else: overlaps higher scoring position(s), skip this phrase
-      
-      if (positionsCovered.cardinality() == maxPosition+1) {
-        // all positions are covered, so we can bail out and skip the rest
-        break;
-      }
-    }
-    
-    // a "quick summary" of the suggested parsing
-    output.add("summary", contextData.summarize(results));
-    // useful user level info on every (high scoring) phrase found (in current, descending score, order)
-    output.add("details", results.stream()
-               .map(p -> p.getDetails()).collect(Collectors.toList()));
-  }
-  
-  @Override
-  public String getDescription() {
-    return "Phrases Identification Component";
-  }
-
-  /** 
-   * Simple container for all request options and data this component needs to store in the Request Context 
-   * @lucene.internal
-   */
-  public static final class PhrasesContextData {
-
-    public final String rawInput;
-    public final int maxIndexedPositionLength; 
-    public final int maxQueryPositionLength; 
-    public final Map<String,Double> fieldWeights;
-    public final SchemaField analysisField;
-    public final List<Phrase> allPhrases;
-    public final String summaryPre;
-    public final String summaryPost;
-
-    // TODO: add an option to bias field weights based on sumTTF of the fields
-    // (easy enough to "sum the sums" across multiple shards before scoring)
-
-    /**
-     * Parses the params included in this request, throwing appropriate user level 
-     * Exceptions for invalid input, and returning a <code>PhrasesContextData</code>
-     * suitable for use in this request.
-     */
-    public static PhrasesContextData parseAndValidateRequest(final SolrQueryRequest req) throws SolrException {
-      return new PhrasesContextData(req);
-    }
-    private PhrasesContextData(final SolrQueryRequest req) throws SolrException {
-      final SolrParams params = req.getParams();
-
-      this.rawInput = params.get(PHRASE_INPUT, params.get(CommonParams.Q));
-      if (null == this.rawInput) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "phrase identification requires a query string or "
-                                + PHRASE_INPUT + " param override");
-      }
-
-      { // field weights & analysis field...
-        
-        SchemaField tmpAnalysisField = null;
-        Map<String,Double> tmpWeights = new TreeMap<>();
-        
-        final String analysisFieldName = params.get(PHRASE_ANALYSIS_FIELD);
-        if (null != analysisFieldName) {
-          tmpAnalysisField = req.getSchema().getFieldOrNull(analysisFieldName);
-          if (null == tmpAnalysisField) {
-            throw new SolrException(ErrorCode.BAD_REQUEST,
-                                    PHRASE_ANALYSIS_FIELD + " param specifies a field name that does not exist: " +
-                                    analysisFieldName);
-          }
-        }
-        
-        final Map<String,Float> rawFields = SolrPluginUtils.parseFieldBoosts(params.getParams(PHRASE_FIELDS));
-        if (rawFields.isEmpty()) {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-                                  PHRASE_FIELDS + " param must specify a (weighted) list of fields " +
-                                  "to evaluate for phrase identification");
-        }
-        
-        for (Map.Entry<String,Float> entry : rawFields.entrySet()) {
-          final SchemaField field = req.getSchema().getFieldOrNull(entry.getKey());
-          if (null == field) {
-            throw new SolrException(ErrorCode.BAD_REQUEST,
-                                    PHRASE_FIELDS + " param contains a field name that does not exist: " +
-                                    entry.getKey());
-          }
-          if (null == tmpAnalysisField) {
-            tmpAnalysisField = field;
-          }
-          if ( null == analysisFieldName ) {
-            if (! field.getType().equals(tmpAnalysisField.getType())) {
-              throw new SolrException
-                (ErrorCode.BAD_REQUEST,
-                 "All fields specified in " + PHRASE_FIELDS + " must have the same fieldType, " +
-                 "or the advanced " + PHRASE_ANALYSIS_FIELD + " option must specify an override");
-            }
-          }
-          // if a weight isn't specified, assume "1.0" 
-          final double weight = null == entry.getValue() ? 1.0D : entry.getValue();
-          if (weight < 0) {
-            throw new SolrException(ErrorCode.BAD_REQUEST,
-                                    PHRASE_FIELDS + " param must use non-negative weight value for field " + field.getName());
-          }
-          tmpWeights.put(entry.getKey(), weight);
-        }
-        assert null != tmpAnalysisField;
-        
-        this.analysisField = tmpAnalysisField;
-        this.fieldWeights = Collections.unmodifiableMap(tmpWeights);
-      }
-
-      { // index/query max phrase sizes...
-        final FieldType ft = analysisField.getType();
-        this.maxIndexedPositionLength = req.getParams().getInt(PHRASE_INDEX_MAXLEN,
-                                                               getMaxShingleSize(ft.getIndexAnalyzer()));
-        if (this.maxIndexedPositionLength < 0) {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-                                  "Unable to determine max position length of indexed phrases using " +
-                                  "index analyzer for analysis field: " + analysisField.getName() +
-                                  " and no override detected using param: " + PHRASE_INDEX_MAXLEN);
-        }
-        this.maxQueryPositionLength = req.getParams().getInt(PHRASE_QUERY_MAXLEN,
-                                                             getMaxShingleSize(ft.getQueryAnalyzer()));
-        if (this.maxQueryPositionLength < 0) {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-                                  "Unable to determine max position length of query phrases using " +
-                                  "query analyzer for analysis field: " + analysisField.getName() +
-                                  " and no override detected using param: " + PHRASE_QUERY_MAXLEN);
-        }
-        if (this.maxQueryPositionLength < this.maxIndexedPositionLength) {
-          throw new SolrException
-            (ErrorCode.BAD_REQUEST,
-             "Effective value of " + PHRASE_INDEX_MAXLEN + " (either from index analyzer shingle factory, " +
-             " or expert param override) must be less then or equal to the effective value of " +
-             PHRASE_QUERY_MAXLEN + " (either from query analyzer shingle factory, or expert param override)");
-        }
-      }
-      
-      this.summaryPre = params.get(PHRASE_SUMMARY_PRE, "{");
-      this.summaryPost = params.get(PHRASE_SUMMARY_POST, "}");
-
-      this.allPhrases = Phrase.extractPhrases(this.rawInput, this.analysisField,
-                                              this.maxIndexedPositionLength,
-                                              this.maxQueryPositionLength);
-        
-    }
-    
-    /**
-     * Given a list of phrases to be returned to the user, summarizes those phrases by decorating the 
-     * original input string to indicate where the identified phrases exist, using {@link #summaryPre} 
-     * and {@link #summaryPost}
-     *
-     * @param results a list of (non overlapping) Phrases that have been identified, sorted from highest scoring to lowest
-     * @return the original user input, decorated to indicate the identified phrases
-     */
-    public String summarize(final List<Phrase> results) {
-      final StringBuffer out = new StringBuffer(rawInput);
-      
-      // sort by *reverse* position so we can go back to front 
-      final List<Phrase> reversed = results.stream()
-        .sorted(Comparator.comparing((p -> p.getPositionStart()), Collections.reverseOrder()))
-        .collect(Collectors.toList());
-
-      for (Phrase p : reversed) {
-        out.insert(p.getOffsetEnd(), summaryPost);
-        out.insert(p.getOffsetStart(), summaryPre);
-      }
-      return out.toString();
-    }
-  }
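// The back-to-front insertion trick used by summarize(), in isolation: applying
// decorations at descending offsets keeps the earlier (smaller) offsets valid as
// the buffer grows. The input string and offsets here are illustrative only.
StringBuilder out = new StringBuilder("the quick brown fox");
out.insert(19, "}");  // close around "fox" (offsets 16..19) first...
out.insert(16, "{");
out.insert(9, "}");   // ...then around "the quick" (offsets 0..9)
out.insert(0, "{");
// out.toString() -> "{the quick} brown {fox}"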
-      
-  
-  /** 
-   * Model the data known about a single (candidate) Phrase -- which may or may not be indexed 
-   * @lucene.internal
-   */
-  public static final class Phrase {
-
-    /**
-     * Factory method for constructing a list of Phrases given the specified input and using the analyzer
-     * for the specified field.  The <code>maxIndexedPositionLength</code> and 
-     * <code>maxQueryPositionLength</code> provided *must* match the effective values used by 
-     * respective analyzers.
-     */
-    public static List<Phrase> extractPhrases(final String input, final SchemaField analysisField,
-                                              final int maxIndexedPositionLength,
-                                              final int maxQueryPositionLength) {
-
-      // TODO: rather than requiring the query analyzer to produce the Phrases for us (assuming Shingles)
-      // we could potentially just require that it produces unigrams compatible with the unigrams in the
-      // indexed fields, and then build our own Phrases at query time -- making the maxQueryPositionLength
-      // a 100% run time configuration option.
-      // But that could be tricky given an arbitrary analyzer -- we'd have to pay careful attention
-      // to positions, and we'd have to guess/assume what placeholders/fillers were used in the indexed Phrases
-      // (typically shingles)
-
-      assert maxIndexedPositionLength <= maxQueryPositionLength;
-      
-      final CharsRefBuilder buffer = new CharsRefBuilder();
-      final FieldType ft = analysisField.getType();
-      final Analyzer analyzer = ft.getQueryAnalyzer();
-      final List<Phrase> results = new ArrayList<>(42);
-      try (TokenStream tokenStream = analyzer.tokenStream(analysisField.getName(), input)) {
-        
-        final OffsetAttribute offsetAttr = tokenStream.addAttribute(OffsetAttribute.class);
-        final PositionIncrementAttribute posIncAttr = tokenStream.addAttribute(PositionIncrementAttribute.class);
-        final PositionLengthAttribute posLenAttr = tokenStream.addAttribute(PositionLengthAttribute.class);
-        final TermToBytesRefAttribute termAttr = tokenStream.addAttribute(TermToBytesRefAttribute.class);
-        
-        int position = 0;
-        int lastPosLen = -1;
-        
-        tokenStream.reset();
-        while (tokenStream.incrementToken()) {
-          final Phrase phrase = new Phrase();
-
-          final int posInc = posIncAttr.getPositionIncrement();
-          final int posLen = posLenAttr.getPositionLength();
-
-          if (0 == posInc && posLen <= lastPosLen) {
-            // This requirement that analyzers return tokens in ascending order of length
-            // is currently necessary for the "linking" logic below to work;
-            // if people run into real world situations where this is problematic,
-            // we can relax this check if we also make the linking logic more complex
-            // (ie: less optimized)
-            throw new SolrException
-              (ErrorCode.BAD_REQUEST, "Phrase identification currently requires that " +
-               "the analyzer used must produce tokens that overlap in increasing order of length. ");
-          }
-          
-          position += posInc;
-          lastPosLen = posLen;
-          
-          phrase.position_start = position;
-          phrase.position_end = position + posLen;
-          
-          phrase.is_indexed = (posLen <= maxIndexedPositionLength);
-          
-          phrase.offset_start = offsetAttr.startOffset();
-          phrase.offset_end = offsetAttr.endOffset();
-
-          // populate the subsequence directly from the raw input using the offsets,
-          // (instead of using the TermToBytesRefAttribute) so we preserve the original
-          // casing, whitespace, etc...
-          phrase.subSequence = input.subSequence(phrase.offset_start, phrase.offset_end);
-          
-          if (phrase.is_indexed) {
-            // populate the bytes so we can build term queries
-            phrase.bytes = BytesRef.deepCopyOf(termAttr.getBytesRef());
-          }
-          
-          results.add(phrase);
-        }
-        tokenStream.end();
-      } catch (IOException e) {
-        throw new SolrException(ErrorCode.SERVER_ERROR,
-                                "Analysis error extracting phrases from: " + input, e); 
-      }
-      
-      // fill in the relationships of each phrase
-      //
-      // NOTE: this logic currently requires that the phrases are sorted by position ascending
-      // (automatic because of how PositionIncrementAttribute works) then by length ascending
-      // (when positions are tied).
-      // We could de-optimize this code if we find that secondary ordering is too restrictive for
-      // some analyzers
-      //
-      // NOTE: changes to the scoring model may allow us to optimize/prune down the relationships tracked,
-      // ...OR.... may require us to add/track more details about sub/parent phrases
-      //
-      for (int p = 0; p < results.size(); p++) {
-        final Phrase current = results.get(p);
-        if (! current.is_indexed) {
-          // we're not an interesting sub phrase of anything
-          continue;
-        }
-        
-        // set up links from the phrase to itself if needed
-        addLinkages(current, current, maxIndexedPositionLength);
-        
-        // scan backwards looking for phrases that might include us...
-        BEFORE: for (int i = p-1; 0 <= i; i--) {
-          final Phrase previous = results.get(i);
-          if (previous.position_start < (current.position_end - maxQueryPositionLength)) {
-            // we've scanned so far back nothing else is viable
-            break BEFORE;
-          }
-          // any 'previous' phrases must start where current starts or earlier,
-          // so only need to check the end...
-          if (current.position_end <= previous.position_end) {
-            addLinkages(previous, current, maxIndexedPositionLength);
-          }
-        }
-        // scan forwards looking for phrases that might include us...
-        AFTER: for (int i = p+1; i < results.size(); i++) {
-          final Phrase next = results.get(i);
-          // the only way a phrase that comes after current can include current is
-          // if they have the same start position...
-          if (current.position_start != next.position_start) {
-            // we've scanned so far forward nothing else is viable
-            break AFTER;
-          }
-          // any 'next' phrases must start where current starts, so only need to check the end...
-          if (current.position_end <= next.position_end) {
-            addLinkages(next, current, maxIndexedPositionLength);
-          }
-        }
-      }
-      
-      return Collections.unmodifiableList(results);
-    }
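// A hedged, self-contained approximation of the query-side shingle chain from the
// class javadoc, showing the token stream extractPhrases() walks: at each position
// the unigram comes first, then successively longer shingles at posInc 0, which is
// exactly the "increasing order of length" the check above enforces.
Analyzer shingleAnalyzer = new Analyzer() {
  @Override
  protected TokenStreamComponents createComponents(String fieldName) {
    Tokenizer tok = new WhitespaceTokenizer();
    ShingleFilter shingles = new ShingleFilter(tok, 2, 3); // minShingleSize=2, maxShingleSize=3
    shingles.setOutputUnigrams(true);
    return new TokenStreamComponents(tok, shingles);
  }
};
// For "brown fox jumped" this yields (position: tokens):
//   1: "brown", "brown fox", "brown fox jumped"
//   2: "fox",   "fox jumped"
//   3: "jumped"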
-
-    /** 
-     * Given two phrases, one of which is a superset of the other, adds the necessary linkages 
-     * needed by the scoring model
-     */
-    private static void addLinkages(final Phrase outer, final Phrase inner,
-                                    final int maxIndexedPositionLength) {
-      
-      assert outer.position_start <= inner.position_start;
-      assert inner.position_end <= outer.position_end;
-      assert inner.is_indexed;
-      
-      final int inner_len = inner.getPositionLength();
-      if (1 == inner_len) {
-        outer.individualIndexedTerms.add(inner);
-      }
-      if (maxIndexedPositionLength == inner_len
-          || (inner == outer && inner_len < maxIndexedPositionLength)) {
-        outer.largestIndexedSubPhrases.add(inner);
-      }
-      if (outer.is_indexed && inner != outer) {
-        inner.indexedSuperPhrases.add(outer);
-      }
-    }
-
-    /**
-     * Format the phrases suitable for returning in a shard response
-     * @see #populateStats(List,List)
-     */
-    public static List<NamedList<Object>> formatShardResponse(final List<Phrase> phrases) {
-      List<NamedList<Object>> results = new ArrayList<>(phrases.size());
-      for (Phrase p : phrases) {
-        NamedList<Object> data = new SimpleOrderedMap<>();
-        // quick and dirty way to validate that our shards aren't using different analyzers
-        // so the coordinating node can fail fast when merging the results
-        data.add("checksum", p.getChecksum());
-        if (p.is_indexed) {
-          data.add("ttf", new NamedList<Object>(p.phrase_ttf));
-          data.add("df", new NamedList<Object>(p.phrase_df));
-        }
-        data.add("conj_dc", new NamedList<Object>(p.subTerms_conjunctionCounts));
-
-        results.add(data);
-      }
-      return results;
-    }
-    
-    /**
-     * Populates the phrases with (merged) stats from a remote shard
-     * @see #formatShardResponse
-     */
-    public static void populateStats(final List<Phrase> phrases, final List<NamedList<Object>> shardData) {
-      final int numPhrases = phrases.size();
-      if (shardData.size() != numPhrases) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-                                "num phrases in shard data not consistent: " +
-                                numPhrases + " vs " + shardData.size());
-      }
-      for (int i = 0; i < phrases.size(); i++) {
-        // rather than being paranoid about the expected structure, we'll just let the low level
-        // code throw an NPE / CCE / AIOOBE / etc. and wrap & rethrow later...
-        try {
-          final Phrase p = phrases.get(i);
-          final NamedList<Object> data = shardData.get(i);
-          // sanity check the correct phrase
-          if (! p.getChecksum().equals(data.get("checksum"))) {
-            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-                                    "phrase #" + i + " in shard data had invalid checksum");
-          }
-          if (p.is_indexed) {
-            for (Map.Entry<String,Long> ttf : (NamedList<Long>) data.get("ttf")) {
-              p.phrase_ttf.merge(ttf.getKey(), ttf.getValue(), Long::sum);
-            }
-            for (Map.Entry<String,Long> df : (NamedList<Long>) data.get("df")) {
-              p.phrase_df.merge(df.getKey(), df.getValue(), Long::sum);
-            }
-          }
-          for (Map.Entry<String,Long> conj_dc : (NamedList<Long>) data.get("conj_dc")) {
-            p.subTerms_conjunctionCounts.merge(conj_dc.getKey(), conj_dc.getValue(), Long::sum);
-          }
-        } catch (RuntimeException e) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-                                  "shard data for phrase#" + i + " not consistent", e);
-        }
-      }
-    }
-    
-    /**
-     * Populates the phrases with stats from the local index for the specified fields 
-     */
-    public static void populateStats(final List<Phrase> phrases, final Collection<String> fieldNames,
-                                     final SolrIndexSearcher searcher) throws IOException {
-      final IndexReader reader = searcher.getIndexReader();
-      for (String field : fieldNames) {
-        for (Phrase phrase : phrases) {
-          if (phrase.is_indexed) {
-            // add stats based on this entire phrase as an indexed term
-            final Term t = new Term(field, phrase.bytes);
-            phrase.phrase_ttf.put(field, reader.totalTermFreq(t));
-            phrase.phrase_df.put(field, (long)reader.docFreq(t));
-          }
-
-          // even if our phrase is too long to be indexed whole, add stats based on the
-          // conjunction of all the individual terms in the phrase
-          List<Query> filters = new ArrayList<>(phrase.individualIndexedTerms.size());
-          for (Phrase term : phrase.individualIndexedTerms) {
-            // trust the SolrIndexSearcher to cache & intersect the individual terms so that this
-            // can be efficient regardless of how often terms are re-used multiple times in the input/phrases
-            filters.add(new TermQuery(new Term(field, term.bytes)));
-          }
-          final long count = searcher.getDocSet(filters).size();
-          phrase.subTerms_conjunctionCounts.put(field, count);
-        }
-      }
-    }
-    
-    /** 
-     * Uses the previously populated stats to populate each Phrase with its scores for the specified fields, 
-     * and its overall (weighted) total score.  This is not needed on shard requests.
-     * 
-     * @see #populateStats
-     * @see #getFieldScore(String)
-     * @see #getTotalScore
-     */
-    public static void populateScores(final PhrasesContextData contextData) {
-      populateScores(contextData.allPhrases, contextData.fieldWeights, 
-                     contextData.maxIndexedPositionLength,
-                     contextData.maxQueryPositionLength);
-    }
-    
-    /** 
-     * Public for testing purposes
-     * @see #populateScores(PhrasesIdentificationComponent.PhrasesContextData)
-     * @lucene.internal
-     */
-    public static void populateScores(final List<Phrase> phrases, final Map<String,Double> fieldWeights,
-                                      final int maxIndexedPositionLength,
-                                      final int maxQueryPositionLength) {
-      final double total_weight = fieldWeights.values().stream().mapToDouble(Double::doubleValue).sum();
-      for (Phrase phrase : phrases) {
-        double phrase_cumulative_score = 0.0D;
-        for (Map.Entry<String,Double> entry : fieldWeights.entrySet()) {
-          final String field = entry.getKey();
-          final double weight = entry.getValue();
-          double field_score = computeFieldScore(phrase, field,
-                                                 maxIndexedPositionLength, maxQueryPositionLength);
-          phrase.fieldScores.put(field,field_score);
-          phrase_cumulative_score += (field_score * weight);
-        }
-        phrase.total_score = (total_weight < 0 ? Double.NEGATIVE_INFINITY
-                              : (phrase_cumulative_score / total_weight));
-      }
-    }
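// Worked example of the weighted average computed above (numbers are illustrative):
// with fieldWeights {title=2.0, body=1.0} and field scores {title=0.6, body=0.3},
double totalWeight = 2.0 + 1.0;
double totalScore = (0.6 * 2.0 + 0.3 * 1.0) / totalWeight; // = 1.5 / 3.0 = 0.5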
-    
-    private Phrase() {
-      // No-Op
-    }
-
-    private boolean is_indexed;
-    private double total_score = -1.0D; // until we get a computed score, this is "not a phrase"
-    
-    private CharSequence subSequence;
-    private BytesRef bytes;
-    private int offset_start;
-    private int offset_end;
-    private int position_start;
-    private int position_end;
-    private Integer checksum = null;
-    
-    /** NOTE: Indexed phrases of length 1 are the (sole) individual terms of themselves */
-    private final List<Phrase> individualIndexedTerms = new ArrayList<>(7);
-    /** 
-     * NOTE: Indexed phrases of length less than the max indexed length are the (sole) 
-     * largest sub-phrases of themselves 
-     */
-    private final List<Phrase> largestIndexedSubPhrases = new ArrayList<>(7);
-    /** Phrases larger than this phrase which are indexed and fully contain it */
-    private final List<Phrase> indexedSuperPhrases = new ArrayList<>(7);
-    
-    // NOTE: keys are field names
-    private final Map<String,Long> subTerms_conjunctionCounts = new TreeMap<>();
-    private final Map<String,Long> phrase_ttf = new TreeMap<>();
-    private final Map<String,Long> phrase_df = new TreeMap<>();
-    private final Map<String,Double> fieldScores = new TreeMap<>();
-
-    public String toString() {
-      return "'" + subSequence + "'"
-        + "[" + offset_start + ":" + offset_end + "]"
-        + "[" + position_start + ":" + position_end + "]";
-    }
-
-    public NamedList getDetails() {
-      SimpleOrderedMap<Object> out = new SimpleOrderedMap<Object>();
-      out.add("text", subSequence);
-      out.add("offset_start", getOffsetStart());
-      out.add("offset_end", getOffsetEnd());
-      out.add("score", getTotalScore());
-      out.add("field_scores", fieldScores);
-      return out;
-    }
-    
-    /** 
-     * Computes &amp; caches the checksum of this Phrase (if not already cached).
-     * Needed only when merging shard data to validate that there are no inconsistencies with the remote shards.
-     */
-    private Integer getChecksum() {
-      if (null == checksum) {
-        checksum = Arrays.hashCode(new int[] { offset_start, offset_end, position_start, position_end });
-      }
-      return checksum;
-    }
-    /** The characters from the original input that correspond with this Phrase */
-    public CharSequence getSubSequence() {
-      return subSequence;
-    }
-    
-    /** 
-     * Returns the list of "individual" (ie: <code>getPositionLength()==1</code> terms.
-     * NOTE: Indexed phrases of length 1 are the (sole) individual terms of themselves
-     */
-    public List<Phrase> getIndividualIndexedTerms() { 
-      return individualIndexedTerms;
-    }
-    /** 
-     * Returns the list of (overlapping) sub phrases that have the largest possible size based on 
-     * the effective value of {@link PhrasesContextData#maxIndexedPositionLength}. 
-     * NOTE: Indexed phrases of length less than the max indexed length are the (sole) 
-     * largest sub-phrases of themselves.
-     */
-    public List<Phrase> getLargestIndexedSubPhrases() {
-      return largestIndexedSubPhrases;
-    }
-    /** 
-     * Returns all phrases larger than this phrase, which fully include this phrase, and are indexed.
-     * NOTE: A Phrase is <em>never</em> the super phrase of itself.
-     */
-    public List<Phrase> getIndexedSuperPhrases() {
-      return indexedSuperPhrases;
-    }
-
-    /** NOTE: positions start at '1' */
-    public int getPositionStart() {
-      return position_start;
-    }
-    /** NOTE: positions start at '1' */
-    public int getPositionEnd() {
-      return position_end;
-    }
-    public int getPositionLength() {
-      return position_end - position_start;
-    }
-    /** Each set bit identifies a position filled by this Phrase */
-    public BitSet getPositionsBitSet() {
-      final BitSet result = new BitSet();
-      result.set(position_start, position_end);
-      return result;
-    }
-    public int getOffsetStart() {
-      return offset_start;
-    }
-    public int getOffsetEnd() {
-      return offset_end;
-    }
-    
-    /** 
-     * Returns the overall score for this Phrase.  In the current implementation, 
-     * the only guarantee made regarding the range of possible values is that 0 (or less) means 
-     * it is not a good phrase.
-     *
-     * @return A numeric value indicating the confidence in this Phrase, higher numbers are higher confidence.
-     */
-    public double getTotalScore() {
-      return total_score;
-    }
-    /** 
-     * Returns the score for this Phrase in this given field. In the current implementation, 
-     * the only guarantee made regarding the range of possible values is that 0 (or less) means 
-     * it is not a good phrase.
-     *
-     * @return A numeric value indicating the confidence in this Phrase for this field, higher numbers are higher confidence.
-     */
-    public double getFieldScore(String field) {
-      return fieldScores.getOrDefault(field, -1.0D);
-    }
-    
-    /** 
-     * Returns the total term frequency (TTF) of this (indexed) Phrase <em>as a term</em> in the specified field. 
-     * NOTE: behavior of calling this method is undefined unless one of the {@link #populateStats} 
-     * methods has been called with this field.
-     */
-    public long getTTF(String field) {
-      if (!is_indexed) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-                                "TTF is only available for indexed phrases");
-      }
-      return phrase_ttf.getOrDefault(field, 0L);
-    }
-    /** 
-     * Returns the number of documents that contain <em>all</em> of the {@link #getIndividualIndexedTerms} 
-     * that make up this Phrase, in the specified field. 
-     * NOTE: behavior of calling this method is undefined unless one of the {@link #populateStats} 
-     * methods has been called with this field.
-     */
-    public long getConjunctionDocCount(String field) {
-      return subTerms_conjunctionCounts.getOrDefault(field, 0L);
-    }
-    /** 
-     * Returns the number of documents that contain this (indexed) Phrase <em>as a term</em> 
-     * in the specified field. 
-     * NOTE: behavior of calling this method is undefined unless one of the {@link #populateStats} 
-     * methods has been called with this field.
-     */
-    public long getDocFreq(String field) {
-      if (!is_indexed) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-                                "DF is only available for indexed phrases");
-      }
-      return phrase_df.getOrDefault(field, 0L);
-    }
-
-    /** 
-     * Uses the previously populated stats to compute a score for the specified field.
-     *
-     * <p>
-     * The current implementation returns scores in the range of <code>[0,1]</code>, but this 
-     * may change in future implementations.  The only current guarantees are:
-     * </p>
-     * 
-     * <ul>
-     * <li>0 (or less) means this is guaranteed to not be a phrase</li>
-     * <li>larger numbers are higher confidence</li>
-     * </ul>
-     * 
-     * @see #populateStats
-     * @see #populateScores
-     * @see #getFieldScore(String)
-     * @return a score value
-     */
-    private static double computeFieldScore(final Phrase input,
-                                            final String field,
-                                            final int maxIndexedPositionLength,
-                                            final int maxQueryPositionLength) {
-      final long num_indexed_sub_phrases = input.getLargestIndexedSubPhrases().size();
-      assert 0 <= num_indexed_sub_phrases; // should be impossible
-
-      if (input.getIndividualIndexedTerms().size() < input.getPositionLength()) {
-        // there are "gaps" in our input, where individual words have not been indexed (stop words, 
-        // or multivalue position gap) which means we are not a viable candidate for being a valid Phrase.
-        return -1.0D;
-      }
-      
-      final long phrase_conj_count = input.getConjunctionDocCount(field);
-      // if there isn't a single document containing all the terms in our
-      // phrase, then it is 100% not a phrase
-      if (phrase_conj_count <= 0) {
-        return -1.0D;
-      }
-      
-      // single words automatically score 0.0 (unless they already scored less for not existing)
-      if (input.getPositionLength() <= 1) {
-        return 0.0D;
-      }
-      
-      double field_score = 0.0D;
-      long max_sub_conj_count = phrase_conj_count;
-      
-      // At the moment, the contribution of each "words" sub-Phrase to the field score of the input
-      // Phrase is independent of any context of "input".  Depending on if/how sub-phrase scoring
-      // changes, we might consider computing the scores of all the indexed phrases first, and
-      // caching the portions of their values that are re-used when computing the scores of
-      // longer phrases?
-      //
-      // This would make the overall scoring of all phrases a lot more complicated,
-      // but could save CPU cycles? 
-      // (particularly when maxIndexedPositionLength <<< maxQueryPositionLength ???)
-      //
-      // My gut says that knowing the conj_count(input) "context" should help us score the 
-      // sub-phrases better, but I can't yet put my finger on why/how.  Maybe by comparing
-      // the conj_count(input) to the max(conj_count(parent of words)) ?
-      
-      // for each of the longest indexed phrases, aka indexed sub-sequence of "words", we have...
-      for (Phrase words : input.getLargestIndexedSubPhrases()) {
-        // we're going to compute scores in range of [-1:1] to indicate the likelihood that our
-        // "words" should be used as a "phrase", based on a bayesian document categorization model,
-        // where the "words as a phrase" (aka: phrase) is our candidate category.
-        //
-        //  P(words|phrase) * P(phrase) - P(words|not phrase) * P(not phrase)
-        //
-        // Where...
-        //  P(words|phrase)     =  phrase_ttf / min(word_ttf)
-        //  P(phrase)           =~ phrase_docFreq / conj_count(words in phrase)      *SEE NOTE BELOW*
-        //  P(words|not phrase) =  phrase_ttf / max(word_ttf) 
-        //  P(not a phrase)     =  1 - P(phrase)
-        //
-        //       ... BUT! ...
-        //
-        // NOTE: we're going to reduce our "P(phrase) by the max "P(phrase)" of all the (indexed)
-        // candidate phrases we are a sub-phrase of, to try to offset the inherent bias in favor 
-        // of small indexed phrases -- because anytime the super-phrase exists, the sub-phrase exists
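-        //
-        // For illustration, with purely hypothetical numbers (variable names as in the
-        // code below): phrase_ttf=50, min(word_ttf)=100, max(word_ttf)=1000,
-        // phrase_conj_count=40, words_conj_count=80, and no indexed wrapper phrases:
-        //   words_phrase_prob     = 50/100  = 0.5
-        //   phrase_prob           = 40/80   = 0.5
-        //   words_not_phrase_prob = 50/1000 = 0.05
-        //   words_score = (0.5 * 0.5) - (0.05 * (1 - 0.5)) = 0.225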
-
-        
-        // IDEA: consider replacing this entire Bayesian model with LLR (or rootLLR)...
-        //  http://mahout.apache.org/docs/0.13.0/api/docs/mahout-math/org/apache/mahout/math/stats/LogLikelihood.html
-        // ...where we compute LLR over each of the TTF of the pairs of adjacent sub-phrases of each 
-        // indexed phrase and take the min|max|avg of the LLR scores.
-        //
-        // ie: for indexed shingle "quick brown fox" compute LLR(ttf("quick"), ttf("brown fox")) &
-        // LLR(ttf("quick brown"), ttf("fox")) using ttf("quick brown fox") as the co-occurance
-        // count, and sumTTF-ttf("quick")-ttf("brown")-ttf("fox") as the "something else"
-        //
-        // (we could actually compute LLR stats over TTF and DF and combine them)
-        //
-        // NOTE: Going the LLR/rootLLR route would require building a full "tree" of every (indexed)
-        // sub-phrase of every other phrase (or at least: all siblings of diff sizes that add up to
-        // an existing phrase).  As well as require us to give up on a predictable "range" of
-        // legal values for scores (IIUC from the LLR docs)
-        
-        final long phrase_ttf = words.getTTF(field);
-        final long phrase_df = words.getDocFreq(field);
-        final long words_conj_count = words.getConjunctionDocCount(field);
-        max_sub_conj_count = Math.max(words_conj_count, max_sub_conj_count);
-        
-        final double max_wrapper_phrase_probability = 
-          words.getIndexedSuperPhrases().stream()
-          .mapToDouble(p -> p.getConjunctionDocCount(field) <= 0 ?
-                       // special case check -- we already know *our* conj count > 0,
-                       // but we need a similar check for wrapper phrases: if <= 0, their probability is 0
-                       0.0D : ((double)p.getDocFreq(field) / p.getConjunctionDocCount(field))).max().orElse(0.0D);
-        
-        final LongSummaryStatistics words_ttfs = 
-          words.getIndividualIndexedTerms().stream()
-          .collect(Collectors.summarizingLong(t -> t.getTTF(field)));
-        
-        final double words_phrase_prob = (phrase_ttf / (double)words_ttfs.getMin());
-        final double words_not_phrase_prob = (phrase_ttf / (double)words_ttfs.getMax());
-        
-        final double phrase_prob = (phrase_conj_count / (double)words_conj_count);
-        
-          
-        final double phrase_score = words_phrase_prob * (phrase_prob - max_wrapper_phrase_probability);
-        final double not_phrase_score =  words_not_phrase_prob * (1 - (phrase_prob - max_wrapper_phrase_probability));
-        final double words_score = phrase_score - not_phrase_score;
-        
-        field_score += words_score;
-      }
-
-      // NOTE: the "scaling" factors below can "increase" negative scores (by reducing the unsigned value)
-      // when they should ideally be penalizing the scores further, but since we currently don't care
-      // about any score lower than 0, it's not worth worrying about.
-      
-      // Average the accumulated score over the number of actual indexed sub-phrases that contributed
-      //
-      // NOTE: since we subsequently want to multiply the score by a fraction with num_indexed_sub_phrases
-      // in the numerator, we can skip this...
-      // SEE BELOW // field_score /= (double) num_indexed_sub_phrases;
-      
-      // If we leave field_score as is, then a phrase longer than the maxIndexedPositionLength
-      // will never score higher than the highest scoring sub-phrase it has (because we've averaged them)
-      // so we scale the scores against the longest possible phrase length we're considering
-      //
-      // NOTE: We don't use num_indexed_sub_phrases in the numerator since we skipped it when
-      // averaging above...
-      field_score *= ( 1.0D // SEE ABOVE // * ( (double)num_indexed_sub_phrases )
-                       / (1 + maxQueryPositionLength - maxIndexedPositionLength) );
-      
-      // scale the field_score based on the ratio of the conjunction docCount for the whole phrase
-      // relative to the largest conjunction docCount of its (largest indexed) sub phrases, to penalize
-      // the scores of very long phrases that exist very rarely relative to how often their
-      // sub phrases exist in the index
-      field_score *= ( ((double) phrase_conj_count) / max_sub_conj_count);
-
-      return field_score;
-    }
-  }
-
-  /** 
-   * Helper method, public for testing purposes only.
-   * <p>
-   * Given an analyzer, inspects it to determine if:
-   * <ul>
-   *  <li>it is a {@link TokenizerChain}</li>
-   *  <li>it contains exactly one instance of {@link ShingleFilterFactory}</li>
-   * </ul>
-   * <p>
-   * If these conditions are met, then this method returns the <code>maxShingleSize</code> 
-   * in effect for this analyzer, otherwise returns -1.
-   * </p>
-   * 
-   * @param analyzer the analyzer to inspect
-   * @return <code>maxShingleSize</code> if available
-   * @lucene.internal
-   */
-  public static int getMaxShingleSize(Analyzer analyzer) {
-    if (!TokenizerChain.class.isInstance(analyzer)) {
-      return -1;
-    }
-    
-    final TokenFilterFactory[] factories = ((TokenizerChain) analyzer).getTokenFilterFactories();
-    if (0 == factories.length) {
-      return -1;
-    }
-    int result = -1;
-    for (TokenFilterFactory tff : factories) {
-      if (ShingleFilterFactory.class.isInstance(tff)) {
-        if (0 < result) {
-          // more than one shingle factory in our analyzer, which is weird, so make no assumptions...
-          return -1;
-        }
-        // would be nice if there was an easy way to just ask a factory for the effective value
-        // of an argument...
-        final Map<String,String> args = tff.getOriginalArgs();
-        result = args.containsKey("maxShingleSize")
-          ? Integer.parseInt(args.get("maxShingleSize")) : ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE;
-      }
-    }
-    return result;
-  }
-}
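
For context, a minimal sketch of how getMaxShingleSize() above is meant to be
used, assuming a hypothetical SolrCore reference `core` and a field named
"body" (the schema/analyzer accessors mirror those used elsewhere in this
commit):

    Analyzer analyzer = core.getLatestSchema().getFieldType("body").getIndexAnalyzer();
    int maxShingle = PhrasesIdentificationComponent.getMaxShingleSize(analyzer);
    // -1 means: not a TokenizerChain with exactly one ShingleFilterFactory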

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/PivotFacet.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/PivotFacet.java b/solr/core/src/java/org/apache/solr/handler/component/PivotFacet.java
deleted file mode 100644
index 37a522e..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/PivotFacet.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.solr.common.params.FacetParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.handler.component.FacetComponent.FacetBase;
-
-/**
- * Models a single instance of a "pivot" specified by a {@link FacetParams#FACET_PIVOT} 
- * param, which may contain multiple nested fields.
- *
- * This class is also used to coordinate the refinement requests needed from various 
- * shards when processing a distributed request
- */
-public class PivotFacet extends FacetBase {
-
-  /** 
-   * Local param used to indicate that refinements are required on a pivot. Should
-   * also be used as the prefix for concatenating with the value to determine the
-   * name of the multi-valued param that will contain all of the values needed for 
-   * refinement.
-   */
-  public static final String REFINE_PARAM = "fpt";
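-  // (illustration: concatenating this prefix with a pivot's index yields param
-  //  names like "fpt0", whose values are the value-paths needing refinement)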
-  
-  // TODO: is this really needed? can't we just loop over 0<=i<rb.shards.length ?
-  public final BitSet knownShards = new BitSet();
-  
-  private final Map<Integer, List<PivotFacetValue>> queuedRefinements = new HashMap<>();
-  
-  // if null, then either we haven't collected any responses from shards
-  // or all the shards that have responded so far haven't had any values for the top
-  // field of this pivot.  May be null forever if no doc in any shard has a value 
-  // for the top field of the pivot
-  private PivotFacetField pivotFacetField;
-  
-  public PivotFacet(ResponseBuilder rb, String facetStr) {
-    super(rb, FacetParams.FACET_PIVOT, facetStr);
-  }
-  
-  /**
-   * Tracks that the specified shard needs to be asked to refine the specified 
-   * {@link PivotFacetValue} 
-   * 
-   * @see #getQueuedRefinements
-   */
-  public void addRefinement(int shardNumber, PivotFacetValue value) {
-    queuedRefinements.computeIfAbsent(shardNumber, k -> new ArrayList<>()).add(value);
-  }
-  
-  /**
-   * An immutable List of the {@link PivotFacetValue}s that need to be
-   * refined for this pivot.  Once these refinements have been processed, 
-   * the caller should clear them using {@link #removeAllRefinementsForShard}
-   *
-   * @see #addRefinement
-   * @see #removeAllRefinementsForShard
-   * @return a list of the values to refine, or an empty list.
-   */
-  public List<PivotFacetValue> getQueuedRefinements(int shardNumber) {
-    List<PivotFacetValue> raw = queuedRefinements.get(shardNumber);
-    if (null == raw) {
-      raw = Collections.<PivotFacetValue>emptyList();
-    }
-    return Collections.unmodifiableList(raw);
-  }
-
-  /**
-   * Clears the list of queued refinements for the specified shard
-   *
-   * @see #addRefinement
-   * @see #getQueuedRefinements
-   */
-  public void removeAllRefinementsForShard(int shardNumber) {
-    queuedRefinements.remove(shardNumber);
-  }
-  
-  /**
-   * If true, then additional refinement requests are needed to flesh out the correct
-   * counts for this Pivot
-   *
-   * @see #getQueuedRefinements
-   */
-  public boolean isRefinementsRequired() {
-    return ! queuedRefinements.isEmpty();
-  }
-  
-  /** 
-   * A recursive method for generating <code>NamedLists</code> for this pivot
-   * suitable for including in a pivot facet response to the original distributed request.
-   *
-   * @see PivotFacetField#trim
-   * @see PivotFacetField#convertToListOfNamedLists
-   */
-  public List<NamedList<Object>> getTrimmedPivotsAsListOfNamedLists() {
-    if (null == pivotFacetField) {
-      // no values in any shard for the top field of this pivot
-      return Collections.<NamedList<Object>>emptyList();
-    }
-
-    pivotFacetField.trim();
-    return pivotFacetField.convertToListOfNamedLists();
-  }  
-
-  /** 
-   * A recursive method for determining which {@link PivotFacetValue}s need to be
-   * refined for this pivot.
-   *
-   * @see PivotFacetField#queuePivotRefinementRequests
-   */
-  public void queuePivotRefinementRequests() {
-    if (null == pivotFacetField) return; // NOOP
-
-    pivotFacetField.sort();
-    pivotFacetField.queuePivotRefinementRequests(this);
-  }
-  
-  /**
-   * Recursively merges the response from the specified shard, tracking the known shards.
-   * 
-   * @see PivotFacetField#contributeFromShard
-   * @see PivotFacetField#createFromListOfNamedLists
-   */
-  public void mergeResponseFromShard(int shardNumber, ResponseBuilder rb, List<NamedList<Object>> response) {
-    
-    knownShards.set(shardNumber);
-    if (pivotFacetField == null) {
-      pivotFacetField = PivotFacetField.createFromListOfNamedLists(shardNumber, rb,  null,  response);
-    } else {
-      pivotFacetField.contributeFromShard(shardNumber, rb, response);
-    }
-  }
-
-  public String toString() {
-    return "[" + facetStr + "] | " + this.getKey();
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/PivotFacetField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/PivotFacetField.java b/solr/core/src/java/org/apache/solr/handler/component/PivotFacetField.java
deleted file mode 100644
index 9b73841..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/PivotFacetField.java
+++ /dev/null
@@ -1,397 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-
-import org.apache.solr.common.params.FacetParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-
-
-/**
- * Models a single field somewhere in a hierarchy of fields as part of a pivot facet.  
- * This pivot field contains {@link PivotFacetValue}s which may each contain a nested
- * {@link PivotFacetField} child.  This <code>PivotFacetField</code> may itself 
- * be a child of a {@link PivotFacetValue} parent.
- *
- * @see PivotFacetValue
- * @see PivotFacetFieldValueCollection
- */
-@SuppressWarnings("rawtypes")
-public class PivotFacetField {
-  
-  public final String field;
-
-  // null if this is a top level pivot, 
-  // otherwise the value of the parent pivot we are nested under
-  public final PivotFacetValue parentValue;
-
-  public final PivotFacetFieldValueCollection valueCollection;
-  
-  // Facet parameters relating to this field
-  private final int facetFieldLimit;
-  private final int facetFieldMinimumCount;
-  private final int facetFieldOffset;  
-  private final String facetFieldSort;
-
-  private final Map<Integer, Integer> numberOfValuesContributedByShard = new HashMap<>();
-  private final Map<Integer, Integer> shardLowestCount = new HashMap<>();
-
-  private boolean needRefinementAtThisLevel = true;
-    
-  private PivotFacetField(ResponseBuilder rb, PivotFacetValue parent, String fieldName) {
-    
-    field = fieldName;
-    parentValue = parent;
-    
-    // facet params
-    SolrParams parameters = rb.req.getParams();
-    facetFieldMinimumCount = parameters.getFieldInt(field, FacetParams.FACET_PIVOT_MINCOUNT, 1);
-    facetFieldOffset = parameters.getFieldInt(field, FacetParams.FACET_OFFSET, 0);
-    facetFieldLimit = parameters.getFieldInt(field, FacetParams.FACET_LIMIT, 100);
-    String defaultSort = (facetFieldLimit > 0) ? FacetParams.FACET_SORT_COUNT : FacetParams.FACET_SORT_INDEX;
-    facetFieldSort = parameters.getFieldParam(field, FacetParams.FACET_SORT, defaultSort);
-
-    valueCollection = new PivotFacetFieldValueCollection(facetFieldMinimumCount, facetFieldOffset, facetFieldLimit, facetFieldSort);
-    
-    if ( (facetFieldLimit < 0) || 
-         // TODO: possible refinement issue if limit=0 & mincount=0 & missing=true
-         // (ie: we only want the missing count for this field)
-         (facetFieldLimit <= 0 && facetFieldMinimumCount == 0) ||
-         (facetFieldSort.equals(FacetParams.FACET_SORT_INDEX) && facetFieldMinimumCount <= 0) 
-         ) {
-      // in any of these cases, there's no need to refine this level of the pivot
-      needRefinementAtThisLevel = false;
-    }
-  }
-
-  /** 
-   * A recursive method that walks up the tree of pivot fields/values to build 
-   * a list of String representations of the values that lead down to this 
-   * PivotFacetField.
-   *
-   * @return A mutable List of the pivot values leading down to this pivot field, 
-   *      will never be null but may contain nulls and may be empty if this is a top 
-   *      level pivot field
-   * @see PivotFacetValue#getValuePath
-   */
-  public List<String> getValuePath() {
-    if (null != parentValue) {
-      return parentValue.getValuePath();
-    }
-    return new ArrayList<String>(3);
-  }
-
-  /**
-   * A recursive method to construct a new <code>PivotFacetField</code> object from 
-   * the contents of the {@link NamedList}s provided by the specified shard, relative 
-   * to a parent value (if this is not the top field in the pivot hierarchy)
-   *
-   * The associated child {@link PivotFacetValue}s will be recursively built as well.
-   *
-   * @see PivotFacetValue#createFromNamedList
-   * @param shardNumber the id of the shard that provided this data
-   * @param rb The response builder of the current request
-   * @param owner the parent value in the current pivot (may be null)
-   * @param pivotValues the data from the specified shard for this pivot field, may be null or empty
-   * @return the new PivotFacetField, null if pivotValues is null or empty.
-   */
-  public static PivotFacetField createFromListOfNamedLists(int shardNumber, ResponseBuilder rb, PivotFacetValue owner, List<NamedList<Object>> pivotValues) {
-    
-    if (null == pivotValues || pivotValues.isEmpty()) return null;
-    
-    NamedList<Object> firstValue = pivotValues.get(0);
-    PivotFacetField createdPivotFacetField 
-      = new PivotFacetField(rb, owner, PivotFacetHelper.getField(firstValue));
-    
-    int lowestCount = Integer.MAX_VALUE;
-    
-    for (NamedList<Object> pivotValue : pivotValues) {
-      
-      lowestCount = Math.min(lowestCount, PivotFacetHelper.getCount(pivotValue));
-      
-      PivotFacetValue newValue = PivotFacetValue.createFromNamedList
-        (shardNumber, rb, createdPivotFacetField, pivotValue);
-      createdPivotFacetField.valueCollection.add(newValue);
-    }
-      
-    createdPivotFacetField.shardLowestCount.put(shardNumber,  lowestCount);
-    createdPivotFacetField.numberOfValuesContributedByShard.put(shardNumber, pivotValues.size());
-
-    return createdPivotFacetField;
-  }
-  
-  /**
-   * Destructive method that recursively prunes values from the data structure 
-   * based on the counts for those values and the effective sort, mincount, limit, 
-   * and offset being used for each field.
-   * <p>
-   * This method should only be called after all refinement is completed, just prior 
-   * to calling {@link #convertToListOfNamedLists}
-   * </p>
-   *
-   * @see PivotFacet#getTrimmedPivotsAsListOfNamedLists
-   * @see PivotFacetFieldValueCollection#trim
-   */
-  public void trim() {
-    // SOLR-6331...
-    //
-    // we can probably optimize the memory usage by trimming each level of the pivot once
-    // we know we've fully refined the values at that level 
-    // (ie: fold this logic into refineNextLevelOfFacets)
-    this.valueCollection.trim();
-  }
-  
-  /**
-   * Recursively sorts the collection of values associated with this field, and 
-   * any sub-pivots those values have.
-   *
-   * @see FacetParams#FACET_SORT
-   * @see PivotFacetFieldValueCollection#sort
-   */
-  public void sort() {
-    this.valueCollection.sort();
-  }
-  
-  /** 
-   * A recursive method for generating <code>NamedLists</code> from this field 
-   * suitable for including in a pivot facet response to the original distributed request.
-   */
-  public List<NamedList<Object>> convertToListOfNamedLists() { 
-    
-    List<NamedList<Object>> convertedPivotList = null;
-    
-    if (valueCollection.size() > 0) {
-      convertedPivotList = new LinkedList<>();
-      for (PivotFacetValue pivot : valueCollection)
-        convertedPivotList.add(pivot.convertToNamedList());
-    }
-  
-    return convertedPivotList;
-  }     
-
-  /** 
-   * A recursive method for determining which {@link PivotFacetValue}s need to be
-   * refined for this pivot.
-   *
-   * @see PivotFacet#queuePivotRefinementRequests
-   */
-  public void queuePivotRefinementRequests(PivotFacet pf) {
-    
-    if (needRefinementAtThisLevel) {
-
-      if (0 < facetFieldMinimumCount) {
-        // missing is always a candidate for refinement if at least one shard met the minimum
-        PivotFacetValue missing = valueCollection.getMissingValue();
-        if (null != missing) {
-          processDefiniteCandidateElement(pf, missing);
-        }
-      }
-
-      if (! valueCollection.getExplicitValuesList().isEmpty()) {
-
-        if (FacetParams.FACET_SORT_COUNT.equals(facetFieldSort)) {
-          // we only need to refine things that are currently in our limit,
-          // or might be in our limit if we get increased counts from shards that
-          // didn't include this value the first time
-          final int indexOfCountThreshold 
-            = Math.min(valueCollection.getExplicitValuesListSize(), 
-                       facetFieldOffset + facetFieldLimit) - 1;
-          final int countThreshold = valueCollection.getAt(indexOfCountThreshold).getCount();
-          
-          int positionInResults = 0;
-          
-          for (PivotFacetValue value : valueCollection.getExplicitValuesList()) {
-            if (positionInResults <= indexOfCountThreshold) {
-              // This element is within the top results, so we need to get information 
-              // from all of the shards.
-              processDefiniteCandidateElement(pf, value);
-            } else {
-              // This element is not within the top results, but may still need to be refined.
-              processPossibleCandidateElement(pf, value, countThreshold);
-            }
-            
-            positionInResults++;
-          }
-        } else { // FACET_SORT_INDEX
-          // everything needs to be refined to see what the per-shard mincount excluded
-          for (PivotFacetValue value : valueCollection.getExplicitValuesList()) {
-            processDefiniteCandidateElement(pf, value);
-          }
-        }
-      }
-
-      needRefinementAtThisLevel = false;
-    }
-      
-    if ( pf.isRefinementsRequired() ) {
-      // if any refinements are needed, then we need to stop and wait to
-      // see how the picture may change before drilling down to child pivot fields 
-      return;
-    } else {
-      // Since outstanding requests have been filled, then we can drill down 
-      // to the next deeper level and check it.
-      refineNextLevelOfFacets(pf);
-    }
-  }
-      
-  /**
-   * Adds refinement requests for the value for each shard that has not already contributed 
-   * a count for this value.
-   */
-  private void processDefiniteCandidateElement(PivotFacet pf, PivotFacetValue value) {
-    
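-    // iterate the set bits of knownShards, ie: every shard that has responded so far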
-    for (int shard = pf.knownShards.nextSetBit(0); 
-         0 <= shard; 
-         shard = pf.knownShards.nextSetBit(shard+1)) {   
-      if ( ! value.shardHasContributed(shard) ) {
-        if ( // if we're doing index order, we need to refine anything  
-             // (mincount may have excluded from a shard)
-            FacetParams.FACET_SORT_INDEX.equals(facetFieldSort)
-            || (// 'missing' value isn't affected by limit, needs refinement if shard didn't provide
-                null == value.getValue() ||
-                // if we are doing count order, we need to refine if the limit was hit
-                // (if not, the shard doesn't have the value or it would have returned already)
-                numberOfValuesContributedByShardWasLimitedByFacetFieldLimit(shard))) {
-          pf.addRefinement(shard, value);
-        }
-      }
-    }  
-  }
-
-  private boolean numberOfValuesContributedByShardWasLimitedByFacetFieldLimit(int shardNumber) {
-    return facetFieldLimit <= numberOfValuesContributedByShard(shardNumber);
-  }
-  
-  private int numberOfValuesContributedByShard(final int shardNumber) { 
-    return numberOfValuesContributedByShard.getOrDefault(shardNumber, 0);
-  }
-  
-  /** 
-   * Checks the {@link #lowestCountContributedbyShard} for each shard, combined with the 
-   * counts we already know, to see if this value is a viable candidate -- 
-   * <b>Does not make sense when using {@link FacetParams#FACET_SORT_INDEX}</b>
-   *
-   * @see #processDefiniteCandidateElement
-   */
-  private void processPossibleCandidateElement(PivotFacet pf, PivotFacetValue value, 
-                                               final int refinementThreshold) {
-    
-    assert FacetParams.FACET_SORT_COUNT.equals(facetFieldSort)
-      : "Method only makes sense when sorting by count";
-
-    int maxPossibleCountAfterRefinement = value.getCount();
-    
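-    // optimistic upper bound: assume every shard that hasn't reported this value
-    // could still contribute up to the lowest count it returned for this field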
-    for (int shard = pf.knownShards.nextSetBit(0); 
-         0 <= shard;
-         shard = pf.knownShards.nextSetBit(shard+1)) {
-      if ( ! value.shardHasContributed(shard) ) {
-        maxPossibleCountAfterRefinement += lowestCountContributedbyShard(shard);
-      }
-    }
-    
-    if (refinementThreshold <= maxPossibleCountAfterRefinement) {
-      processDefiniteCandidateElement(pf, value);
-    }
-  }
-   
-  private int lowestCountContributedbyShard(int shardNumber) {
-    return shardLowestCount.getOrDefault(shardNumber, 0);
-  }
-  
-  private void refineNextLevelOfFacets(PivotFacet pf) {
-
-    List<PivotFacetValue> explicitValsToRefine 
-      = valueCollection.getNextLevelValuesToRefine();
-    
-    for (PivotFacetValue value : explicitValsToRefine) {
-      if (null != value.getChildPivot()) {
-        value.getChildPivot().queuePivotRefinementRequests(pf);
-      }
-    }
-
-    PivotFacetValue missing = this.valueCollection.getMissingValue();
-    if(null != missing && null != missing.getChildPivot()) {
-      missing.getChildPivot().queuePivotRefinementRequests(pf);
-    }
-  }
-  
-  private void incrementShardValueCount(int shardNumber) {
-    numberOfValuesContributedByShard.merge(shardNumber, 1, Integer::sum);
-  }
-  
-  private void contributeValueFromShard(int shardNumber, ResponseBuilder rb, NamedList<Object> shardValue) {
-    
-    incrementShardValueCount(shardNumber);
-
-    Comparable value = PivotFacetHelper.getValue(shardValue);
-    int count = PivotFacetHelper.getCount(shardValue);
-    
-    // We're changing values so we must mark the collection as dirty
-    valueCollection.markDirty();
-    
-    if ( ( !shardLowestCount.containsKey(shardNumber) )
-         || shardLowestCount.get(shardNumber) > count) {
-      shardLowestCount.put(shardNumber,  count);
-    }
-    
-    PivotFacetValue facetValue = valueCollection.get(value);
-    if (null == facetValue) {
-      // never seen before, we need to create it from scratch
-      facetValue = PivotFacetValue.createFromNamedList(shardNumber, rb, this, shardValue);
-      this.valueCollection.add(facetValue);
-    } else {
-      facetValue.mergeContributionFromShard(shardNumber, rb, shardValue);
-    }
-  }
-  
-  /**
-   * Recursively merges the contributions from the specified shard for each 
-   * {@link PivotFacetValue} represented in the <code>response</code>.
-   * 
-   * @see PivotFacetValue#mergeContributionFromShard
-   * @param shardNumber the id of the shard that provided this data
-   * @param rb The response builder of the current request
-   * @param response the data from the specified shard for this pivot field, may be null
-   */
-  public void contributeFromShard(int shardNumber, ResponseBuilder rb, List<NamedList<Object>> response) {
-    if (null == response) return;
-
-    for (NamedList<Object> responseValue : response) {
-      contributeValueFromShard(shardNumber, rb, responseValue);
-    }
-  }
-  
-  public String toString(){
-    return String.format(Locale.ROOT, "P:%s F:%s V:%s",
-                         parentValue, field, valueCollection);
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/PivotFacetFieldValueCollection.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/PivotFacetFieldValueCollection.java b/solr/core/src/java/org/apache/solr/handler/component/PivotFacetFieldValueCollection.java
deleted file mode 100644
index 5c2b07f..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/PivotFacetFieldValueCollection.java
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-
-import org.apache.solr.common.params.FacetParams;
-
-/**
- * Encapsulates a collection of {@link PivotFacetValue}s associated with a 
- * {@link PivotFacetField} with special tracking of a {@link PivotFacetValue} 
- * corresponding to the <code>null</code> value when {@link FacetParams#FACET_MISSING} 
- * is used.
- *
- * @see #markDirty
- * @see PivotFacetField
- * @see PivotFacetValue
- */
-@SuppressWarnings("rawtypes")
-public class PivotFacetFieldValueCollection implements Iterable<PivotFacetValue> {
-  private List<PivotFacetValue> explicitValues;  
-  private PivotFacetValue missingValue;
-  private Map<Comparable, PivotFacetValue> valuesMap;
-  private boolean dirty = true;
-
-  //Facet parameters relating to this field
-  private final int facetFieldMinimumCount;
-  private final int facetFieldOffset;  
-  private final int facetFieldLimit;
-  private final String facetFieldSort;
-  
-  
-  public PivotFacetFieldValueCollection(int minCount, int offset, int limit, String fieldSort){
-    this.explicitValues = new ArrayList<>();
-    this.valuesMap = new HashMap<>(); 
-    this.facetFieldMinimumCount = minCount;
-    this.facetFieldOffset = offset;
-    this.facetFieldLimit = limit;
-    this.facetFieldSort = fieldSort;
-  }
-
-  /**
-   * Indicates that the values in this collection have been modified by the caller.
-   *
-   * Any caller that manipulates the {@link PivotFacetValue}s contained in this collection
-   * must call this method after doing so.
-   */
-  public void markDirty() {
-    dirty = true;
-  }
-
-  /**
-   * The {@link PivotFacetValue} corresponding to a value of 
-   * <code>null</code> when {@link FacetParams#FACET_MISSING} is used.
-   * 
-   * @return the appropriate <code>PivotFacetValue</code> object, may be null 
-   *         if "missing" is not in use, or if it does not meet the mincount.
-   */
-  public PivotFacetValue getMissingValue(){
-    return missingValue;
-  }
-
-  /** 
-   * Read-Only access to the Collection of {@link PivotFacetValue}s corresponding to 
-   * non-missing values.
-   *
-   * @see #getMissingValue
-   */
-  public List<PivotFacetValue> getExplicitValuesList() {
-    return Collections.unmodifiableList(explicitValues);
-  }
-
-  /** 
-   * Size of {@link #getExplicitValuesList}
-   */
-  public int getExplicitValuesListSize() {
-    return this.explicitValues.size();
-  }
-  
-  /** 
-   * Total number of {@link PivotFacetValue}s, including the "missing" value if used.
-   *
-   * @see #getMissingValue
-   * @see #getExplicitValuesList
-   */
-  public int size() {
-    return this.getExplicitValuesListSize() + (this.missingValue == null ? 0 : 1);
-  }
-  
-  /**
-   * Returns the appropriate sub-list of the explicit values that need to be refined, 
-   * based on the {@link FacetParams#FACET_OFFSET} &amp; {@link FacetParams#FACET_LIMIT} 
-   * for this field.
-   *
-   * @see #getExplicitValuesList
-   * @see List#subList
-   */
-  public List<PivotFacetValue> getNextLevelValuesToRefine() {
-    final int numRefinableValues = getExplicitValuesListSize();
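-    // e.g. offset=5, limit=10, 12 refinable values => subList(5, 12)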
-    if (facetFieldOffset < numRefinableValues) {
-      final int offsetPlusCount = (facetFieldLimit >= 0) 
-        ? Math.min(facetFieldLimit + facetFieldOffset, numRefinableValues) 
-        : numRefinableValues;
-      return getExplicitValuesList().subList(facetFieldOffset,  offsetPlusCount);
-    } else {
-      return Collections.<PivotFacetValue>emptyList();
-    }
-  }
-  
-  /**
-   * Fast lookup to retrieve a {@link PivotFacetValue} from this collection if it 
-   * exists
-   *
-   * @param value of the <code>PivotFacetValue</code> to lookup, if 
-   *        <code>null</code> this returns the same as {@link #getMissingValue}
-   * @return the corresponding <code>PivotFacetValue</code> or null if there is 
-   *        no <code>PivotFacetValue</code> in this collection corresponding to 
-   *        the specified value.
-   */
-  public PivotFacetValue get(Comparable value){
-    return valuesMap.get(value);
-  }
-  
-  /**
-   * Fetches a {@link PivotFacetValue} from this collection via its index; may not 
-   * be used to fetch the <code>PivotFacetValue</code> corresponding to the missing-value.
-   *
-   * @see #getExplicitValuesList
-   * @see List#get(int)
-   * @see #getMissingValue
-   */
-  public PivotFacetValue getAt(int index){
-    return explicitValues.get(index);
-  }
-  
-  /**
-   * Adds a {@link PivotFacetValue} to this collection -- callers must not use this 
-   * method if a {@link PivotFacetValue} with the same value already exists in this collection
-   */
-  public void add(PivotFacetValue pfValue) {
-    Comparable val = pfValue.getValue();
-    assert ! this.valuesMap.containsKey(val) 
-      : "Must not add duplicate PivotFacetValue with redundent inner value";
-
-    dirty = true;
-    if(null == val) {
-      this.missingValue = pfValue;
-    } else {
-      this.explicitValues.add(pfValue);
-    }
-    this.valuesMap.put(val, pfValue);
-  }
-
-
-  /**
-   * Destructive method that recursively prunes values from the data structure 
-   * based on the counts for those values and the effective sort, mincount, limit, 
-   * and offset being used for each field.
-   * <p>
-   * This method should only be called after all refinement is completed.
-   * </p>
-   *
-   * @see PivotFacetField#trim
-   * @see PivotFacet#getTrimmedPivotsAsListOfNamedLists
-   */
-  public void trim() {   // NOTE: destructive
-    // TODO: see comment in PivotFacetField about potential optimization
-    // (ie: trim as we refine)
-    trimNonNullValues(); 
-    trimNullValue();
-  }
-  
-  private void trimNullValue(){
-    if (missingValue == null) {
-      return;
-    }
-
-    if (missingValue.getCount() >= facetFieldMinimumCount){
-      if (null != missingValue.getChildPivot()) {
-        missingValue.getChildPivot().trim();
-      }
-    } else { // missing count less than mincount
-      missingValue = null;
-    }
-  }
-  
-  private void trimNonNullValues(){
-    if (explicitValues != null && explicitValues.size() > 0) {
-      
-      sort();
-      
-      ArrayList<PivotFacetValue> trimmedValues = new ArrayList<>();
-      
-      int facetsSkipped = 0;
-      
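-      // walk values in sorted order: values below mincount are dropped entirely,
-      // then the first 'offset' qualifying values are skipped, then up to 'limit' are kept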
-      for (PivotFacetValue pivotValue : explicitValues) {
-        
-        if (pivotValue.getCount() >= facetFieldMinimumCount) {
-          if (facetsSkipped >= facetFieldOffset) {
-            trimmedValues.add(pivotValue);
-            if (pivotValue.getChildPivot() != null) {
-              pivotValue.getChildPivot().trim();
-            }
-            if (facetFieldLimit > 0 && trimmedValues.size() >= facetFieldLimit) {
-              break;
-            }
-          } else {
-            facetsSkipped++;
-          }
-        }
-      }
-      
-      explicitValues = trimmedValues;
-      valuesMap.clear();
-    }
-  }
-  
-  /**
-   * Sorts the collection and recursively sorts the collections associated with 
-   * any sub-pivots.
-   *
-   * @see FacetParams#FACET_SORT
-   * @see PivotFacetField#sort
-   */
-  public void sort() {
-    
-    if (dirty) {
-      if (facetFieldSort.equals(FacetParams.FACET_SORT_COUNT)) {
-        Collections.sort(this.explicitValues, new PivotFacetCountComparator());
-      } else if (facetFieldSort.equals(FacetParams.FACET_SORT_INDEX)) {
-        Collections.sort(this.explicitValues, new PivotFacetValueComparator());
-      }
-      dirty = false;
-    }
-    
-    for (PivotFacetValue value : this.explicitValues) {
-      if (value.getChildPivot() != null) {
-        value.getChildPivot().sort();
-      }
-    }
-   
-    if (missingValue != null && missingValue.getChildPivot() != null) {
-      missingValue.getChildPivot().sort();
-    }
-  }
-
-  /**
-   * Iterator over all elements in this Collection, including the result of 
-   * {@link #getMissingValue} as the last element (if it exists)
-   */
-  @Override
-  public Iterator<PivotFacetValue> iterator() {
-    Iterator<PivotFacetValue> it = new Iterator<PivotFacetValue>() {
-      private final Iterator<PivotFacetValue> valuesIterator = explicitValues.iterator();
-      private boolean shouldGiveMissingValue = (missingValue != null);
-      
-      @Override
-      public boolean hasNext() {
-        return valuesIterator.hasNext() || shouldGiveMissingValue;
-      }
-      
-      @Override
-      public PivotFacetValue next() {
-        if (valuesIterator.hasNext()) {
-          return valuesIterator.next();
-        }
-        if (shouldGiveMissingValue) {
-          shouldGiveMissingValue = false;
-          return missingValue;
-        }
-        return null;
-      }
-      
-      @Override
-      public void remove() {
-        throw new UnsupportedOperationException("Can't remove from this iterator");
-      }
-    };
-    return it;
-  }
-    
-  /** Sorts {@link PivotFacetValue} instances by their count */
-  public static class PivotFacetCountComparator implements Comparator<PivotFacetValue> {
-    public int compare(PivotFacetValue left, PivotFacetValue right) {
-      // Integer.compare avoids the overflow a raw subtraction could hit on large counts
-      int countCmp = Integer.compare(right.getCount(), left.getCount());
-      return (0 != countCmp) ? countCmp : 
-        compareWithNullLast(left.getValue(), right.getValue());
-    }    
-  }
-  
-  /** Sorts {@link PivotFacetValue} instances by their value */
-  public static class PivotFacetValueComparator implements Comparator<PivotFacetValue> {
-    public int compare(PivotFacetValue left, PivotFacetValue right) {
-      return compareWithNullLast(left.getValue(), right.getValue());
-    }
-  }
-  
-  /**
-   * A helper method for use in <code>Comparator</code> classes where object properties 
-   * are <code>Comparable</code> but may be null.
-   */
-  static int compareWithNullLast(final Comparable o1, final Comparable o2) {
-    if (null == o1) {
-      if (null == o2) {
-        return 0;
-      }
-      return 1; // o1 is null, o2 is not
-    }
-    if (null == o2) {
-      return -1; // o2 is null, o1 is not
-    }
-    return o1.compareTo(o2);
-  }
-  
-  public String toString(){
-    return String.format(Locale.ROOT, "Values:%s | Missing:%s ", explicitValues, missingValue);
-  }
-}
-
-


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/ClassifyStream.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/ClassifyStream.java b/solr/core/src/java/org/apache/solr/handler/ClassifyStream.java
deleted file mode 100644
index c79c409..0000000
--- a/solr/core/src/java/org/apache/solr/handler/ClassifyStream.java
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.Locale;
-
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.solr.client.solrj.io.Tuple;
-import org.apache.solr.client.solrj.io.comp.StreamComparator;
-import org.apache.solr.client.solrj.io.stream.StreamContext;
-import org.apache.solr.client.solrj.io.stream.TupleStream;
-import org.apache.solr.client.solrj.io.stream.expr.Explanation;
-import org.apache.solr.client.solrj.io.stream.expr.Expressible;
-import org.apache.solr.client.solrj.io.stream.expr.StreamExplanation;
-import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
-import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionNamedParameter;
-import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionParameter;
-import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.core.SolrCore;
-import org.apache.lucene.analysis.*;
-
-/**
 *  The classify expression retrieves a model trained by the train expression and uses it to classify documents from a stream.
 *  Syntax:
 *  classify(model(...), anyStream(...), field="body")
- * @since 6.3.0
- **/
-
-public class ClassifyStream extends TupleStream implements Expressible {
-  private TupleStream docStream;
-  private TupleStream modelStream;
-
-  private String field;
-  private String analyzerField;
-  private Tuple  modelTuple;
-
-  Analyzer analyzer;
-  private Map<CharSequence, Integer> termToIndex;
-  private List<Double> idfs;
-  private List<Double> modelWeights;
-
-  public ClassifyStream(StreamExpression expression, StreamFactory factory) throws IOException {
-    List<StreamExpression> streamExpressions = factory.getExpressionOperandsRepresentingTypes(expression, Expressible.class, TupleStream.class);
-    if (streamExpressions.size() != 2) {
-      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting two stream but found %d",expression, streamExpressions.size()));
-    }
-
-    modelStream = factory.constructStream(streamExpressions.get(0));
-    docStream = factory.constructStream(streamExpressions.get(1));
-
-    StreamExpressionNamedParameter fieldParameter = factory.getNamedOperand(expression, "field");
-    if (fieldParameter == null) {
-      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - field parameter must be specified",expression, streamExpressions.size()));
-    }
-    analyzerField = field = fieldParameter.getParameter().toString();
-
-    StreamExpressionNamedParameter analyzerFieldParameter = factory.getNamedOperand(expression, "analyzerField");
-    if (analyzerFieldParameter != null) {
-      analyzerField = analyzerFieldParameter.getParameter().toString();
-    }
-  }
-
-  @Override
-  public void setStreamContext(StreamContext context) {
-    Object solrCoreObj = context.get("solr-core");
-    if (!(solrCoreObj instanceof SolrCore)) {
-      throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "StreamContext must have SolrCore in solr-core key");
-    }
-    analyzer = ((SolrCore) solrCoreObj).getLatestSchema().getFieldType(analyzerField).getIndexAnalyzer();
-
-    this.docStream.setStreamContext(context);
-    this.modelStream.setStreamContext(context);
-  }
-
-  @Override
-  public List<TupleStream> children() {
-    List<TupleStream> l = new ArrayList<>();
-    l.add(docStream);
-    l.add(modelStream);
-    return l;
-  }
-
-  @Override
-  public void open() throws IOException {
-    this.docStream.open();
-    this.modelStream.open();
-  }
-
-  @Override
-  public void close() throws IOException {
-    this.docStream.close();
-    this.modelStream.close();
-  }
-
-  @Override
-  public Tuple read() throws IOException {
-    if (modelTuple == null) {
-
-      modelTuple = modelStream.read();
-      if (modelTuple == null || modelTuple.EOF) {
-        throw new IOException("Model tuple not found for classify stream!");
-      }
-
-      termToIndex = new HashMap<>();
-
-      List<String> terms = modelTuple.getStrings("terms_ss");
-
-      for (int i = 0; i < terms.size(); i++) {
-        termToIndex.put(terms.get(i), i);
-      }
-
-      idfs = modelTuple.getDoubles("idfs_ds");
-      modelWeights = modelTuple.getDoubles("weights_ds");
-    }
-
-    Tuple docTuple = docStream.read();
-    if (docTuple.EOF) return docTuple;
-
-    String text = docTuple.getString(field);
-
-    double[] tfs = new double[termToIndex.size()];
-
-    TokenStream tokenStream = analyzer.tokenStream(analyzerField, text);
-    CharTermAttribute termAtt = tokenStream.getAttribute(CharTermAttribute.class);
-    tokenStream.reset();
-
-    int termCount = 0;
-    while (tokenStream.incrementToken()) {
-      termCount++;
-      if (termToIndex.containsKey(termAtt.toString())) {
-        tfs[termToIndex.get(termAtt.toString())]++;
-      }
-    }
-
-    tokenStream.end();
-    tokenStream.close();
-
-    List<Double> tfidfs = new ArrayList<>(termToIndex.size());
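-    // index 0 is a constant 1.0 intercept feature, so the model's weights_ds is
-    // expected to carry a matching bias weight at index 0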
-    tfidfs.add(1.0);
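-    // sublinear tf scaling (1 + log(tf)) before multiplying by each term's idf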
-    for (int i = 0; i < tfs.length; i++) {
-      if (tfs[i] != 0) {
-        tfs[i] = 1 + Math.log(tfs[i]);
-      }
-      tfidfs.add(this.idfs.get(i) * tfs[i]);
-    }
-
-    double total = 0.0;
-    for (int i = 0; i < tfidfs.size(); i++) {
-      total += tfidfs.get(i) * modelWeights.get(i);
-    }
-
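-    // normalize the raw dot product by sqrt(termCount) so long documents don't dominate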
-    double score = total * ((float) (1.0 / Math.sqrt(termCount)));
-    double positiveProb = sigmoid(total);
-
-    docTuple.put("probability_d", positiveProb);
-    docTuple.put("score_d",  score);
-
-    return docTuple;
-  }
-
-  private double sigmoid(double in) {
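-    // logistic function: maps a raw linear score to a probability in (0,1)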
-    return 1.0 / (1 + Math.exp(-in));
-  }
-
-  @Override
-  public StreamComparator getStreamSort() {
-    return null;
-  }
-
-  @Override
-  public StreamExpressionParameter toExpression(StreamFactory factory) throws IOException {
-    return toExpression(factory, true);
-  }
-
-  private StreamExpression toExpression(StreamFactory factory, boolean includeStreams) throws IOException {
-    // function name
-    StreamExpression expression = new StreamExpression(factory.getFunctionName(this.getClass()));
-
-    if (includeStreams) {
-      if (docStream instanceof Expressible && modelStream instanceof Expressible) {
-        expression.addParameter(((Expressible)modelStream).toExpression(factory));
-        expression.addParameter(((Expressible)docStream).toExpression(factory));
-      } else {
-        throw new IOException("This ClassifyStream contains a non-expressible TupleStream - it cannot be converted to an expression");
-      }
-    }
-
-    expression.addParameter(new StreamExpressionNamedParameter("field", field));
-    expression.addParameter(new StreamExpressionNamedParameter("analyzerField", analyzerField));
-
-    return expression;
-  }
-
-  @Override
-  public Explanation toExplanation(StreamFactory factory) throws IOException {
-    StreamExplanation explanation = new StreamExplanation(getStreamNodeId().toString());
-
-    explanation.setFunctionName(factory.getFunctionName(this.getClass()));
-    explanation.setImplementingClass(this.getClass().getName());
-    explanation.setExpressionType(Explanation.ExpressionType.STREAM_DECORATOR);
-    explanation.setExpression(toExpression(factory, false).toString());
-
-    explanation.addChild(docStream.toExplanation(factory));
-    explanation.addChild(modelStream.toExplanation(factory));
-
-    return explanation;
-  }
-}
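
For reference, the scoring applied in read() above is logistic regression over
sublinear tf-idf features. Below is a minimal sketch of that arithmetic outside
the streaming machinery; the class and method names are hypothetical, and the
term/idf/weight lists stand in for the model tuple's "terms_ss", "idfs_ds" and
"weights_ds" fields.

    import java.util.List;
    import java.util.Map;

    class ClassifyScoringSketch {
      // weights.get(0) is the intercept; weights.get(i + 1) pairs with terms.get(i).
      static double probability(Map<String, Integer> rawTf, List<String> terms,
                                List<Double> idfs, List<Double> weights) {
        double total = weights.get(0);                         // bias feature, fixed at 1.0
        for (int i = 0; i < terms.size(); i++) {
          int tf = rawTf.getOrDefault(terms.get(i), 0);
          double scaledTf = tf == 0 ? 0.0 : 1 + Math.log(tf);  // sublinear tf scaling
          total += idfs.get(i) * scaledTf * weights.get(i + 1);
        }
        return 1.0 / (1 + Math.exp(-total));                   // sigmoid -> probability_d
      }
    }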

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/ContentStreamHandlerBase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/ContentStreamHandlerBase.java b/solr/core/src/java/org/apache/solr/handler/ContentStreamHandlerBase.java
deleted file mode 100644
index 1859f04..0000000
--- a/solr/core/src/java/org/apache/solr/handler/ContentStreamHandlerBase.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.ContentStream;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.handler.loader.ContentStreamLoader;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.update.processor.UpdateRequestProcessor;
-import org.apache.solr.update.processor.UpdateRequestProcessorChain;
-
-/**
- * Shares common code between various handlers that manipulate 
- * {@link org.apache.solr.common.util.ContentStream} objects.
- */
-public abstract class ContentStreamHandlerBase extends RequestHandlerBase {
-
-  @Override
-  public void init(NamedList args) {
-    super.init(args);
-
-    // Caching off by default
-    httpCaching = false;
-    if (args != null) {
-      Object caching = args.get("httpCaching");
-      if(caching!=null) {
-        httpCaching = Boolean.parseBoolean(caching.toString());
-      }
-    }
-  }
-  
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    SolrParams params = req.getParams();
-    UpdateRequestProcessorChain processorChain =
-        req.getCore().getUpdateProcessorChain(params);
-
-    UpdateRequestProcessor processor = processorChain.createProcessor(req, rsp);
-
-    try {
-      ContentStreamLoader documentLoader = newLoader(req, processor);
-
-
-      Iterable<ContentStream> streams = req.getContentStreams();
-      if (streams == null) {
-        if (!RequestHandlerUtils.handleCommit(req, processor, params, false) && !RequestHandlerUtils.handleRollback(req, processor, params, false)) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "missing content stream");
-        }
-      } else {
-
-        for (ContentStream stream : streams) {
-          documentLoader.load(req, rsp, stream, processor);
-        }
-
-        // Perhaps commit from the parameters
-        RequestHandlerUtils.handleCommit(req, processor, params, false);
-        RequestHandlerUtils.handleRollback(req, processor, params, false);
-      }
-    } finally {
-      // finish the request
-      try {
-        processor.finish();
-      } finally {
-        processor.close();
-      }
-    }
-  }
-
-  protected abstract ContentStreamLoader newLoader(SolrQueryRequest req, UpdateRequestProcessor processor);
-}
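
Because handleRequestBody() above already manages the processor chain, commit
and rollback handling, and cleanup, a concrete subclass only has to supply a
loader. A minimal sketch under that contract follows; the class name and the
parsing logic are hypothetical.

    import org.apache.solr.common.util.ContentStream;
    import org.apache.solr.handler.ContentStreamHandlerBase;
    import org.apache.solr.handler.loader.ContentStreamLoader;
    import org.apache.solr.request.SolrQueryRequest;
    import org.apache.solr.response.SolrQueryResponse;
    import org.apache.solr.update.processor.UpdateRequestProcessor;

    public class LineDocUpdateHandler extends ContentStreamHandlerBase {
      @Override
      protected ContentStreamLoader newLoader(SolrQueryRequest req, UpdateRequestProcessor processor) {
        return new ContentStreamLoader() {
          @Override
          public void load(SolrQueryRequest req, SolrQueryResponse rsp,
                           ContentStream stream, UpdateRequestProcessor proc) throws Exception {
            // parse the stream and feed each document to proc.processAdd(...)
          }
        };
      }

      @Override
      public String getDescription() {
        return "Hypothetical line-oriented document loader";
      }
    }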

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/ContentStreamLoader.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/ContentStreamLoader.java b/solr/core/src/java/org/apache/solr/handler/ContentStreamLoader.java
deleted file mode 100644
index 8632eae..0000000
--- a/solr/core/src/java/org/apache/solr/handler/ContentStreamLoader.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-import org.apache.solr.common.util.ContentStream;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-
-
-/**
- * Load a {@link org.apache.solr.common.util.ContentStream} into Solr
- *
- **/
-public abstract class ContentStreamLoader {
-
-  protected String errHeader;
-
-  public String getErrHeader() {
-    return errHeader;
-  }
-
-  public void setErrHeader(String errHeader) {
-    this.errHeader = errHeader;
-  }
-
-  /**
-   * Loaders are responsible for closing the stream
-   *
-   * @param req The input {@link org.apache.solr.request.SolrQueryRequest}
-   * @param rsp The response, in case the Loader wishes to add anything
-   * @param stream The {@link org.apache.solr.common.util.ContentStream} to add
-   */
-  public abstract void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stream) throws Exception;
-
-
-}
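
Since the contract above makes loaders responsible for closing the stream, a
try-with-resources around the reader is the natural shape. A minimal sketch
with a hypothetical class name:

    import java.io.Reader;

    import org.apache.commons.io.IOUtils;
    import org.apache.solr.common.util.ContentStream;
    import org.apache.solr.handler.ContentStreamLoader;
    import org.apache.solr.request.SolrQueryRequest;
    import org.apache.solr.response.SolrQueryResponse;

    public class EchoContentStreamLoader extends ContentStreamLoader {
      @Override
      public void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stream) throws Exception {
        try (Reader reader = stream.getReader()) {   // the loader, not the caller, closes the stream
          rsp.add("echo", IOUtils.toString(reader));
        }
      }
    }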

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java
deleted file mode 100644
index 7f67981..0000000
--- a/solr/core/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import javax.xml.stream.XMLInputFactory;
-import javax.xml.stream.XMLStreamConstants;
-import javax.xml.stream.XMLStreamException;
-import javax.xml.stream.XMLStreamReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.lang.invoke.MethodHandles;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.Set;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.util.BytesRef;
-import org.apache.solr.client.solrj.request.DocumentAnalysisRequest;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.params.AnalysisParams;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.ContentStream;
-import org.apache.solr.common.util.ContentStreamBase;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.XMLErrorLogger;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.schema.FieldType;
-import org.apache.solr.schema.IndexSchema;
-import org.apache.solr.schema.SchemaField;
-import org.apache.solr.common.EmptyEntityResolver;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-/**
- * An analysis handler that provides a breakdown of the analysis process of provided documents. This handler expects a
- * (single) content stream of the following format:
- * <br>
- * <pre><code>
- *  &lt;docs&gt;
- *      &lt;doc&gt;
- *          &lt;field name="id"&gt;1&lt;/field&gt;
- *          &lt;field name="name"&gt;The Name&lt;/field&gt;
- *          &lt;field name="text"&gt;The Text Value&lt;/field&gt;
- *      &lt;/doc&gt;
- *      &lt;doc&gt;...&lt;/doc&gt;
- *      &lt;doc&gt;...&lt;/doc&gt;
- *      ...
- *  &lt;/docs&gt;
- * </code></pre>
- * <br>
- * <em><b>Note: Each document must contain a field which serves as the unique key. This key is used in the returned
- * response to associate an analysis breakdown to the analyzed document.</b></em>
- * <p>
- * Like the {@link org.apache.solr.handler.FieldAnalysisRequestHandler}, this handler also supports query analysis by
- * sending either an "analysis.query" or "q" request parameter that holds the query text to be analyzed. It also
- * supports the "analysis.showmatch" parameter which when set to {@code true}, all field tokens that match the query
- * tokens will be marked as a "match".
- *
- *
- * @since solr 1.4
- */
-public class DocumentAnalysisRequestHandler extends AnalysisRequestHandlerBase {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private static final XMLErrorLogger xmllog = new XMLErrorLogger(log);
-
-  private XMLInputFactory inputFactory;
-
-  @Override
-  public void init(NamedList args) {
-    super.init(args);
-
-    inputFactory = XMLInputFactory.newInstance();
-    EmptyEntityResolver.configureXMLInputFactory(inputFactory);
-    inputFactory.setXMLReporter(xmllog);
-    try {
-      // The java 1.6 bundled stax parser (sjsxp) does not currently have a thread-safe
-      // XMLInputFactory, as that implementation tries to cache and reuse the
-      // XMLStreamReader.  Setting the parser-specific "reuse-instance" property to false
-      // prevents this.
-      // All other known open-source stax parsers (and the bea ref impl)
-      // have thread-safe factories.
-      inputFactory.setProperty("reuse-instance", Boolean.FALSE);
-    } catch (IllegalArgumentException ex) {
-      // Other implementations will likely throw this exception since "reuse-instance"
-      // is implementation specific.
-      log.debug("Unable to set the 'reuse-instance' property for the input factory: " + inputFactory);
-    }
-  }
-
-  @Override
-  protected NamedList doAnalysis(SolrQueryRequest req) throws Exception {
-    DocumentAnalysisRequest analysisRequest = resolveAnalysisRequest(req);
-    return handleAnalysisRequest(analysisRequest, req.getSchema());
-  }
-
-  @Override
-  public String getDescription() {
-    return "Provides a breakdown of the analysis process of provided documents";
-  }
-
-
-  //================================================ Helper Methods ==================================================
-
-  /**
-   * Resolves the {@link DocumentAnalysisRequest} from the given solr request.
-   *
-   * @param req The solr request.
-   *
-   * @return The resolved document analysis request.
-   *
-   * @throws IOException        Thrown when reading/parsing the content stream of the request fails.
-   * @throws XMLStreamException Thrown when reading/parsing the content stream of the request fails.
-   */
-  DocumentAnalysisRequest resolveAnalysisRequest(SolrQueryRequest req) throws IOException, XMLStreamException {
-
-    DocumentAnalysisRequest request = new DocumentAnalysisRequest();
-
-    SolrParams params = req.getParams();
-
-    String query = params.get(AnalysisParams.QUERY, params.get(CommonParams.Q, null));
-    request.setQuery(query);
-
-    boolean showMatch = params.getBool(AnalysisParams.SHOW_MATCH, false);
-    request.setShowMatch(showMatch);
-
-    ContentStream stream = extractSingleContentStream(req);
-    InputStream is = null;
-    XMLStreamReader parser = null;
-    
-    try {
-      is = stream.getStream();
-      final String charset = ContentStreamBase.getCharsetFromContentType(stream.getContentType());
-      parser = (charset == null) ?
-        inputFactory.createXMLStreamReader(is) : inputFactory.createXMLStreamReader(is, charset);
-
-      while (true) {
-        int event = parser.next();
-        switch (event) {
-          case XMLStreamConstants.END_DOCUMENT: {
-            parser.close();
-            return request;
-          }
-          case XMLStreamConstants.START_ELEMENT: {
-            String currTag = parser.getLocalName();
-            if ("doc".equals(currTag)) {
-              log.trace("Reading doc...");
-              SolrInputDocument document = readDocument(parser, req.getSchema());
-              request.addDocument(document);
-            }
-            break;
-          }
-        }
-      }
-
-    } finally {
-      if (parser != null) parser.close();
-      IOUtils.closeQuietly(is);
-    }
-  }
-
-  /**
-   * Handles the resolved {@link DocumentAnalysisRequest} and returns the analysis response as a named list.
-   *
-   * @param request The {@link DocumentAnalysisRequest} to be handled.
-   * @param schema  The index schema.
-   *
-   * @return The analysis response as a named list.
-   */
-  NamedList<Object> handleAnalysisRequest(DocumentAnalysisRequest request, IndexSchema schema) {
-
-    SchemaField uniqueKeyField = schema.getUniqueKeyField();
-    NamedList<Object> result = new SimpleOrderedMap<>();
-
-    for (SolrInputDocument document : request.getDocuments()) {
-
-      NamedList<NamedList> theTokens = new SimpleOrderedMap<>();
-      result.add(document.getFieldValue(uniqueKeyField.getName()).toString(), theTokens);
-      for (String name : document.getFieldNames()) {
-
-        // there's no point in providing analysis for unindexed fields.
-        SchemaField field = schema.getField(name);
-        if (!field.indexed()) {
-          continue;
-        }
-
-        NamedList<Object> fieldTokens = new SimpleOrderedMap<>();
-        theTokens.add(name, fieldTokens);
-
-        FieldType fieldType = schema.getFieldType(name);
-
-        final String queryValue = request.getQuery();
-        Set<BytesRef> termsToMatch;
-        try {
-          termsToMatch = (queryValue != null && request.isShowMatch())
-            ? getQueryTokenSet(queryValue, fieldType.getQueryAnalyzer())
-            : EMPTY_BYTES_SET;
-        } catch (Exception e) {
-          // ignore analysis exceptions since we are applying arbitrary text to all fields
-          termsToMatch = EMPTY_BYTES_SET;
-        }
-
-        if (request.getQuery() != null) {
-          try {
-            AnalysisContext analysisContext = new AnalysisContext(fieldType, fieldType.getQueryAnalyzer(), EMPTY_BYTES_SET);
-            fieldTokens.add("query", analyzeValue(request.getQuery(), analysisContext));
-          } catch (Exception e) {
-            // ignore analysis exceptions since we are applying arbitrary text to all fields
-          }
-        }
-
-        Analyzer analyzer = fieldType.getIndexAnalyzer();
-        AnalysisContext analysisContext = new AnalysisContext(fieldType, analyzer, termsToMatch);
-        Collection<Object> fieldValues = document.getFieldValues(name);
-        NamedList<NamedList<? extends Object>> indexTokens 
-          = new SimpleOrderedMap<>();
-        for (Object fieldValue : fieldValues) {
-          indexTokens.add(String.valueOf(fieldValue), 
-                          analyzeValue(fieldValue.toString(), analysisContext));
-        }
-        fieldTokens.add("index", indexTokens);
-      }
-    }
-
-    return result;
-  }
-
-  /**
-   * Reads the document from the given xml stream reader. The following document format is expected:
-   * <p>
-   * <pre><code>
-   * &lt;doc&gt;
-   *    &lt;field name="id"&gt;1&lt;/field&gt;
-   *    &lt;field name="name"&gt;The Name&lt;/field&gt;
-   *    &lt;field name="text"&gt;The Text Value&lt;/field&gt;
-   * &lt;/doc&gt;
-   * </code></pre>
-   * <p>
-   * <em>NOTE: each read document is expected to have at least one field which serves as the unique key.</em>
-   *
-   * @param reader The {@link XMLStreamReader} from which the document will be read.
-   * @param schema The index schema. The schema is used to validate that the read document has a unique key field.
-   *
-   * @return The read document.
-   *
-   * @throws XMLStreamException When reading of the document fails.
-   */
-  SolrInputDocument readDocument(XMLStreamReader reader, IndexSchema schema) throws XMLStreamException {
-    SolrInputDocument doc = new SolrInputDocument();
-
-    String uniqueKeyField = schema.getUniqueKeyField().getName();
-
-    StringBuilder text = new StringBuilder();
-    String fieldName = null;
-    boolean hasId = false;
-
-    while (true) {
-      int event = reader.next();
-      switch (event) {
-        // Add everything to the text
-        case XMLStreamConstants.SPACE:
-        case XMLStreamConstants.CDATA:
-        case XMLStreamConstants.CHARACTERS:
-          text.append(reader.getText());
-          break;
-
-        case XMLStreamConstants.END_ELEMENT:
-          if ("doc".equals(reader.getLocalName())) {
-            if (!hasId) {
-              throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-                      "All documents must contain a unique key value: '" + doc.toString() + "'");
-            }
-            return doc;
-          } else if ("field".equals(reader.getLocalName())) {
-            doc.addField(fieldName, text.toString());
-            if (uniqueKeyField.equals(fieldName)) {
-              hasId = true;
-            }
-          }
-          break;
-
-        case XMLStreamConstants.START_ELEMENT:
-          text.setLength(0);
-          String localName = reader.getLocalName();
-          if (!"field".equals(localName)) {
-            log.warn("unexpected XML tag doc/" + localName);
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "unexpected XML tag doc/" + localName);
-          }
-
-          for (int i = 0; i < reader.getAttributeCount(); i++) {
-            String attrName = reader.getAttributeLocalName(i);
-            if (NAME.equals(attrName)) {
-              fieldName = reader.getAttributeValue(i);
-            }
-          }
-          break;
-      }
-    }
-  }
-
-  /**
-   * Extracts the only content stream from the request. {@link org.apache.solr.common.SolrException.ErrorCode#BAD_REQUEST}
-   * error is thrown if the request doesn't hold any content stream or holds more than one.
-   *
-   * @param req The solr request.
-   *
-   * @return The single content stream which holds the documents to be analyzed.
-   */
-  private ContentStream extractSingleContentStream(SolrQueryRequest req) {
-    Iterable<ContentStream> streams = req.getContentStreams();
-    String exceptionMsg = "DocumentAnalysisRequestHandler expects a single content stream with documents to analyze";
-    if (streams == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, exceptionMsg);
-    }
-    Iterator<ContentStream> iter = streams.iterator();
-    if (!iter.hasNext()) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, exceptionMsg);
-    }
-    ContentStream stream = iter.next();
-    if (iter.hasNext()) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, exceptionMsg);
-    }
-    return stream;
-  }
-}
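
On the client side, the SolrJ DocumentAnalysisRequest imported above builds the
<docs> payload this handler expects. A hedged sketch, assuming the handler is
registered at its conventional /analysis/document path and a "collection1"
core exists:

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.DocumentAnalysisRequest;
    import org.apache.solr.client.solrj.response.DocumentAnalysisResponse;
    import org.apache.solr.common.SolrInputDocument;

    public class DocumentAnalysisExample {
      public static void main(String[] args) throws Exception {
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/collection1").build()) {
          SolrInputDocument doc = new SolrInputDocument();
          doc.addField("id", "1");                 // the unique key field is mandatory
          doc.addField("text", "The Text Value");
          DocumentAnalysisRequest request = new DocumentAnalysisRequest();
          request.addDocument(doc);
          request.setQuery("value");               // also run query-time analysis
          request.setShowMatch(true);              // mark tokens overlapping the query
          DocumentAnalysisResponse response = request.process(client);
          System.out.println(response);
        }
      }
    }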

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/DumpRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/DumpRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/DumpRequestHandler.java
deleted file mode 100644
index d7d5b71..0000000
--- a/solr/core/src/java/org/apache/solr/handler/DumpRequestHandler.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.IOException;
-import java.io.Reader;
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.solr.common.util.ContentStream;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.core.PluginInfo;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.response.SolrQueryResponse;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-public class DumpRequestHandler extends RequestHandlerBase
-{
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException 
-  {
-    // Show params
-    rsp.add( "params", req.getParams().toNamedList() );
-    String[] parts = req.getParams().getParams("urlTemplateValues");
-    if (parts != null && parts.length > 0) {
-      Map<String, Object> map = new LinkedHashMap<>();
-      rsp.getValues().add("urlTemplateValues", map);
-      for (String part : parts) {
-        map.put(part, req.getPathTemplateValues().get(part));
-      }
-    }
-
-    String[] returnParams = req.getParams().getParams("param");
-    if(returnParams !=null) {
-      NamedList params = (NamedList) rsp.getValues().get("params");
-      for (String returnParam : returnParams) {
-        String[] vals = req.getParams().getParams(returnParam);
-        if(vals != null){
-          if (vals.length == 1) {
-            params.add(returnParam, vals[0]);
-          } else {
-            params.add(returnParam, vals);
-          }
-
-        }
-
-      }
-    }
-
-    if(req.getParams().getBool("getdefaults", false)){
-      NamedList def = (NamedList) initArgs.get(PluginInfo.DEFAULTS);
-      rsp.add("getdefaults", def);
-    }
-
-
-    if(req.getParams().getBool("initArgs", false)) {
-      rsp.add("initArgs", initArgs);
-    }
-        
-    // Write the streams...
-    if( req.getContentStreams() != null ) {
-      ArrayList<NamedList<Object>> streams = new ArrayList<>();
-      // Cycle through each stream
-      for( ContentStream content : req.getContentStreams() ) {
-        NamedList<Object> stream = new SimpleOrderedMap<>();
-        stream.add(NAME, content.getName());
-        stream.add( "sourceInfo", content.getSourceInfo() );
-        stream.add( "size", content.getSize() );
-        stream.add( "contentType", content.getContentType() );
-        Reader reader = content.getReader();
-        try {
-          stream.add( "stream", IOUtils.toString(reader) );
-        } finally {
-          reader.close();
-        }
-        streams.add( stream );
-      }
-      rsp.add( "streams", streams );
-    }
-
-    rsp.add("context", req.getContext());
-  }
-
-  //////////////////////// SolrInfoMBeans methods //////////////////////
-
-  @Override
-  public String getDescription() {
-    return "Dump handler (debug)";
-  }
-
-  @Override
-  public SolrRequestHandler getSubHandler(String subPath) {
-    if(subpaths !=null && subpaths.contains(subPath)) return this;
-    return null;
-  }
-  private List<String> subpaths;
-
-  @Override
-  public void init(NamedList args) {
-    super.init(args);
-    if(args !=null) {
-      NamedList nl = (NamedList) args.get(PluginInfo.DEFAULTS);
-      if(nl!=null) subpaths = nl.getAll("subpath");
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/ExportHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/ExportHandler.java b/solr/core/src/java/org/apache/solr/handler/ExportHandler.java
deleted file mode 100644
index ea9239d..0000000
--- a/solr/core/src/java/org/apache/solr/handler/ExportHandler.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler;
-
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.MapSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.handler.component.SearchHandler;
-import org.apache.solr.handler.export.ExportWriter;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-
-import static org.apache.solr.common.params.CommonParams.JSON;
-
-public class ExportHandler extends SearchHandler {
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    try {
-      super.handleRequestBody(req, rsp);
-    } catch (Exception e) {
-      rsp.setException(e);
-    }
-    String wt = req.getParams().get(CommonParams.WT, JSON);
-    if("xsort".equals(wt)) wt = JSON;
-    Map<String, String> map = new HashMap<>(1);
-    map.put(CommonParams.WT, ReplicationHandler.FILE_STREAM);
-    req.setParams(SolrParams.wrapDefaults(new MapSolrParams(map),req.getParams()));
-    rsp.add(ReplicationHandler.FILE_STREAM, new ExportWriter(req, rsp, wt));
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/FieldAnalysisRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/FieldAnalysisRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/FieldAnalysisRequestHandler.java
deleted file mode 100644
index a7e1ab9..0000000
--- a/solr/core/src/java/org/apache/solr/handler/FieldAnalysisRequestHandler.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import org.apache.lucene.util.BytesRef;
-import org.apache.solr.client.solrj.request.FieldAnalysisRequest;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.AnalysisParams;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.ContentStream;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.schema.FieldType;
-import org.apache.solr.schema.IndexSchema;
-import org.apache.commons.io.IOUtils;
-
-import java.io.Reader;
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Set;
-
-/**
- * Provides the ability to specify multiple field types and field names in the same request. Expected parameters:
- * <table border="1" summary="table of parameters">
- * <tr>
- * <th align="left">Name</th>
- * <th align="left">Type</th>
- * <th align="left">required</th>
- * <th align="left">Description</th>
- * <th align="left">Multi-valued</th>
- * </tr>
- * <tr>
- * <td>analysis.fieldname</td>
- * <td>string</td>
- * <td>no</td>
- * <td>When present, the text will be analyzed based on the type of this field name.</td>
- * <td>Yes, this parameter may hold a comma-separated list of values and the analysis will be performed for each of the specified fields</td>
- * </tr>
- * <tr>
- * <td>analysis.fieldtype</td>
- * <td>string</td>
- * <td>no</td>
- * <td>When present, the text will be analyzed based on the specified type</td>
- * <td>Yes, this parameter may hold a comma-separated list of values and the analysis will be performed for each of the specified field types</td>
- * </tr>
- * <tr>
- * <td>analysis.fieldvalue</td>
- * <td>string</td>
- * <td>no</td>
- * <td>The text that will be analyzed. The analysis will mimic the index-time analysis.</td>
- * <td>No</td>
- * </tr>
- * <tr>
- * <td>{@code analysis.query} OR {@code q}</td>
- * <td>string</td>
- * <td>no</td>
- * <td>When present, the text that will be analyzed. The analysis will mimic the query-time analysis. Note that the
- * {@code analysis.query} parameter takes precedence over the {@code q} parameter.</td>
- * <td>No</td>
- * </tr>
- * <tr>
- * <td>analysis.showmatch</td>
- * <td>boolean</td>
- * <td>no</td>
- * <td>When set to {@code true} and when query analysis is performed, the produced tokens of the field value
- * analysis will be marked as "matched" for every token that is also produced by the query analysis</td>
- * <td>No</td>
- * </tr>
- * </table>
- * <p>Note that if neither analysis.fieldname nor analysis.fieldtype is specified, then the default search field's
- * analyzer is used.</p>
- * <p>Note that one of analysis.fieldvalue, analysis.query, or q must be specified.</p>
- *
- * @since solr 1.4 
- */
-public class FieldAnalysisRequestHandler extends AnalysisRequestHandlerBase {
-
-  @Override
-  protected NamedList doAnalysis(SolrQueryRequest req) throws Exception {
-    FieldAnalysisRequest analysisRequest = resolveAnalysisRequest(req);
-    IndexSchema indexSchema = req.getSchema();
-    return handleAnalysisRequest(analysisRequest, indexSchema);
-  }
-
-  @Override
-  public String getDescription() {
-    return "Provide a breakdown of the analysis process of field/query text";
-  }
-
-  // ================================================= Helper methods ================================================
-
-  /**
-   * Resolves the AnalysisRequest based on the parameters in the given SolrParams.
-   *
-   * @param req the request
-   *
-   * @return AnalysisRequest containing all the information about what needs to be analyzed, and using what
-   *         fields/types
-   */
-  FieldAnalysisRequest resolveAnalysisRequest(SolrQueryRequest req) throws SolrException {
-    SolrParams solrParams = req.getParams();
-    FieldAnalysisRequest analysisRequest = new FieldAnalysisRequest();
-
-    boolean useDefaultSearchField = true;
-    if (solrParams.get(AnalysisParams.FIELD_TYPE) != null) {
-      analysisRequest.setFieldTypes(Arrays.asList(solrParams.get(AnalysisParams.FIELD_TYPE).split(",")));
-      useDefaultSearchField = false;
-    }
-    if (solrParams.get(AnalysisParams.FIELD_NAME) != null) {
-      analysisRequest.setFieldNames(Arrays.asList(solrParams.get(AnalysisParams.FIELD_NAME).split(",")));
-      useDefaultSearchField = false;
-    }
-    if (useDefaultSearchField) {
-      if (solrParams.get(CommonParams.DF) != null) {
-        analysisRequest.addFieldName(solrParams.get(CommonParams.DF));
-      } else {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "Field analysis request must contain one of analysis.fieldtype, analysis.fieldname or df.");
-      }
-    }
-    analysisRequest.setQuery(solrParams.get(AnalysisParams.QUERY, solrParams.get(CommonParams.Q)));
-
-    String value = solrParams.get(AnalysisParams.FIELD_VALUE);
-    if (analysisRequest.getQuery() == null && value == null)  {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "One of analysis.fieldvalue, q, or analysis.query parameters must be specified");
-    }
-
-    Iterable<ContentStream> streams = req.getContentStreams();
-    if (streams != null) {
-      // NOTE: Only the first content stream is currently processed
-      for (ContentStream stream : streams) {
-        Reader reader = null;
-        try {
-          reader = stream.getReader();
-          value = IOUtils.toString(reader);
-        } catch (IOException e) {
-          // do nothing, leave value set to the request parameter
-        }
-        finally {
-          IOUtils.closeQuietly(reader);
-        }
-        break;
-      }
-    }
-
-    analysisRequest.setFieldValue(value);
-    analysisRequest.setShowMatch(solrParams.getBool(AnalysisParams.SHOW_MATCH, false));
-    return analysisRequest;
-  }
-
-  /**
-   * Handles the resolved analysis request and returns the analysis breakdown response as a named list.
-   *
-   * @param request The request to handle.
-   * @param schema  The index schema.
-   *
-   * @return The analysis breakdown as a named list.
-   */
-  protected NamedList<NamedList> handleAnalysisRequest(FieldAnalysisRequest request, IndexSchema schema) {
-    NamedList<NamedList> analysisResults = new SimpleOrderedMap<>();
-
-    NamedList<NamedList> fieldTypeAnalysisResults = new SimpleOrderedMap<>();
-    if (request.getFieldTypes() != null)  {
-      for (String fieldTypeName : request.getFieldTypes()) {
-        FieldType fieldType = schema.getFieldTypes().get(fieldTypeName);
-        fieldTypeAnalysisResults.add(fieldTypeName, analyzeValues(request, fieldType, null));
-      }
-    }
-
-    NamedList<NamedList> fieldNameAnalysisResults = new SimpleOrderedMap<>();
-    if (request.getFieldNames() != null)  {
-      for (String fieldName : request.getFieldNames()) {
-        FieldType fieldType = schema.getFieldType(fieldName);
-        fieldNameAnalysisResults.add(fieldName, analyzeValues(request, fieldType, fieldName));
-      }
-    }
-
-    analysisResults.add("field_types", fieldTypeAnalysisResults);
-    analysisResults.add("field_names", fieldNameAnalysisResults);
-
-    return analysisResults;
-  }
-
-  /**
-   * Analyzes the index value (if it exists) and the query value (if it exists) in the given AnalysisRequest, using
-   * the Analyzers of the given field type.
-   *
-   * @param analysisRequest AnalysisRequest from where the index and query values will be taken
-   * @param fieldType       Type of field whose analyzers will be used
-   * @param fieldName       Name of the field to be analyzed.  Can be {@code null}
-   *
-   * @return NamedList containing the tokens produced by the analyzers of the given field, separated into an index and
-   *         a query group
-   */ // package access for testing
-  NamedList<NamedList> analyzeValues(FieldAnalysisRequest analysisRequest, FieldType fieldType, String fieldName) {
-
-    final String queryValue = analysisRequest.getQuery();
-    final Set<BytesRef> termsToMatch = (queryValue != null && analysisRequest.isShowMatch())
-      ? getQueryTokenSet(queryValue, fieldType.getQueryAnalyzer())
-      : EMPTY_BYTES_SET;
-
-    NamedList<NamedList> analyzeResults = new SimpleOrderedMap<>();
-    if (analysisRequest.getFieldValue() != null) {
-      AnalysisContext context = new AnalysisContext(fieldName, fieldType, fieldType.getIndexAnalyzer(), termsToMatch);
-      NamedList analyzedTokens = analyzeValue(analysisRequest.getFieldValue(), context);
-      analyzeResults.add("index", analyzedTokens);
-    }
-    if (analysisRequest.getQuery() != null) {
-      AnalysisContext context = new AnalysisContext(fieldName, fieldType, fieldType.getQueryAnalyzer());
-      NamedList analyzedTokens = analyzeValue(analysisRequest.getQuery(), context);
-      analyzeResults.add("query", analyzedTokens);
-    }
-
-    return analyzeResults;
-  }
-}
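
The same parameters can be exercised from SolrJ via the FieldAnalysisRequest
imported above. A hedged sketch, assuming the handler is registered at its
conventional /analysis/field path and a "collection1" core exists:

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.FieldAnalysisRequest;
    import org.apache.solr.client.solrj.response.FieldAnalysisResponse;

    public class FieldAnalysisExample {
      public static void main(String[] args) throws Exception {
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/collection1").build()) {
          FieldAnalysisRequest request = new FieldAnalysisRequest();
          request.addFieldName("text");                  // analysis.fieldname
          request.setFieldValue("The quick brown fox");  // analysis.fieldvalue (index-time)
          request.setQuery("quick");                     // analysis.query (query-time)
          request.setShowMatch(true);                    // analysis.showmatch
          FieldAnalysisResponse response = request.process(client);
          System.out.println(response);
        }
      }
    }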

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/GraphHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/GraphHandler.java b/solr/core/src/java/org/apache/solr/handler/GraphHandler.java
deleted file mode 100644
index ed5ae0a..0000000
--- a/solr/core/src/java/org/apache/solr/handler/GraphHandler.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.solr.client.solrj.io.Tuple;
-import org.apache.solr.client.solrj.io.comp.StreamComparator;
-import org.apache.solr.client.solrj.io.graph.Traversal;
-import org.apache.solr.client.solrj.io.stream.*;
-import org.apache.solr.client.solrj.io.stream.expr.DefaultStreamFactory;
-import org.apache.solr.client.solrj.io.stream.expr.Explanation;
-import org.apache.solr.client.solrj.io.stream.expr.Expressible;
-import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.security.AuthorizationContext;
-import org.apache.solr.security.PermissionNameProvider;
-import org.apache.solr.util.plugin.SolrCoreAware;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-/**
- * @since 6.1.0
- */
-public class GraphHandler extends RequestHandlerBase implements SolrCoreAware, PermissionNameProvider {
-
-  private StreamFactory streamFactory = new DefaultStreamFactory();
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private String coreName;
-
-  @Override
-  public PermissionNameProvider.Name getPermissionName(AuthorizationContext request) {
-    return PermissionNameProvider.Name.READ_PERM;
-  }
-
-  public void inform(SolrCore core) {
-
-    /* The stream factory will always contain the zkUrl for the given collection.
-     * Adds default streams with their corresponding function names. These
-     * defaults can be overridden or added to in the solrConfig in the stream
-     * RequestHandler def. Example config override
-     *  <lst name="streamFunctions">
-     *    <str name="group">org.apache.solr.client.solrj.io.stream.ReducerStream</str>
-     *    <str name="count">org.apache.solr.client.solrj.io.stream.RecordCountStream</str>
-     *  </lst>
-     * */
-
-    String defaultCollection;
-    String defaultZkhost;
-    CoreContainer coreContainer = core.getCoreContainer();
-    this.coreName = core.getName();
-
-    if(coreContainer.isZooKeeperAware()) {
-      defaultCollection = core.getCoreDescriptor().getCollectionName();
-      defaultZkhost = core.getCoreContainer().getZkController().getZkServerAddress();
-      streamFactory.withCollectionZkHost(defaultCollection, defaultZkhost);
-      streamFactory.withDefaultZkHost(defaultZkhost);
-    }
-
-    // This pulls all the overrides and additions from the config
-    Object functionMappingsObj = initArgs.get("streamFunctions");
-    if(null != functionMappingsObj){
-      NamedList<?> functionMappings = (NamedList<?>)functionMappingsObj;
-      for(Entry<String,?> functionMapping : functionMappings){
-        Class<? extends Expressible> clazz = core.getResourceLoader().findClass((String)functionMapping.getValue(),
-            Expressible.class);
-        streamFactory.withFunctionName(functionMapping.getKey(), clazz);
-      }
-    }
-  }
-
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    SolrParams params = req.getParams();
-    params = adjustParams(params);
-    req.setParams(params);
-
-
-    TupleStream tupleStream = null;
-
-    try {
-      tupleStream = this.streamFactory.constructStream(params.get("expr"));
-    } catch (Exception e) {
-      // Catch exceptions that occur while the stream is being created, including streaming expression parsing errors.
-      SolrException.log(log, e);
-      Map requestContext = req.getContext();
-      requestContext.put("stream", new DummyErrorStream(e));
-      return;
-    }
-
-    StreamContext context = new StreamContext();
-    context.setSolrClientCache(StreamHandler.clientCache);
-    context.put("core", this.coreName);
-    Traversal traversal = new Traversal();
-    context.put("traversal", traversal);
-    tupleStream.setStreamContext(context);
-    Map requestContext = req.getContext();
-    requestContext.put("stream", new TimerStream(new ExceptionStream(tupleStream)));
-    requestContext.put("traversal", traversal);
-  }
-
-  public String getDescription() {
-    return "StreamHandler";
-  }
-
-  public String getSource() {
-    return null;
-  }
-
-
-  public static class DummyErrorStream extends TupleStream {
-    private Exception e;
-
-    public DummyErrorStream(Exception e) {
-      this.e = e;
-    }
-    public StreamComparator getStreamSort() {
-      return null;
-    }
-
-    public void close() {
-    }
-
-    public void open() {
-    }
-
-    public Exception getException() {
-      return this.e;
-    }
-
-    public void setStreamContext(StreamContext context) {
-    }
-
-    public List<TupleStream> children() {
-      return null;
-    }
-
-    @Override
-    public Explanation toExplanation(StreamFactory factory) throws IOException {
-      return null;
-    }
-
-    public Tuple read() {
-      String msg = e.getMessage();
-      Map<String, Object> m = new HashMap<>();
-      m.put("EOF", true);
-      m.put("EXCEPTION", msg);
-      return new Tuple(m);
-    }
-  }
-
-
-  private SolrParams adjustParams(SolrParams params) {
-    ModifiableSolrParams adjustedParams = new ModifiableSolrParams();
-    adjustedParams.add(params);
-    adjustedParams.add(CommonParams.OMIT_HEADER, "true");
-    return adjustedParams;
-  }
-
-  public static class TimerStream extends TupleStream {
-
-    private long begin;
-    private TupleStream tupleStream;
-
-    public TimerStream(TupleStream tupleStream) {
-      this.tupleStream = tupleStream;
-    }
-
-    public StreamComparator getStreamSort() {
-      return this.tupleStream.getStreamSort();
-    }
-
-    public void close() throws IOException {
-      this.tupleStream.close();
-    }
-
-    public void open() throws IOException {
-      this.begin = System.nanoTime();
-      this.tupleStream.open();
-    }
-
-    public void setStreamContext(StreamContext context) {
-      this.tupleStream.setStreamContext(context);
-    }
-
-    public List<TupleStream> children() {
-      return this.tupleStream.children();
-    }
-
-    @Override
-    public Explanation toExplanation(StreamFactory factory) throws IOException {
-      return null;
-    }
-
-    public Tuple read() throws IOException {
-      Tuple tuple = this.tupleStream.read();
-      if(tuple.EOF) {
-        long totalTime = (System.nanoTime() - begin) / 1000000;
-        tuple.fields.put("RESPONSE_TIME", totalTime);
-      }
-      return tuple;
-    }
-  }
-
-}
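
The streamFunctions override described in inform() above boils down to
StreamFactory registrations. Below is a minimal sketch of the equivalent
wiring in code; the zkHost values and collection name are placeholders.

    import org.apache.solr.client.solrj.io.stream.RecordCountStream;
    import org.apache.solr.client.solrj.io.stream.expr.DefaultStreamFactory;
    import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;

    public class StreamFunctionMappingSketch {
      public static void main(String[] args) {
        StreamFactory factory = new DefaultStreamFactory();
        factory.withCollectionZkHost("collection1", "localhost:9983"); // collection -> zkHost
        factory.withDefaultZkHost("localhost:9983");
        factory.withFunctionName("count", RecordCountStream.class);    // same mapping as the config example
      }
    }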

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/HaversineMetersEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/HaversineMetersEvaluator.java b/solr/core/src/java/org/apache/solr/handler/HaversineMetersEvaluator.java
deleted file mode 100644
index 2e30555..0000000
--- a/solr/core/src/java/org/apache/solr/handler/HaversineMetersEvaluator.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.IOException;
-
-import org.apache.commons.math3.exception.DimensionMismatchException;
-import org.apache.commons.math3.ml.distance.DistanceMeasure;
-import org.apache.lucene.util.SloppyMath;
-import org.apache.solr.client.solrj.io.Tuple;
-import org.apache.solr.client.solrj.io.eval.RecursiveEvaluator;
-import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
-import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
-
-public class HaversineMetersEvaluator extends RecursiveEvaluator {
-  protected static final long serialVersionUID = 1L;
-
-  public HaversineMetersEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
-    super(expression, factory);
-  }
-
-
-  @Override
-  public Object evaluate(Tuple tuple) throws IOException {
-    return new HaversineDistance();
-  }
-
-  @Override
-  public Object doWork(Object... values) throws IOException {
-    // Nothing to do here
-    throw new IOException("This call should never occur");
-  }
-
-  public static class HaversineDistance implements DistanceMeasure {
-    private static final long serialVersionUID = -9108154600539125566L;
-
-    public HaversineDistance() {
-    }
-
-    public double compute(double[] a, double[] b) throws DimensionMismatchException {
-      return SloppyMath.haversinMeters(a[0], a[1], b[0], b[1]);
-    }
-  }
-
-}


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/SearchRateTrigger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/SearchRateTrigger.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/SearchRateTrigger.java
deleted file mode 100644
index 81d56d3..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/SearchRateTrigger.java
+++ /dev/null
@@ -1,797 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.AtomicDouble;
-import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
-import org.apache.solr.client.solrj.cloud.autoscaling.ReplicaInfo;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.AutoScalingParams;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.util.Pair;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.solr.metrics.SolrCoreMetricManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Trigger for the {@link org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType#SEARCHRATE} event.
- */
-public class SearchRateTrigger extends TriggerBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String COLLECTIONS_PROP = "collections";
-  public static final String METRIC_PROP = "metric";
-  public static final String MAX_OPS_PROP = "maxOps";
-  public static final String MIN_REPLICAS_PROP = "minReplicas";
-  public static final String ABOVE_RATE_PROP = "aboveRate";
-  public static final String BELOW_RATE_PROP = "belowRate";
-  public static final String ABOVE_NODE_RATE_PROP = "aboveNodeRate";
-  public static final String BELOW_NODE_RATE_PROP = "belowNodeRate";
-  public static final String ABOVE_OP_PROP = "aboveOp";
-  public static final String BELOW_OP_PROP = "belowOp";
-  public static final String ABOVE_NODE_OP_PROP = "aboveNodeOp";
-  public static final String BELOW_NODE_OP_PROP = "belowNodeOp";
-
-  // back-compat
-  public static final String BC_COLLECTION_PROP = "collection";
-  public static final String BC_RATE_PROP = "rate";
-
-
-  public static final String HOT_NODES = "hotNodes";
-  public static final String HOT_COLLECTIONS = "hotCollections";
-  public static final String HOT_SHARDS = "hotShards";
-  public static final String HOT_REPLICAS = "hotReplicas";
-  public static final String COLD_NODES = "coldNodes";
-  public static final String COLD_COLLECTIONS = "coldCollections";
-  public static final String COLD_SHARDS = "coldShards";
-  public static final String COLD_REPLICAS = "coldReplicas";
-  public static final String VIOLATION_PROP = "violationType";
-
-  public static final int DEFAULT_MAX_OPS = 3;
-  public static final String DEFAULT_METRIC = "QUERY./select.requestTimes:1minRate";
-
-  private String metric;
-  private int maxOps;
-  private Integer minReplicas = null;
-  private final Set<String> collections = new HashSet<>();
-  private String shard;
-  private String node;
-  private double aboveRate;
-  private double belowRate;
-  private double aboveNodeRate;
-  private double belowNodeRate;
-  private CollectionParams.CollectionAction aboveOp, belowOp, aboveNodeOp, belowNodeOp;
-  private final Map<String, Long> lastCollectionEvent = new ConcurrentHashMap<>();
-  private final Map<String, Long> lastNodeEvent = new ConcurrentHashMap<>();
-  private final Map<String, Long> lastShardEvent = new ConcurrentHashMap<>();
-  private final Map<String, Long> lastReplicaEvent = new ConcurrentHashMap<>();
-  private final Map<String, Object> state = new HashMap<>();
-
-  public SearchRateTrigger(String name) {
-    super(TriggerEventType.SEARCHRATE, name);
-    this.state.put("lastCollectionEvent", lastCollectionEvent);
-    this.state.put("lastNodeEvent", lastNodeEvent);
-    this.state.put("lastShardEvent", lastShardEvent);
-    this.state.put("lastReplicaEvent", lastReplicaEvent);
-    TriggerUtils.validProperties(validProperties,
-        COLLECTIONS_PROP, AutoScalingParams.SHARD, AutoScalingParams.NODE,
-        METRIC_PROP,
-        MAX_OPS_PROP,
-        MIN_REPLICAS_PROP,
-        ABOVE_OP_PROP,
-        BELOW_OP_PROP,
-        ABOVE_NODE_OP_PROP,
-        BELOW_NODE_OP_PROP,
-        ABOVE_RATE_PROP,
-        BELOW_RATE_PROP,
-        ABOVE_NODE_RATE_PROP,
-        BELOW_NODE_RATE_PROP,
-        // back-compat props
-        BC_COLLECTION_PROP,
-        BC_RATE_PROP);
-  }
-
-  @Override
-  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException {
-    super.configure(loader, cloudManager, properties);
-    // parse config options
-    String collectionsStr = (String)properties.get(COLLECTIONS_PROP);
-    if (collectionsStr != null) {
-      collections.addAll(StrUtils.splitSmart(collectionsStr, ','));
-    }
-    // check back-compat collection prop
-    collectionsStr = (String)properties.get(BC_COLLECTION_PROP);
-    if (collectionsStr != null) {
-      if (!collectionsStr.equals(Policy.ANY)) {
-        collections.add(collectionsStr);
-      }
-    }
-    shard = (String)properties.getOrDefault(AutoScalingParams.SHARD, Policy.ANY);
-    if (!shard.equals(Policy.ANY) && (collections.isEmpty() || collections.size() > 1)) {
-      throw new TriggerValidationException(name, AutoScalingParams.SHARD, "When 'shard' is set to something other than #ANY, exactly one collection name must be specified");
-    }
-    node = (String)properties.getOrDefault(AutoScalingParams.NODE, Policy.ANY);
-    metric = (String)properties.getOrDefault(METRIC_PROP, DEFAULT_METRIC);
-
-    String maxOpsStr = String.valueOf(properties.getOrDefault(MAX_OPS_PROP, DEFAULT_MAX_OPS));
-    try {
-      maxOps = Integer.parseInt(maxOpsStr);
-    } catch (Exception e) {
-      throw new TriggerValidationException(name, MAX_OPS_PROP, "invalid value '" + maxOpsStr + "': " + e.toString());
-    }
-
-    Object o = properties.get(MIN_REPLICAS_PROP);
-    if (o != null) {
-      try {
-        minReplicas = Integer.parseInt(o.toString());
-        if (minReplicas < 1) {
-          throw new Exception("must be at least 1; leave unset to use the collection's 'replicationFactor'");
-        }
-      } catch (Exception e) {
-        throw new TriggerValidationException(name, MIN_REPLICAS_PROP, "invalid value '" + o + "': " + e.toString());
-      }
-    }
-
-    Object above = properties.get(ABOVE_RATE_PROP);
-    Object below = properties.get(BELOW_RATE_PROP);
-    // back-compat rate prop
-    if (properties.containsKey(BC_RATE_PROP)) {
-      above = properties.get(BC_RATE_PROP);
-    }
-    if (above == null && below == null) {
-      throw new TriggerValidationException(name, ABOVE_RATE_PROP, "at least one of '" +
-      ABOVE_RATE_PROP + "' or '" + BELOW_RATE_PROP + "' must be set");
-    }
-    if (above != null) {
-      try {
-        aboveRate = Double.parseDouble(String.valueOf(above));
-      } catch (Exception e) {
-        throw new TriggerValidationException(name, ABOVE_RATE_PROP, "Invalid configuration value: '" + above + "': " + e.toString());
-      }
-    } else {
-      aboveRate = Double.MAX_VALUE;
-    }
-    if (below != null) {
-      try {
-        belowRate = Double.parseDouble(String.valueOf(below));
-      } catch (Exception e) {
-        throw new TriggerValidationException(name, BELOW_RATE_PROP, "Invalid configuration value: '" + below + "': " + e.toString());
-      }
-    } else {
-      belowRate = -1;
-    }
-
-    // node rates
-    above = properties.get(ABOVE_NODE_RATE_PROP);
-    below = properties.get(BELOW_NODE_RATE_PROP);
-    if (above != null) {
-      try {
-        aboveNodeRate = Double.parseDouble(String.valueOf(above));
-      } catch (Exception e) {
-        throw new TriggerValidationException(name, ABOVE_NODE_RATE_PROP, "Invalid configuration value: '" + above + "': " + e.toString());
-      }
-    } else {
-      aboveNodeRate = Double.MAX_VALUE;
-    }
-    if (below != null) {
-      try {
-        belowNodeRate = Double.parseDouble(String.valueOf(below));
-      } catch (Exception e) {
-        throw new TriggerValidationException(name, BELOW_NODE_RATE_PROP, "Invalid configuration value: '" + below + "': " + e.toString());
-      }
-    } else {
-      belowNodeRate = -1;
-    }
-
-    String aboveOpStr = String.valueOf(properties.getOrDefault(ABOVE_OP_PROP, CollectionParams.CollectionAction.ADDREPLICA.toLower()));
-    String belowOpStr = String.valueOf(properties.getOrDefault(BELOW_OP_PROP, CollectionParams.CollectionAction.DELETEREPLICA.toLower()));
-    aboveOp = CollectionParams.CollectionAction.get(aboveOpStr);
-    if (aboveOp == null) {
-      throw new TriggerValidationException(getName(), ABOVE_OP_PROP, "unrecognized value: '" + aboveOpStr + "'");
-    }
-    belowOp = CollectionParams.CollectionAction.get(belowOpStr);
-    if (belowOp == null) {
-      throw new TriggerValidationException(getName(), BELOW_OP_PROP, "unrecognized value: '" + belowOpStr + "'");
-    }
-    Object aboveNodeObj = properties.getOrDefault(ABOVE_NODE_OP_PROP, CollectionParams.CollectionAction.MOVEREPLICA.toLower());
-    // do NOT set the default to DELETENODE
-    Object belowNodeObj = properties.get(BELOW_NODE_OP_PROP);
-    try {
-      aboveNodeOp = CollectionParams.CollectionAction.get(String.valueOf(aboveNodeObj));
-    } catch (Exception e) {
-      throw new TriggerValidationException(getName(), ABOVE_NODE_OP_PROP, "unrecognized value: '" + aboveNodeObj + "'");
-    }
-    if (belowNodeObj != null) {
-      try {
-        belowNodeOp = CollectionParams.CollectionAction.get(String.valueOf(belowNodeObj));
-      } catch (Exception e) {
-        throw new TriggerValidationException(getName(), BELOW_NODE_OP_PROP, "unrecognized value: '" + belowNodeObj + "'");
-      }
-    }
-  }
-
-  @VisibleForTesting
-  Map<String, Object> getConfig() {
-    Map<String, Object> config = new HashMap<>();
-    config.put("name", name);
-    config.put(COLLECTIONS_PROP, collections);
-    config.put(AutoScalingParams.SHARD, shard);
-    config.put(AutoScalingParams.NODE, node);
-    config.put(METRIC_PROP, metric);
-    config.put(MAX_OPS_PROP, maxOps);
-    config.put(MIN_REPLICAS_PROP, minReplicas);
-    config.put(ABOVE_RATE_PROP, aboveRate);
-    config.put(BELOW_RATE_PROP, belowRate);
-    config.put(ABOVE_NODE_RATE_PROP, aboveNodeRate);
-    config.put(BELOW_NODE_RATE_PROP, belowNodeRate);
-    config.put(ABOVE_OP_PROP, aboveOp);
-    config.put(ABOVE_NODE_OP_PROP, aboveNodeOp);
-    config.put(BELOW_OP_PROP, belowOp);
-    config.put(BELOW_NODE_OP_PROP, belowNodeOp);
-    return config;
-  }
-
-  @Override
-  protected Map<String, Object> getState() {
-    return state;
-  }
-
-  @Override
-  protected void setState(Map<String, Object> state) {
-    lastCollectionEvent.clear();
-    lastNodeEvent.clear();
-    lastShardEvent.clear();
-    lastReplicaEvent.clear();
-    Map<String, Long> collTimes = (Map<String, Long>)state.get("lastCollectionEvent");
-    if (collTimes != null) {
-      lastCollectionEvent.putAll(collTimes);
-    }
-    Map<String, Long> nodeTimes = (Map<String, Long>)state.get("lastNodeEvent");
-    if (nodeTimes != null) {
-      lastNodeEvent.putAll(nodeTimes);
-    }
-    Map<String, Long> shardTimes = (Map<String, Long>)state.get("lastShardEvent");
-    if (shardTimes != null) {
-      lastShardEvent.putAll(shardTimes);
-    }
-    Map<String, Long> replicaTimes = (Map<String, Long>)state.get("lastReplicaEvent");
-    if (replicaTimes != null) {
-      lastReplicaEvent.putAll(replicaTimes);
-    }
-  }
-
-  @Override
-  public void restoreState(AutoScaling.Trigger old) {
-    assert old.isClosed();
-    if (old instanceof SearchRateTrigger) {
-      SearchRateTrigger that = (SearchRateTrigger)old;
-      assert this.name.equals(that.name);
-      this.lastCollectionEvent.clear();
-      this.lastNodeEvent.clear();
-      this.lastShardEvent.clear();
-      this.lastReplicaEvent.clear();
-      this.lastCollectionEvent.putAll(that.lastCollectionEvent);
-      this.lastNodeEvent.putAll(that.lastNodeEvent);
-      this.lastShardEvent.putAll(that.lastShardEvent);
-      this.lastReplicaEvent.putAll(that.lastReplicaEvent);
-    } else {
-      throw new SolrException(SolrException.ErrorCode.INVALID_STATE,
-          "Unable to restore state from an unknown type of trigger");
-    }
-
-  }
-
-  @Override
-  public void run() {
-    AutoScaling.TriggerEventProcessor processor = processorRef.get();
-    if (processor == null) {
-      return;
-    }
-
-    // collection, shard, list(replica + rate)
-    Map<String, Map<String, List<ReplicaInfo>>> collectionRates = new HashMap<>();
-    // node, rate
-    Map<String, AtomicDouble> nodeRates = new HashMap<>();
-    // this replication factor only considers replica types that are searchable
-    // collection, shard, RF
-    Map<String, Map<String, AtomicInteger>> searchableReplicationFactors = new HashMap<>();
-
-    ClusterState clusterState = null;
-    try {
-      clusterState = cloudManager.getClusterStateProvider().getClusterState();
-    } catch (IOException e) {
-      log.warn("Error getting ClusterState", e);
-      return;
-    }
-    for (String node : cloudManager.getClusterStateProvider().getLiveNodes()) {
-      Map<String, ReplicaInfo> metricTags = new HashMap<>();
-      // coll, shard, replica
-      Map<String, Map<String, List<ReplicaInfo>>> infos = cloudManager.getNodeStateProvider().getReplicaInfo(node, Collections.emptyList());
-      infos.forEach((coll, shards) -> {
-        Map<String, AtomicInteger> replPerShard = searchableReplicationFactors.computeIfAbsent(coll, c -> new HashMap<>());
-        shards.forEach((sh, replicas) -> {
-          AtomicInteger repl = replPerShard.computeIfAbsent(sh, s -> new AtomicInteger());
-          replicas.forEach(replica -> {
-            // skip non-active replicas
-            if (replica.getState() != Replica.State.ACTIVE) {
-              return;
-            }
-            repl.incrementAndGet();
-            // we have to translate to the metrics registry name, which uses "_replica_nN" as suffix
-            String replicaName = Utils.parseMetricsReplicaName(coll, replica.getCore());
-            if (replicaName == null) { // should not happen, but guard anyway
-              replicaName = replica.getName(); // this is actually the coreNode name
-            }
-            String registry = SolrCoreMetricManager.createRegistryName(true, coll, sh, replicaName, null);
-            String tag = "metrics:" + registry + ":" + metric;
-            metricTags.put(tag, replica);
-          });
-        });
-      });
-      if (metricTags.isEmpty()) {
-        continue;
-      }
-      Map<String, Object> rates = cloudManager.getNodeStateProvider().getNodeValues(node, metricTags.keySet());
-      if (log.isDebugEnabled()) {
-        log.debug("### rates for node " + node);
-        rates.forEach((tag, rate) -> log.debug("###  " + tag + "\t" + rate));
-      }
-      rates.forEach((tag, rate) -> {
-        ReplicaInfo info = metricTags.get(tag);
-        if (info == null) {
-          log.warn("Missing replica info for response tag " + tag);
-        } else {
-          Map<String, List<ReplicaInfo>> perCollection = collectionRates.computeIfAbsent(info.getCollection(), s -> new HashMap<>());
-          List<ReplicaInfo> perShard = perCollection.computeIfAbsent(info.getShard(), s -> new ArrayList<>());
-          info = (ReplicaInfo)info.clone();
-          info.getVariables().put(AutoScalingParams.RATE, ((Number)rate).doubleValue());
-          perShard.add(info);
-          AtomicDouble perNode = nodeRates.computeIfAbsent(node, s -> new AtomicDouble());
-          perNode.addAndGet(((Number)rate).doubleValue());
-        }
-      });
-    }
-
-    if (log.isDebugEnabled()) {
-      collectionRates.forEach((coll, collRates) -> {
-        log.debug("## Collection: {}", coll);
-        collRates.forEach((s, replicas) -> {
-          log.debug("##  - {}", s);
-          replicas.forEach(ri -> log.debug("##     {}  {}", ri.getCore(), ri.getVariable(AutoScalingParams.RATE)));
-        });
-      });
-    }
-    long now = cloudManager.getTimeSource().getTimeNs();
-    Map<String, Double> hotNodes = new HashMap<>();
-    Map<String, Double> coldNodes = new HashMap<>();
-
-    // check for exceeded rates and filter out those with less than waitFor from previous events
-    nodeRates.entrySet().stream()
-        .filter(entry -> node.equals(Policy.ANY) || node.equals(entry.getKey()))
-        .forEach(entry -> {
-          if (entry.getValue().get() > aboveNodeRate) {
-            if (waitForElapsed(entry.getKey(), now, lastNodeEvent)) {
-              hotNodes.put(entry.getKey(), entry.getValue().get());
-            }
-          } else if (entry.getValue().get() < belowNodeRate) {
-            if (waitForElapsed(entry.getKey(), now, lastNodeEvent)) {
-              coldNodes.put(entry.getKey(), entry.getValue().get());
-            }
-          } else {
-            // no violation - clear waitForElapsed
-            // (violation is only valid if it persists throughout waitFor)
-            lastNodeEvent.remove(entry.getKey());
-          }
-        });
-
-    Map<String, Map<String, Double>> hotShards = new HashMap<>();
-    Map<String, Map<String, Double>> coldShards = new HashMap<>();
-    List<ReplicaInfo> hotReplicas = new ArrayList<>();
-    List<ReplicaInfo> coldReplicas = new ArrayList<>();
-    collectionRates.forEach((coll, shardRates) -> {
-      shardRates.forEach((sh, replicaRates) -> {
-        double totalShardRate = replicaRates.stream()
-            .map(r -> {
-              String elapsedKey = r.getCollection() + "." + r.getCore();
-              if ((Double)r.getVariable(AutoScalingParams.RATE) > aboveRate) {
-                if (waitForElapsed(elapsedKey, now, lastReplicaEvent)) {
-                  hotReplicas.add(r);
-                }
-              } else if ((Double)r.getVariable(AutoScalingParams.RATE) < belowRate) {
-                if (waitForElapsed(elapsedKey, now, lastReplicaEvent)) {
-                  coldReplicas.add(r);
-                }
-              } else {
-                // no violation - clear waitForElapsed
-                lastReplicaEvent.remove(elapsedKey);
-              }
-              return r;
-            })
-            .mapToDouble(r -> (Double)r.getVariable(AutoScalingParams.RATE)).sum();
-        // calculate average shard rate over all searchable replicas (see SOLR-12470)
-        double shardRate = totalShardRate / searchableReplicationFactors.get(coll).get(sh).doubleValue();
-        String elapsedKey = coll + "." + sh;
-        log.debug("-- {}: totalShardRate={}, shardRate={}", elapsedKey, totalShardRate, shardRate);
-        if ((collections.isEmpty() || collections.contains(coll)) &&
-            (shard.equals(Policy.ANY) || shard.equals(sh))) {
-          if (shardRate > aboveRate) {
-            if (waitForElapsed(elapsedKey, now, lastShardEvent)) {
-              hotShards.computeIfAbsent(coll, s -> new HashMap<>()).put(sh, shardRate);
-            }
-          } else if (shardRate < belowRate) {
-            if (waitForElapsed(elapsedKey, now, lastShardEvent)) {
-              coldShards.computeIfAbsent(coll, s -> new HashMap<>()).put(sh, shardRate);
-              log.debug("-- coldShard waitFor elapsed {}", elapsedKey);
-            } else {
-              if (log.isDebugEnabled()) {
-                Long lastTime = lastShardEvent.computeIfAbsent(elapsedKey, s -> now);
-                long elapsed = TimeUnit.SECONDS.convert(now - lastTime, TimeUnit.NANOSECONDS);
-                log.debug("-- waitFor didn't elapse for {}, waitFor={}, elapsed={}", elapsedKey, getWaitForSecond(), elapsed);
-              }
-            }
-          } else {
-            // no violation - clear waitForElapsed
-            lastShardEvent.remove(elapsedKey);
-          }
-        }
-      });
-    });
-
-    Map<String, Double> hotCollections = new HashMap<>();
-    Map<String, Double> coldCollections = new HashMap<>();
-    collectionRates.forEach((coll, shardRates) -> {
-      double total = shardRates.entrySet().stream()
-          .mapToDouble(e -> e.getValue().stream()
-              .mapToDouble(r -> (Double)r.getVariable(AutoScalingParams.RATE)).sum()).sum();
-      if (collections.isEmpty() || collections.contains(coll)) {
-        if (total > aboveRate) {
-          if (waitForElapsed(coll, now, lastCollectionEvent)) {
-            hotCollections.put(coll, total);
-          }
-        } else if (total < belowRate) {
-          if (waitForElapsed(coll, now, lastCollectionEvent)) {
-            coldCollections.put(coll, total);
-          }
-        } else {
-          // no violation - clear waitForElapsed
-          lastCollectionEvent.remove(coll);
-        }
-      }
-    });
-
-    if (hotCollections.isEmpty() &&
-        hotShards.isEmpty() &&
-        hotReplicas.isEmpty() &&
-        hotNodes.isEmpty() &&
-        coldCollections.isEmpty() &&
-        coldShards.isEmpty() &&
-        coldReplicas.isEmpty() &&
-        coldNodes.isEmpty()) {
-      return;
-    }
-
-    // generate event
-
-    // find the earliest time when a condition was exceeded
-    final AtomicLong eventTime = new AtomicLong(now);
-    hotCollections.forEach((c, r) -> {
-      long time = lastCollectionEvent.get(c);
-      if (eventTime.get() > time) {
-        eventTime.set(time);
-      }
-    });
-    coldCollections.forEach((c, r) -> {
-      long time = lastCollectionEvent.get(c);
-      if (eventTime.get() > time) {
-        eventTime.set(time);
-      }
-    });
-    hotShards.forEach((c, shards) -> {
-      shards.forEach((s, r) -> {
-        long time = lastShardEvent.get(c + "." + s);
-        if (eventTime.get() > time) {
-          eventTime.set(time);
-        }
-      });
-    });
-    coldShards.forEach((c, shards) -> {
-      shards.forEach((s, r) -> {
-        long time = lastShardEvent.get(c + "." + s);
-        if (eventTime.get() > time) {
-          eventTime.set(time);
-        }
-      });
-    });
-    hotReplicas.forEach(r -> {
-      long time = lastReplicaEvent.get(r.getCollection() + "." + r.getCore());
-      if (eventTime.get() > time) {
-        eventTime.set(time);
-      }
-    });
-    coldReplicas.forEach(r -> {
-      long time = lastReplicaEvent.get(r.getCollection() + "." + r.getCore());
-      if (eventTime.get() > time) {
-        eventTime.set(time);
-      }
-    });
-    hotNodes.forEach((n, r) -> {
-      long time = lastNodeEvent.get(n);
-      if (eventTime.get() > time) {
-        eventTime.set(time);
-      }
-    });
-    coldNodes.forEach((n, r) -> {
-      long time = lastNodeEvent.get(n);
-      if (eventTime.get() > time) {
-        eventTime.set(time);
-      }
-    });
-
-    final List<TriggerEvent.Op> ops = new ArrayList<>();
-    final Set<String> violations = new HashSet<>();
-
-    calculateHotOps(ops, violations, searchableReplicationFactors, hotNodes, hotCollections, hotShards, hotReplicas);
-    calculateColdOps(ops, violations, clusterState, searchableReplicationFactors, coldNodes, coldCollections, coldShards, coldReplicas);
-
-    if (ops.isEmpty()) {
-      return;
-    }
-
-    if (processor.process(new SearchRateEvent(getName(), eventTime.get(), ops,
-        hotNodes, hotCollections, hotShards, hotReplicas,
-        coldNodes, coldCollections, coldShards, coldReplicas, violations))) {
-      // update lastEvent times
-      hotNodes.keySet().forEach(node -> lastNodeEvent.put(node, now));
-      coldNodes.keySet().forEach(node -> lastNodeEvent.put(node, now));
-      hotCollections.keySet().forEach(coll -> lastCollectionEvent.put(coll, now));
-      coldCollections.keySet().forEach(coll -> lastCollectionEvent.put(coll, now));
-      hotShards.entrySet().forEach(e -> e.getValue()
-          .forEach((sh, rate) -> lastShardEvent.put(e.getKey() + "." + sh, now)));
-      coldShards.entrySet().forEach(e -> e.getValue()
-          .forEach((sh, rate) -> lastShardEvent.put(e.getKey() + "." + sh, now)));
-      hotReplicas.forEach(r -> lastReplicaEvent.put(r.getCollection() + "." + r.getCore(), now));
-      coldReplicas.forEach(r -> lastReplicaEvent.put(r.getCollection() + "." + r.getCore(), now));
-    }
-  }
-
-  private void calculateHotOps(List<TriggerEvent.Op> ops,
-                               Set<String> violations,
-                               Map<String, Map<String, AtomicInteger>> searchableReplicationFactors,
-                               Map<String, Double> hotNodes,
-                               Map<String, Double> hotCollections,
-                               Map<String, Map<String, Double>> hotShards,
-                               List<ReplicaInfo> hotReplicas) {
-    // calculate the number of replicas to add to each hot shard, based on how much the rate was
-    // exceeded - but within limits.
-
-    // first resolve a situation when only a node is hot but no collection / shard is hot
-    // TODO: eventually we may want to commission a new node
-    if (!hotNodes.isEmpty()) {
-      if (hotShards.isEmpty() && hotCollections.isEmpty()) {
-        // move replicas around
-        if (aboveNodeOp != null) {
-          hotNodes.forEach((n, r) -> {
-            ops.add(new TriggerEvent.Op(aboveNodeOp, Suggester.Hint.SRC_NODE, n));
-            violations.add(HOT_NODES);
-          });
-        }
-      } else {
-        // ignore - hot shards will result in changes that will change hot node status anyway
-      }
-    }
-    // add replicas
-    Map<String, Map<String, List<Pair<String, String>>>> hints = new HashMap<>();
-
-    // HOT COLLECTIONS
-    // currently we don't do anything for hot collections. Theoretically we could add
-    // 1 replica more to each shard, based on how close to the threshold each shard is
-    // but it's probably better to wait for a shard to become hot and be more precise.
-
-    // HOT SHARDS
-
-    hotShards.forEach((coll, shards) -> shards.forEach((s, r) -> {
-      List<Pair<String, String>> perShard = hints
-          .computeIfAbsent(coll, c -> new HashMap<>())
-          .computeIfAbsent(s, sh -> new ArrayList<>());
-      addReplicaHints(coll, s, r, searchableReplicationFactors.get(coll).get(s).get(), perShard);
-      violations.add(HOT_SHARDS);
-    }));
-
-    // HOT REPLICAS
-    // Hot replicas (while their shards are not hot) may be caused by
-    // dumb clients that use direct replica URLs - this is beyond our control
-    // so ignore them.
-
-    hints.values().forEach(m -> m.values().forEach(lst -> lst.forEach(p -> {
-      ops.add(new TriggerEvent.Op(aboveOp, Suggester.Hint.COLL_SHARD, p));
-    })));
-
-  }
-
-  /**
-   * This method implements a primitive form of proportional controller with a limiter.
-   */
-  private void addReplicaHints(String collection, String shard, double r, int replicationFactor, List<Pair<String, String>> hints) {
-    int numReplicas = (int)Math.round((r - aboveRate) / (double) replicationFactor);
-    // in one event add at least 1 replica
-    if (numReplicas < 1) {
-      numReplicas = 1;
-    }
-    // ... and at most maxOps replicas
-    if (numReplicas > maxOps) {
-      numReplicas = maxOps;
-    }
-    for (int i = 0; i < numReplicas; i++) {
-      hints.add(new Pair<>(collection, shard));
-    }
-  }
-
-  private void calculateColdOps(List<TriggerEvent.Op> ops,
-                                Set<String> violations,
-                                ClusterState clusterState,
-                                Map<String, Map<String, AtomicInteger>> searchableReplicationFactors,
-                                Map<String, Double> coldNodes,
-                                Map<String, Double> coldCollections,
-                                Map<String, Map<String, Double>> coldShards,
-                                List<ReplicaInfo> coldReplicas) {
-    // COLD COLLECTIONS
-    // Probably can't do anything reasonable about whole cold collections
-    // because they may be needed even if not used.
-
-    // COLD SHARDS & COLD REPLICAS:
-    // We remove cold replicas only from cold shards, otherwise we are susceptible to uneven
-    // replica routing (which is beyond our control).
-    // If we removed replicas from non-cold shards we could accidentally bring that shard into
-    // the hot range, which would result in adding a replica; that replica could again stay cold
-    // due to the same routing issue, which would then lead to removing it, and so on.
-
-    // Remove cold replicas but only when there's at least a minimum number of searchable
-    // replicas still available (additional non-searchable replicas may exist, too)
-    // NOTE: do this before adding ops for DELETENODE because we don't want to attempt
-    // deleting replicas that have been already moved elsewhere
-    Map<String, Map<String, List<ReplicaInfo>>> byCollectionByShard = new HashMap<>();
-    coldReplicas.forEach(ri -> {
-      byCollectionByShard.computeIfAbsent(ri.getCollection(), c -> new HashMap<>())
-          .computeIfAbsent(ri.getShard(), s -> new ArrayList<>())
-          .add(ri);
-    });
-    coldShards.forEach((coll, perShard) -> {
-      perShard.forEach((shard, rate) -> {
-        List<ReplicaInfo> replicas = byCollectionByShard
-            .getOrDefault(coll, Collections.emptyMap())
-            .getOrDefault(shard, Collections.emptyList());
-        if (replicas.isEmpty()) {
-          return;
-        }
-        // only delete if there's at least minRF searchable replicas left
-        int rf = searchableReplicationFactors.get(coll).get(shard).get();
-        // assume first that we only really need a leader and we may be
-        // allowed to remove other replicas
-        int minRF = 1;
-        // but check the official RF and don't go below that
-        Integer RF = clusterState.getCollection(coll).getReplicationFactor();
-        if (RF != null) {
-          minRF = RF;
-        }
-        // unless minReplicas is set explicitly
-        if (minReplicas != null) {
-          minRF = minReplicas;
-        }
-        if (minRF < 1) {
-          minRF = 1;
-        }
-        if (rf > minRF) {
-          // delete at most maxOps replicas at a time
-          AtomicInteger limit = new AtomicInteger(Math.min(maxOps, rf - minRF));
-          replicas.forEach(ri -> {
-            if (limit.get() == 0) {
-              return;
-            }
-            // don't delete a leader
-            if (ri.getBool(ZkStateReader.LEADER_PROP, false)) {
-              return;
-            }
-            TriggerEvent.Op op = new TriggerEvent.Op(belowOp,
-                Suggester.Hint.COLL_SHARD, new Pair<>(ri.getCollection(), ri.getShard()));
-            op.addHint(Suggester.Hint.REPLICA, ri.getName());
-            ops.add(op);
-            violations.add(COLD_SHARDS);
-            limit.decrementAndGet();
-          });
-        }
-      });
-    });
-
-    // COLD NODES:
-    // Unlike the case of hot nodes, if a node is cold then any monitored
-    // collections / shards / replicas located on that node are cold, too.
-    // HOWEVER, we check only replicas from selected collections / shards,
-    // so deleting a cold node is dangerous because it may interfere with these
-    // non-monitored resources - this is the reason the default belowNodeOp is null / ignored.
-    //
-    // Also, note that due to the way activity is measured only nodes that contain any
-    // monitored resources are considered - there may be cold nodes in the cluster that don't
-    // belong to the monitored collections and they will be ignored.
-    if (belowNodeOp != null) {
-      coldNodes.forEach((node, rate) -> {
-        ops.add(new TriggerEvent.Op(belowNodeOp, Suggester.Hint.SRC_NODE, node));
-        violations.add(COLD_NODES);
-      });
-    }
-
-
-  }
-
-  private boolean waitForElapsed(String name, long now, Map<String, Long> lastEventMap) {
-    Long lastTime = lastEventMap.computeIfAbsent(name, s -> now);
-    long elapsed = TimeUnit.SECONDS.convert(now - lastTime, TimeUnit.NANOSECONDS);
-    log.trace("name={}, lastTime={}, elapsed={}, waitFor={}", name, lastTime, elapsed, getWaitForSecond());
-    return elapsed >= getWaitForSecond();
-  }
-
-  public static class SearchRateEvent extends TriggerEvent {
-    public SearchRateEvent(String source, long eventTime, List<Op> ops,
-                           Map<String, Double> hotNodes,
-                           Map<String, Double> hotCollections,
-                           Map<String, Map<String, Double>> hotShards,
-                           List<ReplicaInfo> hotReplicas,
-                           Map<String, Double> coldNodes,
-                           Map<String, Double> coldCollections,
-                           Map<String, Map<String, Double>> coldShards,
-                           List<ReplicaInfo> coldReplicas,
-                           Set<String> violations) {
-      super(TriggerEventType.SEARCHRATE, source, eventTime, null);
-      properties.put(TriggerEvent.REQUESTED_OPS, ops);
-      properties.put(HOT_NODES, hotNodes);
-      properties.put(HOT_COLLECTIONS, hotCollections);
-      properties.put(HOT_SHARDS, hotShards);
-      properties.put(HOT_REPLICAS, hotReplicas);
-      properties.put(COLD_NODES, coldNodes);
-      properties.put(COLD_COLLECTIONS, coldCollections);
-      properties.put(COLD_SHARDS, coldShards);
-      properties.put(COLD_REPLICAS, coldReplicas);
-      properties.put(VIOLATION_PROP, violations);
-    }
-  }
-}
\ No newline at end of file
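
For orientation on the file removed above: SearchRateTrigger samples a per-replica
request-rate metric, aggregates it per shard, collection and node, and generates
ADDREPLICA / DELETEREPLICA / MOVEREPLICA ops once a threshold violation has persisted
for the configured waitFor period. A minimal sketch of how such a trigger could be
wired up, assuming a SolrResourceLoader and SolrCloudManager supplied by the container
(the trigger name and threshold values below are hypothetical):

package org.apache.solr.cloud.autoscaling;

import java.util.HashMap;
import java.util.Map;

import org.apache.solr.client.solrj.cloud.SolrCloudManager;
import org.apache.solr.core.SolrResourceLoader;

public class SearchRateTriggerSketch {
  // Hypothetical helper: assembles the same property map the autoscaling
  // framework would pass to configure().
  public static SearchRateTrigger build(SolrResourceLoader loader, SolrCloudManager cloudManager)
      throws TriggerValidationException {
    Map<String, Object> props = new HashMap<>();
    props.put("name", "search_rate_trigger"); // hypothetical trigger name
    props.put("event", "searchRate");
    props.put("waitFor", 60L);                // violation must persist for 60s (a Number is expected)
    props.put("collections", "coll1");        // COLLECTIONS_PROP: comma-separated list
    props.put("aboveRate", 10.0);             // ABOVE_RATE_PROP: per-shard "hot" threshold
    props.put("belowRate", 0.01);             // BELOW_RATE_PROP: per-shard "cold" threshold
    props.put("maxOps", 3);                   // MAX_OPS_PROP: cap on ops per generated event
    SearchRateTrigger trigger = new SearchRateTrigger("search_rate_trigger");
    trigger.configure(loader, cloudManager, props);
    return trigger;
  }
}

Note how addReplicaHints sizes the response proportionally: with aboveRate=10.0, an
average shard rate of 17.5 and 2 searchable replicas, round((17.5 - 10) / 2) = 4
replicas are requested, and the limiter then clamps that to maxOps (3 by default).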

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/SystemLogListener.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/SystemLogListener.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/SystemLogListener.java
deleted file mode 100644
index c6f0e68..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/SystemLogListener.java
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.lang.invoke.MethodHandles;
-import java.util.Collection;
-import java.util.Date;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.StringJoiner;
-
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventProcessorStage;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.solr.util.IdUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This listener saves events to the {@link CollectionAdminParams#SYSTEM_COLL} collection.
- * <p>Configuration properties:</p>
- * <ul>
- *   <li>collection - optional string, specifies what collection should be used for storing events. Default value
- *   is {@link CollectionAdminParams#SYSTEM_COLL}.</li>
- * </ul>
- */
-public class SystemLogListener extends TriggerListenerBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String SOURCE_FIELD = "source_s";
-  public static final String EVENT_SOURCE_FIELD = "event.source_s";
-  public static final String EVENT_TYPE_FIELD = "event.type_s";
-  public static final String STAGE_FIELD = "stage_s";
-  public static final String ACTION_FIELD = "action_s";
-  public static final String MESSAGE_FIELD = "message_t";
-  public static final String BEFORE_ACTIONS_FIELD = "before.actions_ss";
-  public static final String AFTER_ACTIONS_FIELD = "after.actions_ss";
-  public static final String COLLECTIONS_FIELD = "collections_ss";
-  public static final String SOURCE = SystemLogListener.class.getSimpleName();
-  public static final String DOC_TYPE = "autoscaling_event";
-
-  private String collection = CollectionAdminParams.SYSTEM_COLL;
-
-  @Override
-  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, AutoScalingConfig.TriggerListenerConfig config) throws TriggerValidationException {
-    super.configure(loader, cloudManager, config);
-    collection = (String)config.properties.getOrDefault(CollectionAdminParams.COLLECTION, CollectionAdminParams.SYSTEM_COLL);
-  }
-
-  @Override
-  public void onEvent(TriggerEvent event, TriggerEventProcessorStage stage, String actionName, ActionContext context,
-               Throwable error, String message) throws Exception {
-    try {
-      SolrInputDocument doc = new SolrInputDocument();
-      doc.addField(CommonParams.TYPE, DOC_TYPE);
-      doc.addField(SOURCE_FIELD, SOURCE);
-      doc.addField("id", IdUtils.timeRandomId());
-      doc.addField("event.id_s", event.getId());
-      doc.addField(EVENT_TYPE_FIELD, event.getEventType().toString());
-      doc.addField(EVENT_SOURCE_FIELD, event.getSource());
-      doc.addField("event.time_l", event.getEventTime());
-      doc.addField("timestamp", new Date());
-      addMap("event.property.", doc, event.getProperties());
-      doc.addField(STAGE_FIELD, stage.toString());
-      if (actionName != null) {
-        doc.addField(ACTION_FIELD, actionName);
-      }
-      if (message != null) {
-        doc.addField(MESSAGE_FIELD, message);
-      }
-      addError(doc, error);
-      // add JSON versions of event and context
-      String eventJson = Utils.toJSONString(event);
-      doc.addField("event_str", eventJson);
-      if (context != null) {
-        // capture specifics of operations after compute_plan action
-        addOperations(doc, (List<SolrRequest>)context.getProperties().get("operations"));
-        // capture specifics of responses after execute_plan action
-        addResponses(doc, (List<NamedList<Object>>)context.getProperties().get("responses"));
-        addActions(BEFORE_ACTIONS_FIELD, doc, (List<String>)context.getProperties().get(TriggerEventProcessorStage.BEFORE_ACTION.toString()));
-        addActions(AFTER_ACTIONS_FIELD, doc, (List<String>)context.getProperties().get(TriggerEventProcessorStage.AFTER_ACTION.toString()));
-        String contextJson = Utils.toJSONString(context);
-        doc.addField("context_str", contextJson);
-      }
-      UpdateRequest req = new UpdateRequest();
-      req.add(doc);
-      req.setParam(CollectionAdminParams.COLLECTION, collection);
-      cloudManager.request(req);
-    } catch (Exception e) {
-      if ((e instanceof SolrException) && e.getMessage().contains("Collection not found")) {
-        // relatively benign
-        log.info("Collection " + collection + " does not exist, disabling logging.");
-        enabled = false;
-      } else {
-        log.warn("Exception sending event to collection " + collection, e);
-      }
-    }
-  }
-
-  private void addActions(String field, SolrInputDocument doc, List<String> actions) {
-    if (actions == null) {
-      return;
-    }
-    actions.forEach(a -> doc.addField(field, a));
-  }
-
-  private void addMap(String prefix, SolrInputDocument doc, Map<String, Object> map) {
-    map.forEach((k, v) -> {
-      if (v instanceof Collection) {
-        for (Object o : (Collection)v) {
-          doc.addField(prefix + k + "_ss", String.valueOf(o));
-        }
-      } else {
-        doc.addField(prefix + k + "_ss", String.valueOf(v));
-      }
-    });
-  }
-
-  private void addOperations(SolrInputDocument doc, List<SolrRequest> operations) {
-    if (operations == null || operations.isEmpty()) {
-      return;
-    }
-    Set<String> collections = new HashSet<>();
-    for (SolrRequest req : operations) {
-      SolrParams params = req.getParams();
-      if (params == null) {
-        continue;
-      }
-      if (params.get(CollectionAdminParams.COLLECTION) != null) {
-        collections.add(params.get(CollectionAdminParams.COLLECTION));
-      }
-      // build a whitespace-separated param string
-      StringJoiner paramJoiner = new StringJoiner(" ");
-      paramJoiner.setEmptyValue("");
-      for (Iterator<String> it = params.getParameterNamesIterator(); it.hasNext(); ) {
-        final String name = it.next();
-        final String [] values = params.getParams(name);
-        for (String value : values) {
-          paramJoiner.add(name + "=" + value);
-        }
-      }
-      String paramString = paramJoiner.toString();
-      if (!paramString.isEmpty()) {
-        doc.addField("operations.params_ts", paramString);
-      }
-    }
-    if (!collections.isEmpty()) {
-      doc.addField(COLLECTIONS_FIELD, collections);
-    }
-  }
-
-  private void addResponses(SolrInputDocument doc, List<NamedList<Object>> responses) {
-    if (responses == null || responses.isEmpty()) {
-      return;
-    }
-    for (NamedList<Object> rsp : responses) {
-      Object o = rsp.get("success");
-      if (o != null) {
-        doc.addField("responses_ts", "success " + o);
-      } else {
-        o = rsp.get("failure");
-        if (o != null) {
-          doc.addField("responses_ts", "failure " + o);
-        } else { // something else
-          doc.addField("responses_ts", Utils.toJSONString(rsp));
-        }
-      }
-    }
-  }
-
-  private void addError(SolrInputDocument doc, Throwable error) {
-    if (error == null) {
-      return;
-    }
-    StringWriter sw = new StringWriter();
-    PrintWriter pw = new PrintWriter(sw);
-    error.printStackTrace(pw);
-    pw.flush(); pw.close();
-    doc.addField("error.message_t", error.getMessage());
-    doc.addField("error.details_t", sw.toString());
-  }
-}
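
Since the listener above indexes each event into the .system collection as a document
of type autoscaling_event, past autoscaling activity can be inspected with an ordinary
query. A sketch, assuming a SolrClient pointed at the cluster:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.response.QueryResponse;

public class SystemLogQuerySketch {
  public static void printRecentEvents(SolrClient client) throws Exception {
    SolrQuery q = new SolrQuery("type:autoscaling_event"); // DOC_TYPE written by the listener
    q.addFilterQuery("stage_s:SUCCEEDED");                 // STAGE_FIELD: one processor stage per doc
    q.addSort("timestamp", SolrQuery.ORDER.desc);
    q.setRows(10);
    QueryResponse rsp = client.query(".system", q);        // default CollectionAdminParams.SYSTEM_COLL
    rsp.getResults().forEach(doc ->
        System.out.println(doc.get("event.type_s") + " @ " + doc.get("timestamp")));
  }
}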

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerAction.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerAction.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerAction.java
deleted file mode 100644
index b873ee6..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerAction.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.Closeable;
-import java.util.Map;
-
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.core.SolrResourceLoader;
-
-/**
- * Interface for actions performed in response to a trigger being activated
- */
-public interface TriggerAction extends Closeable {
-
-  /**
-   * Called when action is created but before it's initialized and used.
-   * This method should also verify that the configuration parameters are correct.
-   * It may be called multiple times.
-   * @param loader loader to use for instantiating sub-components
-   * @param cloudManager current instance of SolrCloudManager
-   * @param properties configuration properties
-   * @throws TriggerValidationException contains details of invalid configuration parameters.
-   */
-  void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException;
-
-  /**
-   * Called before an action is first used. Any heavy object creation or initialization should
-   * be done in this method instead of the constructor or {@link #configure(SolrResourceLoader, SolrCloudManager, Map)} method.
-   */
-  void init() throws Exception;
-
-  String getName();
-
-  void process(TriggerEvent event, ActionContext context) throws Exception;
-}
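
The interface above implies a fixed lifecycle that the framework, not the action,
drives. Sketched in call order (loader, cloudManager, props, event and context are
supplied by the surrounding machinery; the class name is hypothetical):

// 1. instantiate - TriggerBase does this via the resource loader
TriggerAction action = loader.newInstance("com.example.MyAction", TriggerAction.class);
// 2. validate configuration; may be called multiple times
action.configure(loader, cloudManager, props);
// 3. heavy initialization, once, before first use
action.init();
// 4. invoked for every event the owning trigger fires
action.process(event, context);
// 5. Closeable: release resources on shutdown
action.close();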

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerActionBase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerActionBase.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerActionBase.java
deleted file mode 100644
index 7a9f34b..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerActionBase.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.core.SolrResourceLoader;
-
-/**
- * Base class for {@link TriggerAction} implementations.
- */
-public abstract class TriggerActionBase implements TriggerAction {
-
-  protected Map<String, Object> properties = new HashMap<>();
-  protected SolrResourceLoader loader;
-  protected SolrCloudManager cloudManager;
-  /**
-   * Set of valid property names. Subclasses may add to this set
-   * using {@link TriggerUtils#validProperties(Set, String...)}
-   */
-  protected final Set<String> validProperties = new HashSet<>();
-  /**
-   * Set of required property names. Subclasses may add to this set
-   * using {@link TriggerUtils#requiredProperties(Set, Set, String...)}
-   * (required properties are also valid properties).
-   */
-  protected final Set<String> requiredProperties = new HashSet<>();
-
-  protected TriggerActionBase() {
-    // not strictly needed here because they are already checked during instantiation
-    TriggerUtils.validProperties(validProperties, "name", "class");
-  }
-
-  @Override
-  public String getName() {
-    String name = (String) properties.get("name");
-    if (name != null) {
-      return name;
-    } else {
-      return getClass().getSimpleName();
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-
-  }
-
-  @Override
-  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException {
-    this.loader = loader;
-    this.cloudManager = cloudManager;
-    if (properties != null) {
-      this.properties.putAll(properties);
-    }
-    // validate the config
-    Map<String, String> results = new HashMap<>();
-    TriggerUtils.checkProperties(this.properties, results, requiredProperties, validProperties);
-    if (!results.isEmpty()) {
-      throw new TriggerValidationException(getName(), results);
-    }
-  }
-
-  @Override
-  public void init() throws Exception {
-
-  }
-}
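
Because the base class above already handles the configuration plumbing and name
resolution, a concrete action only has to implement process(). A hypothetical
subclass (the "prefix" config key is invented for illustration):

package org.apache.solr.cloud.autoscaling;

public class LoggingAction extends TriggerActionBase {
  public LoggingAction() {
    // declare any extra config keys this action accepts
    TriggerUtils.validProperties(validProperties, "prefix");
  }

  @Override
  public void process(TriggerEvent event, ActionContext context) {
    String prefix = (String) properties.getOrDefault("prefix", "");
    System.out.println(prefix + getName() + " handling " + event.getEventType()
        + " event " + event.getId());
  }
}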

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerActionException.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerActionException.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerActionException.java
deleted file mode 100644
index 624ce68..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerActionException.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-/**
- * Trigger action-specific exception.
- */
-public class TriggerActionException extends Exception {
-
-  public final String triggerName;
-  public final String actionName;
-
-  public TriggerActionException(String triggerName, String actionName, String message, Throwable cause) {
-    super(message, cause);
-    this.triggerName = triggerName;
-    this.actionName = actionName;
-  }
-}
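
The two public fields make failure reporting self-describing. A sketch of the
intended use inside an event-processing loop (trigger, action, event and context
are assumed to be in scope):

try {
  action.process(event, context);
} catch (Exception e) {
  // carry both names so listeners can report exactly which stage failed
  throw new TriggerActionException(trigger.getName(), action.getName(), e.getMessage(), e);
}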

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerBase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerBase.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerBase.java
deleted file mode 100644
index 214552e..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerBase.java
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.lucene.util.IOUtils;
-import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
-import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
-import org.apache.solr.client.solrj.cloud.DistribStateManager;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
-
-import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Base class for {@link org.apache.solr.cloud.autoscaling.AutoScaling.Trigger} implementations.
- * It handles state snapshot / restore in ZK.
- */
-public abstract class TriggerBase implements AutoScaling.Trigger {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  protected final String name;
-  protected SolrCloudManager cloudManager;
-  protected SolrResourceLoader loader;
-  protected DistribStateManager stateManager;
-  protected final Map<String, Object> properties = new HashMap<>();
-  /**
-   * Set of valid property names. Subclasses may add to this set
-   * using {@link TriggerUtils#validProperties(Set, String...)}
-   */
-  protected final Set<String> validProperties = new HashSet<>();
-  /**
-   * Set of required property names. Subclasses may add to this set
-   * using {@link TriggerUtils#requiredProperties(Set, Set, String...)}
-   * (required properties are also valid properties).
-   */
-  protected final Set<String> requiredProperties = new HashSet<>();
-  protected final TriggerEventType eventType;
-  protected int waitForSecond;
-  protected Map<String,Object> lastState;
-  protected final AtomicReference<AutoScaling.TriggerEventProcessor> processorRef = new AtomicReference<>();
-  protected List<TriggerAction> actions;
-  protected boolean enabled;
-  protected boolean isClosed;
-
-
-  protected TriggerBase(TriggerEventType eventType, String name) {
-    this.eventType = eventType;
-    this.name = name;
-
-    // subclasses may modify this set to include other supported properties
-    TriggerUtils.validProperties(validProperties, "name", "class", "event", "enabled", "waitFor", "actions");
-  }
-
-  @Override
-  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException {
-    this.cloudManager = cloudManager;
-    this.loader = loader;
-    this.stateManager = cloudManager.getDistribStateManager();
-    if (properties != null) {
-      this.properties.putAll(properties);
-    }
-    this.enabled = Boolean.parseBoolean(String.valueOf(this.properties.getOrDefault("enabled", "true")));
-    this.waitForSecond = ((Number) this.properties.getOrDefault("waitFor", -1L)).intValue();
-    List<Map<String, Object>> o = (List<Map<String, Object>>) properties.get("actions");
-    if (o != null && !o.isEmpty()) {
-      actions = new ArrayList<>(3);
-      for (Map<String, Object> map : o) {
-        TriggerAction action = null;
-        try {
-          action = loader.newInstance((String)map.get("class"), TriggerAction.class);
-        } catch (Exception e) {
-          throw new TriggerValidationException("action", "exception creating action " + map + ": " + e.toString());
-        }
-        action.configure(loader, cloudManager, map);
-        actions.add(action);
-      }
-    } else {
-      actions = Collections.emptyList();
-    }
-
-
-    Map<String, String> results = new HashMap<>();
-    TriggerUtils.checkProperties(this.properties, results, requiredProperties, validProperties);
-    if (!results.isEmpty()) {
-      throw new TriggerValidationException(name, results);
-    }
-  }
-
-  @Override
-  public void init() throws Exception {
-    try {
-      if (!stateManager.hasData(ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH)) {
-        stateManager.makePath(ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH);
-      }
-    } catch (AlreadyExistsException e) {
-      // ignore
-    } catch (InterruptedException | KeeperException | IOException e) {
-      log.warn("Exception checking ZK path " + ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH, e);
-      throw e;
-    }
-    for (TriggerAction action : actions) {
-      action.init();
-    }
-  }
-
-  @Override
-  public void setProcessor(AutoScaling.TriggerEventProcessor processor) {
-    processorRef.set(processor);
-  }
-
-  @Override
-  public AutoScaling.TriggerEventProcessor getProcessor() {
-    return processorRef.get();
-  }
-
-  @Override
-  public String getName() {
-    return name;
-  }
-
-  @Override
-  public TriggerEventType getEventType() {
-    return eventType;
-  }
-
-  @Override
-  public boolean isEnabled() {
-    return enabled;
-  }
-
-  @Override
-  public int getWaitForSecond() {
-    return waitForSecond;
-  }
-
-  @Override
-  public Map<String, Object> getProperties() {
-    return properties;
-  }
-
-  @Override
-  public List<TriggerAction> getActions() {
-    return actions;
-  }
-
-  @Override
-  public boolean isClosed() {
-    synchronized (this) {
-      return isClosed;
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-    synchronized (this) {
-      isClosed = true;
-      IOUtils.closeWhileHandlingException(actions);
-    }
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(name, properties);
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) {
-      return false;
-    }
-    if (obj.getClass().equals(this.getClass())) {
-      TriggerBase that = (TriggerBase) obj;
-      return this.name.equals(that.name)
-          && this.properties.equals(that.properties);
-    }
-    return false;
-  }
-
-  /**
-   * Prepare and return internal state of this trigger in a format suitable for persisting in ZK.
-   * @return map of internal state properties. Note: values must be supported by {@link Utils#toJSON(Object)}.
-   */
-  protected abstract Map<String,Object> getState();
-
-  /**
-   * Restore internal state of this trigger from properties retrieved from ZK.
-   * @param state never null but may be empty.
-   */
-  protected abstract void setState(Map<String,Object> state);
-
-  @Override
-  public void saveState() {
-    Map<String,Object> state = Utils.getDeepCopy(getState(), 10, false, true);
-    if (lastState != null && lastState.equals(state)) {
-      // skip saving if identical
-      return;
-    }
-    byte[] data = Utils.toJSON(state);
-    String path = ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH + "/" + getName();
-    try {
-      if (stateManager.hasData(path)) {
-        // update
-        stateManager.setData(path, data, -1);
-      } else {
-        // create
-        stateManager.createData(path, data, CreateMode.PERSISTENT);
-      }
-      lastState = state;
-    } catch (InterruptedException | BadVersionException | AlreadyExistsException | IOException | KeeperException e) {
-      log.warn("Exception updating trigger state '" + path + "'", e);
-    }
-  }
-
-  @Override
-  public void restoreState() {
-    byte[] data = null;
-    String path = ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH + "/" + getName();
-    try {
-      if (stateManager.hasData(path)) {
-        VersionedData versionedData = stateManager.getData(path);
-        data = versionedData.getData();
-      }
-    } catch (Exception e) {
-      log.warn("Exception getting trigger state '" + path + "'", e);
-    }
-    if (data != null) {
-      Map<String, Object> restoredState = (Map<String, Object>)Utils.fromJSON(data);
-      // make sure lastState is sorted
-      restoredState = Utils.getDeepCopy(restoredState, 10, false, true);
-      setState(restoredState);
-      lastState = restoredState;
-    }
-  }
-}
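
The getState()/setState() pair above is what makes triggers resumable across
restarts: whatever JSON-friendly map getState() returns is persisted under
SOLR_AUTOSCALING_TRIGGER_STATE_PATH, then handed back to setState() on restore.
A rough sketch of a conforming subclass, following the same pattern as
SearchRateTrigger (the class name is hypothetical and the detection logic is elided):

package org.apache.solr.cloud.autoscaling;

import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;

public class HeartbeatTrigger extends TriggerBase {
  private final Map<String, Long> lastSeen = new ConcurrentHashMap<>();

  public HeartbeatTrigger(String name) {
    super(TriggerEventType.NODELOST, name);
  }

  @Override
  public void run() {
    // poll cloudManager, compare against lastSeen, and hand events to processorRef.get() here
  }

  @Override
  protected Map<String, Object> getState() {
    // values must survive Utils.toJSON, so stick to strings, numbers, maps and lists
    return Collections.singletonMap("lastSeen", lastSeen);
  }

  @Override
  @SuppressWarnings("unchecked")
  protected void setState(Map<String, Object> state) {
    lastSeen.clear();
    Map<String, Long> saved = (Map<String, Long>) state.get("lastSeen");
    if (saved != null) {
      lastSeen.putAll(saved);
    }
  }

  @Override
  public void restoreState(AutoScaling.Trigger old) {
    if (old instanceof HeartbeatTrigger) {
      lastSeen.clear();
      lastSeen.putAll(((HeartbeatTrigger) old).lastSeen);
    }
  }
}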

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerEvent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerEvent.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerEvent.java
deleted file mode 100644
index 8e3a348..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerEvent.java
+++ /dev/null
@@ -1,309 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.HashMap;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
-import org.apache.solr.common.MapWriter;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.util.Pair;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.util.IdUtils;
-
-/**
- * Trigger event.
- */
-public class TriggerEvent implements MapWriter {
-  public static final String IGNORED = "ignored";
-  public static final String COOLDOWN = "cooldown";
-  public static final String REPLAYING = "replaying";
-  public static final String NODE_NAMES = "nodeNames";
-  public static final String EVENT_TIMES = "eventTimes";
-  public static final String REQUESTED_OPS = "requestedOps";
-  public static final String UNSUPPORTED_OPS = "unsupportedOps";
-
-  public static final class Op implements MapWriter {
-    private final CollectionParams.CollectionAction action;
-    private final EnumMap<Suggester.Hint, Object> hints = new EnumMap<>(Suggester.Hint.class);
-
-    public Op(CollectionParams.CollectionAction action) {
-      this.action = action;
-    }
-
-    public Op(CollectionParams.CollectionAction action, Suggester.Hint hint, Object hintValue) {
-      this.action = action;
-      addHint(hint, hintValue);
-    }
-
-    public void addHint(Suggester.Hint hint, Object value) {
-      hint.validator.accept(value);
-      if (hint.multiValued) {
-        Collection<?> values = value instanceof Collection ? (Collection) value : Collections.singletonList(value);
-        ((Set) hints.computeIfAbsent(hint, h -> new LinkedHashSet<>())).addAll(values);
-      } else {
-        hints.put(hint, value == null ? null : String.valueOf(value));
-      }
-    }
-
-    public CollectionParams.CollectionAction getAction() {
-      return action;
-    }
-
-    public EnumMap<Suggester.Hint, Object> getHints() {
-      return hints;
-    }
-
-    @Override
-    public void writeMap(EntryWriter ew) throws IOException {
-      ew.put("action", action);
-      ew.put("hints", hints);
-    }
-
-    public static Op fromMap(Map<String, Object> map) {
-      if (!map.containsKey("action")) {
-        return null;
-      }
-      CollectionParams.CollectionAction action = CollectionParams.CollectionAction.get(String.valueOf(map.get("action")));
-      if (action == null) {
-        return null;
-      }
-      Op op = new Op(action);
-      Map<Object, Object> hints = (Map<Object, Object>)map.get("hints");
-      if (hints != null && !hints.isEmpty()) {
-        hints.forEach((k, v) ->  {
-          Suggester.Hint h = Suggester.Hint.get(k.toString());
-          if (h == null) {
-            return;
-          }
-          if (!(v instanceof Collection)) {
-            v = Collections.singletonList(v);
-          }
-          ((Collection)v).forEach(vv -> {
-            if (vv instanceof Map) {
-              // maybe it's a Pair?
-              Map<String, Object> m = (Map<String, Object>)vv;
-              if (m.containsKey("first") && m.containsKey("second")) {
-                Pair p = Pair.parse(m);
-                if (p != null) {
-                  op.addHint(h, p);
-                  return;
-                }
-              }
-            }
-            op.addHint(h, vv);
-          });
-        });
-      }
-      return op;
-    }
-
-    @Override
-    public String toString() {
-      return "Op{" +
-          "action=" + action +
-          ", hints=" + hints +
-          '}';
-    }
-  }
-
-  protected final String id;
-  protected final String source;
-  protected final long eventTime;
-  protected final TriggerEventType eventType;
-  protected final Map<String, Object> properties = new HashMap<>();
-  protected final boolean ignored;
-
-  public TriggerEvent(TriggerEventType eventType, String source, long eventTime,
-                      Map<String, Object> properties) {
-    this(IdUtils.timeRandomId(eventTime), eventType, source, eventTime, properties, false);
-  }
-
-  public TriggerEvent(TriggerEventType eventType, String source, long eventTime,
-                      Map<String, Object> properties, boolean ignored) {
-    this(IdUtils.timeRandomId(eventTime), eventType, source, eventTime, properties, ignored);
-  }
-
-  public TriggerEvent(String id, TriggerEventType eventType, String source, long eventTime,
-                      Map<String, Object> properties) {
-    this(id, eventType, source, eventTime, properties, false);
-  }
-
-  public TriggerEvent(String id, TriggerEventType eventType, String source, long eventTime,
-                      Map<String, Object> properties, boolean ignored) {
-    this.id = id;
-    this.eventType = eventType;
-    this.source = source;
-    this.eventTime = eventTime;
-    if (properties != null) {
-      this.properties.putAll(properties);
-    }
-    this.ignored = ignored;
-  }
-
-  /**
-   * Unique event id.
-   */
-  public String getId() {
-    return id;
-  }
-
-  /**
-   * Name of the trigger that fired the event.
-   */
-  public String getSource() {
-    return source;
-  }
-
-  /**
-   * Timestamp of the actual event, in nanoseconds.
-   * NOTE: this is NOT the timestamp when the event was fired - events may be fired
-   * much later than the actual condition that generated the event, due to the "waitFor" limit.
-   */
-  public long getEventTime() {
-    return eventTime;
-  }
-
-  /**
-   * Get event properties (modifiable).
-   */
-  public Map<String, Object> getProperties() {
-    return properties;
-  }
-
-  /**
-   * Get a named event property or null if missing.
-   */
-  public Object getProperty(String name) {
-    return properties.get(name);
-  }
-
-  /**
-   * Get a named event property or default value if missing.
-   */
-  public Object getProperty(String name, Object defaultValue) {
-    Object v = properties.get(name);
-    if (v == null) {
-      return defaultValue;
-    } else {
-      return v;
-    }
-  }
-
-  /**
-   * Event type.
-   */
-  public TriggerEventType getEventType() {
-    return eventType;
-  }
-
-  public boolean isIgnored() {
-    return ignored;
-  }
-
-  /**
-   * Set event properties.
-   *
-   * @param properties may be null. A shallow copy of this parameter is used.
-   */
-  public void setProperties(Map<String, Object> properties) {
-    this.properties.clear();
-    if (properties != null) {
-      this.properties.putAll(properties);
-    }
-  }
-
-  @Override
-  public void writeMap(EntryWriter ew) throws IOException {
-    ew.put("id", id);
-    ew.put("source", source);
-    ew.put("eventTime", eventTime);
-    ew.put("eventType", eventType.toString());
-    ew.put("properties", properties);
-    if (ignored)  {
-      ew.put("ignored", true);
-    }
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    TriggerEvent that = (TriggerEvent) o;
-
-    if (eventTime != that.eventTime) return false;
-    if (!id.equals(that.id)) return false;
-    if (!source.equals(that.source)) return false;
-    if (eventType != that.eventType) return false;
-    if (ignored != that.ignored)  return false;
-    return properties.equals(that.properties);
-  }
-
-  @Override
-  public int hashCode() {
-    int result = id.hashCode();
-    result = 31 * result + source.hashCode();
-    result = 31 * result + (int) (eventTime ^ (eventTime >>> 32));
-    result = 31 * result + eventType.hashCode();
-    result = 31 * result + properties.hashCode();
-    result = 31 * result + Boolean.hashCode(ignored);
-    return result;
-  }
-
-  @Override
-  public String toString() {
-    return Utils.toJSONString(this);
-  }
-
-  public static TriggerEvent fromMap(Map<String, Object> map) {
-    String id = (String)map.get("id");
-    String source = (String)map.get("source");
-    long eventTime = ((Number)map.get("eventTime")).longValue();
-    TriggerEventType eventType = TriggerEventType.valueOf((String)map.get("eventType"));
-    Map<String, Object> properties = (Map<String, Object>)map.get("properties");
-    // properly deserialize some well-known complex properties
-    fixOps(TriggerEvent.REQUESTED_OPS, properties);
-    fixOps(TriggerEvent.UNSUPPORTED_OPS, properties);
-    TriggerEvent res = new TriggerEvent(id, eventType, source, eventTime, properties);
-    return res;
-  }
-
-  public static void fixOps(String type, Map<String, Object> properties) {
-    List<Object> ops = (List<Object>)properties.get(type);
-    if (ops != null && !ops.isEmpty()) {
-      for (int i = 0; i < ops.size(); i++) {
-        Object o = ops.get(i);
-        if (o instanceof Map) {
-          TriggerEvent.Op op = TriggerEvent.Op.fromMap((Map)o);
-          if (op != null) {
-            ops.set(i, op);
-          }
-        }
-      }
-    }
-  }
-}
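
A sketch of how the serialization above fits together: an event carrying a
requested operation survives a JSON round trip because fromMap() re-inflates
the REQUESTED_OPS maps into Op instances via fixOps(). The trigger name and
node name below are illustrative:

    Map<String, Object> props = new HashMap<>();
    props.put(TriggerEvent.NODE_NAMES, Collections.singletonList("node1:8983_solr"));
    props.put(TriggerEvent.REQUESTED_OPS, Collections.singletonList(
        new TriggerEvent.Op(CollectionParams.CollectionAction.MOVEREPLICA,
            Suggester.Hint.SRC_NODE, "node1:8983_solr")));
    TriggerEvent event = new TriggerEvent(TriggerEventType.NODELOST,
        "node_lost_trigger", System.nanoTime(), props);

    byte[] data = Utils.toJSON(event);                    // writeMap() -> JSON
    Map<String, Object> map = (Map<String, Object>) Utils.fromJSON(data);
    TriggerEvent restored = TriggerEvent.fromMap(map);    // fixOps() re-creates Op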

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerEventQueue.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerEventQueue.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerEventQueue.java
deleted file mode 100644
index 057d792..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerEventQueue.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.nio.charset.StandardCharsets;
-import java.util.Map;
-
-import org.apache.solr.client.solrj.cloud.DistributedQueue;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.cloud.Stats;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.common.util.TimeSource;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A per-trigger distributed queue of {@link TriggerEvent}s, persisted under the autoscaling events path in ZooKeeper.
- */
-public class TriggerEventQueue {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String ENQUEUE_TIME = "_enqueue_time_";
-  public static final String DEQUEUE_TIME = "_dequeue_time_";
-
-  private final String triggerName;
-  private final TimeSource timeSource;
-  private final DistributedQueue delegate;
-
-  public TriggerEventQueue(SolrCloudManager cloudManager, String triggerName, Stats stats) throws IOException {
-    // TODO: collect stats
-    this.delegate = cloudManager.getDistributedQueueFactory().makeQueue(ZkStateReader.SOLR_AUTOSCALING_EVENTS_PATH + "/" + triggerName);
-    this.triggerName = triggerName;
-    this.timeSource = cloudManager.getTimeSource();
-  }
-
-  public boolean offerEvent(TriggerEvent event) {
-    event.getProperties().put(ENQUEUE_TIME, timeSource.getTimeNs());
-    try {
-      byte[] data = Utils.toJSON(event);
-      delegate.offer(data);
-      return true;
-    } catch (Exception e) {
-      log.warn("Exception adding event " + event + " to queue " + triggerName, e);
-      return false;
-    }
-  }
-
-  public TriggerEvent peekEvent() {
-    byte[] data;
-    try {
-      while ((data = delegate.peek()) != null) {
-        if (data.length == 0) {
-          log.warn("ignoring empty data...");
-          continue;
-        }
-        try {
-          Map<String, Object> map = (Map<String, Object>) Utils.fromJSON(data);
-          return fromMap(map);
-        } catch (Exception e) {
-          log.warn("Invalid event data, ignoring: " + new String(data, StandardCharsets.UTF_8));
-          continue;
-        }
-      }
-    } catch (Exception e) {
-      log.warn("Exception peeking queue of trigger " + triggerName, e);
-    }
-    return null;
-  }
-
-  public TriggerEvent pollEvent() {
-    byte[] data;
-    try {
-      while ((data = delegate.poll()) != null) {
-        if (data.length == 0) {
-          log.warn("ignoring empty data...");
-          continue;
-        }
-        try {
-          Map<String, Object> map = (Map<String, Object>) Utils.fromJSON(data);
-          return fromMap(map);
-        } catch (Exception e) {
-          log.warn("Invalid event data, ignoring: " + new String(data, StandardCharsets.UTF_8));
-          continue;
-        }
-      }
-    } catch (Exception e) {
-      log.warn("Exception polling queue of trigger " + triggerName, e);
-    }
-    return null;
-  }
-
-  private TriggerEvent fromMap(Map<String, Object> map) {
-    TriggerEvent res = TriggerEvent.fromMap(map);
-    res.getProperties().put(DEQUEUE_TIME, timeSource.getTimeNs());
-    return res;
-  }
-}
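
Typical usage pairs one producer (the trigger) with one consumer (the
scheduler); cloudManager, stats, event and handleEvent below are assumed to
exist in the surrounding code:

    TriggerEventQueue queue = new TriggerEventQueue(cloudManager, "node_lost_trigger", stats);

    // producer side: enqueue, which stamps ENQUEUE_TIME on the event
    if (!queue.offerEvent(event)) {
      log.warn("Failed to enqueue {}", event);
    }

    // consumer side: drain; each returned event is stamped with DEQUEUE_TIME
    TriggerEvent pending;
    while ((pending = queue.pollEvent()) != null) {
      handleEvent(pending); // hypothetical handler
    }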

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerListener.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerListener.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerListener.java
deleted file mode 100644
index 234387f..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerListener.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.Closeable;
-
-import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventProcessorStage;
-import org.apache.solr.core.SolrResourceLoader;
-
-/**
- * Implementations of this interface are notified of stages in event processing that they were
- * registered for. Note: instances may be closed and re-created on each auto-scaling config update.
- */
-public interface TriggerListener extends Closeable {
-
-  /**
-   * Called when listener is created but before it's initialized and used.
-   * This method should also verify that the configuration parameters are correct.
-   * It may be called multiple times.
-   * @param loader loader to use for instantiating sub-components
-   * @param cloudManager current instance of SolrCloudManager
- * @param config configuration
-   * @throws TriggerValidationException contains details of invalid configuration parameters.
-   */
-  void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, AutoScalingConfig.TriggerListenerConfig config) throws TriggerValidationException;
-
-  /**
-   * If this method returns false then the listener's {@link #onEvent(TriggerEvent, TriggerEventProcessorStage, String, ActionContext, Throwable, String)}
-   * method should not be called.
-   */
-  boolean isEnabled();
-
-  void init() throws Exception;
-
-  AutoScalingConfig.TriggerListenerConfig getConfig();
-
-  /**
-   * This method is called when either a particular <code>stage</code> or
-   * <code>actionName</code> is reached during event processing.
-   * @param event current event being processed
-   * @param stage {@link TriggerEventProcessorStage} that this listener was registered for, or null
-   * @param actionName {@link TriggerAction} name that this listener was registered for, or null
-   * @param context optional {@link ActionContext} when the processing stage is related to an action, or null
-   * @param error optional {@link Throwable} error, or null
-   * @param message optional message
-   */
-  void onEvent(TriggerEvent event, TriggerEventProcessorStage stage, String actionName, ActionContext context,
-               Throwable error, String message) throws Exception;
-}
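
A minimal sketch of an implementation; in practice listeners extend
TriggerListenerBase (next file) so configure() and the lifecycle methods come
for free. LoggingTriggerListener is an illustrative name:

    public class LoggingTriggerListener extends TriggerListenerBase {
      private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

      @Override
      public void onEvent(TriggerEvent event, TriggerEventProcessorStage stage, String actionName,
                          ActionContext context, Throwable error, String message) {
        log.info("stage={} action={} event={} message={}", stage, actionName, event, message);
      }
    }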

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerListenerBase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerListenerBase.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerListenerBase.java
deleted file mode 100644
index 7a323c7..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerListenerBase.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.core.SolrResourceLoader;
-
-/**
- * Base class for implementations of {@link TriggerListener}.
- */
-public abstract class TriggerListenerBase implements TriggerListener {
-
-  protected AutoScalingConfig.TriggerListenerConfig config;
-  protected SolrCloudManager cloudManager;
-  protected SolrResourceLoader loader;
-  protected boolean enabled;
-  /**
-   * Set of valid property names. Subclasses may add to this set
-   * using {@link TriggerUtils#validProperties(Set, String...)}
-   */
-  protected final Set<String> validProperties = new HashSet<>();
-  /**
-   * Set of required property names. Subclasses may add to this set
-   * using {@link TriggerUtils#requiredProperties(Set, Set, String...)}
-   * (required properties are also valid properties).
-   */
-  protected final Set<String> requiredProperties = new HashSet<>();
-  /**
-   * Subclasses can add to this set if they want to allow arbitrary properties that
-   * start with one of valid prefixes.
-   */
-  protected final Set<String> validPropertyPrefixes = new HashSet<>();
-
-  protected TriggerListenerBase() {
-    TriggerUtils.requiredProperties(requiredProperties, validProperties, "trigger");
-    TriggerUtils.validProperties(validProperties, "name", "class", "stage", "beforeAction", "afterAction", "enabled");
-  }
-
-  @Override
-  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, AutoScalingConfig.TriggerListenerConfig config) throws TriggerValidationException {
-    this.loader = loader;
-    this.cloudManager = cloudManager;
-    this.config = config;
-    this.enabled = Boolean.parseBoolean(String.valueOf(config.properties.getOrDefault("enabled", true)));
-    // validate the config
-    Map<String, String> results = new HashMap<>();
-    // prepare a copy to treat the prefix-based properties
-    Map<String, Object> propsToCheck = new HashMap<>(config.properties);
-    propsToCheck.keySet().removeIf(k ->
-      validPropertyPrefixes.stream().anyMatch(p -> k.startsWith(p)));
-    TriggerUtils.checkProperties(propsToCheck, results, requiredProperties, validProperties);
-    if (!results.isEmpty()) {
-      throw new TriggerValidationException(config.name, results);
-    }
-  }
-
-  @Override
-  public AutoScalingConfig.TriggerListenerConfig getConfig() {
-    return config;
-  }
-
-  @Override
-  public boolean isEnabled() {
-    return enabled;
-  }
-
-  @Override
-  public void init() throws Exception {
-
-  }
-
-  @Override
-  public void close() throws IOException {
-
-  }
-}
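
Subclasses hook into the validation performed by configure() from their
constructor; a sketch (the "collection" property and the "myprefix." prefix
are illustrative, not real configuration keys):

    public class MyTriggerListener extends TriggerListenerBase {
      public MyTriggerListener() {
        super(); // registers "trigger" as required plus the common properties
        TriggerUtils.requiredProperties(requiredProperties, validProperties, "collection");
        validPropertyPrefixes.add("myprefix.");
      }

      @Override
      public void onEvent(TriggerEvent event, TriggerEventProcessorStage stage, String actionName,
                          ActionContext context, Throwable error, String message) {
        // no-op for the sketch
      }
    }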

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerUtils.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerUtils.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerUtils.java
deleted file mode 100644
index 71a1ce4..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerUtils.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.autoscaling;
-
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Helper methods for validating trigger and trigger listener configuration properties.
- */
-public class TriggerUtils {
-  // validation helper methods
-
-  public static void requiredProperties(Set<String> required, Set<String> valid, String... propertyNames) {
-    required.addAll(Arrays.asList(propertyNames));
-    valid.addAll(Arrays.asList(propertyNames));
-  }
-
-  public static void validProperties(Set<String> valid, String... propertyNames) {
-    valid.addAll(Arrays.asList(propertyNames));
-  }
-
-  public static void checkProperties(Map<String, Object> properties, Map<String, String> results, Set<String> required, Set<String> valid) {
-    checkValidPropertyNames(properties, results, valid);
-    checkRequiredPropertyNames(properties, results, required);
-  }
-
-  public static void checkValidPropertyNames(Map<String, Object> properties, Map<String, String> results, Set<String> valid) {
-    Set<String> currentNames = new HashSet<>(properties.keySet());
-    currentNames.removeAll(valid);
-    if (!currentNames.isEmpty()) {
-      for (String name : currentNames) {
-        results.put(name, "unknown property");
-      }
-    }
-  }
-
-  public static void checkRequiredPropertyNames(Map<String, Object> properties, Map<String, String> results, Set<String> required) {
-    Set<String> requiredNames = new HashSet<>(required);
-    requiredNames.removeAll(properties.keySet());
-    if (!requiredNames.isEmpty()) {
-      for (String name : requiredNames) {
-        results.put(name, "missing required property");
-      }
-    }
-  }
-
-  public static void checkProperty(Map<String, Object> properties, Map<String, String> results, String name, boolean required, Class... acceptClasses) {
-    Object value = properties.get(name);
-    if (value == null) {
-      if (required) {
-        results.put(name, "missing required value");
-      }
-      // nothing to type-check either way; returning here also avoids an NPE below
-      return;
-    }
-    if (acceptClasses == null || acceptClasses.length == 0) {
-      return;
-    }
-    boolean accepted = false;
-    for (Class clz : acceptClasses) {
-      if (clz.isAssignableFrom(value.getClass())) {
-        accepted = true;
-        break;
-      }
-    }
-    if (!accepted) {
-      results.put(name, "value is not an expected type");
-    }
-  }
-}
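
A sketch of how these helpers combine (property names and values are
illustrative):

    Set<String> valid = new HashSet<>();
    Set<String> required = new HashSet<>();
    TriggerUtils.requiredProperties(required, valid, "name", "class");
    TriggerUtils.validProperties(valid, "enabled", "waitFor");

    Map<String, Object> props = new HashMap<>();
    props.put("name", "my_trigger");
    props.put("waitFor", 10L);
    props.put("bogus", true);

    Map<String, String> errors = new HashMap<>();
    TriggerUtils.checkProperties(props, errors, required, valid);
    // errors now maps "bogus" -> "unknown property" and "class" -> "missing required property"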


[41/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateSnapshotCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateSnapshotCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateSnapshotCmd.java
deleted file mode 100644
index 32715d6..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateSnapshotCmd.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.api.collections;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.Replica.State;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.SnapshotStatus;
-import org.apache.solr.core.snapshots.SolrSnapshotManager;
-import org.apache.solr.handler.component.ShardHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class implements the functionality of creating a collection level snapshot.
- */
-public class CreateSnapshotCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public CreateSnapshotCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    String collectionName = message.getStr(COLLECTION_PROP);
-    String commitName = message.getStr(CoreAdminParams.COMMIT_NAME);
-    String asyncId = message.getStr(ASYNC);
-    SolrZkClient zkClient = ocmh.zkStateReader.getZkClient();
-    Date creationDate = new Date();
-
-    if (SolrSnapshotManager.snapshotExists(zkClient, collectionName, commitName)) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Snapshot with name " + commitName
-          + " already exists for collection " + collectionName);
-    }
-
-    log.info("Creating a snapshot for collection={} with commitName={}", collectionName, commitName);
-
-    // Create a node in ZK to store the collection level snapshot meta-data.
-    SolrSnapshotManager.createCollectionLevelSnapshot(zkClient, collectionName, new CollectionSnapshotMetaData(commitName));
-    log.info("Created a ZK path to store snapshot information for collection={} with commitName={}", collectionName, commitName);
-
-    Map<String, String> requestMap = new HashMap<>();
-    NamedList shardRequestResults = new NamedList();
-    Map<String, Slice> shardByCoreName = new HashMap<>();
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-
-    for (Slice slice : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getSlices()) {
-      for (Replica replica : slice.getReplicas()) {
-        if (replica.getState() != State.ACTIVE) {
-          log.info("Replica {} is not active. Hence not sending the createsnapshot request", replica.getCoreName());
-          continue; // Since replica is not active - no point sending a request.
-        }
-
-        String coreName = replica.getStr(CORE_NAME_PROP);
-
-        ModifiableSolrParams params = new ModifiableSolrParams();
-        params.set(CoreAdminParams.ACTION, CoreAdminAction.CREATESNAPSHOT.toString());
-        params.set(NAME, slice.getName());
-        params.set(CORE_NAME_PROP, coreName);
-        params.set(CoreAdminParams.COMMIT_NAME, commitName);
-
-        ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap);
-        log.debug("Sent createsnapshot request to core={} with commitName={}", coreName, commitName);
-
-        shardByCoreName.put(coreName, slice);
-      }
-    }
-
-    // At this point we want to make sure that at-least one replica for every shard
-    // is able to create the snapshot. If that is not the case, then we fail the request.
-    // This is to take care of the situation where e.g. entire shard is unavailable.
-    Set<String> failedShards = new HashSet<>();
-
-    ocmh.processResponses(shardRequestResults, shardHandler, false, null, asyncId, requestMap);
-    NamedList success = (NamedList) shardRequestResults.get("success");
-    List<CoreSnapshotMetaData> replicas = new ArrayList<>();
-    if (success != null) {
-      for ( int i = 0 ; i < success.size() ; i++) {
-        NamedList resp = (NamedList)success.getVal(i);
-
-        // Check if this core is the leader for the shard. The idea here is that during the backup
-        // operation we preferably use the snapshot of the "leader" replica since it is most likely
-        // to have latest state.
-        String coreName = (String)resp.get(CoreAdminParams.CORE);
-        Slice slice = shardByCoreName.remove(coreName);
-        boolean leader = (slice.getLeader() != null && slice.getLeader().getCoreName().equals(coreName));
-        resp.add(SolrSnapshotManager.SHARD_ID, slice.getName());
-        resp.add(SolrSnapshotManager.LEADER, leader);
-
-        CoreSnapshotMetaData c = new CoreSnapshotMetaData(resp);
-        replicas.add(c);
-        log.info("Snapshot with commitName {} is created successfully for core {}", commitName, c.getCoreName());
-      }
-    }
-
-    if (!shardByCoreName.isEmpty()) { // One or more failures.
-      log.warn("Unable to create a snapshot with name {} for following cores {}", commitName, shardByCoreName.keySet());
-
-      // Count number of failures per shard.
-      Map<String, Integer> failuresByShardId = new HashMap<>();
-      for (Map.Entry<String,Slice> entry : shardByCoreName.entrySet()) {
-        int f = 0;
-        if (failuresByShardId.get(entry.getValue().getName()) != null) {
-          f = failuresByShardId.get(entry.getValue().getName());
-        }
-        failuresByShardId.put(entry.getValue().getName(), f + 1);
-      }
-
-      // Now that we know number of failures per shard, we can figure out
-      // if at-least one replica per shard was able to create a snapshot or not.
-      DocCollection collectionStatus = ocmh.zkStateReader.getClusterState().getCollection(collectionName);
-      for (Map.Entry<String,Integer> entry : failuresByShardId.entrySet()) {
-        int replicaCount = collectionStatus.getSlice(entry.getKey()).getReplicas().size();
-        if (replicaCount <= entry.getValue()) {
-          failedShards.add(entry.getKey());
-        }
-      }
-    }
-
-    if (failedShards.isEmpty()) { // No failures.
-      CollectionSnapshotMetaData meta = new CollectionSnapshotMetaData(commitName, SnapshotStatus.Successful, creationDate, replicas);
-      SolrSnapshotManager.updateCollectionLevelSnapshot(zkClient, collectionName, meta);
-      log.info("Saved following snapshot information for collection={} with commitName={} in Zookeeper : {}", collectionName,
-          commitName, meta.toNamedList());
-    } else {
-      log.warn("Failed to create a snapshot for collection {} with commitName = {}. Snapshot could not be captured for following shards {}",
-          collectionName, commitName, failedShards);
-      // Update the ZK meta-data to include only cores with the snapshot. This will enable users to figure out
-      // which cores have the named snapshot.
-      CollectionSnapshotMetaData meta = new CollectionSnapshotMetaData(commitName, SnapshotStatus.Failed, creationDate, replicas);
-      SolrSnapshotManager.updateCollectionLevelSnapshot(zkClient, collectionName, meta);
-      log.info("Saved following snapshot information for collection={} with commitName={} in Zookeeper : {}", collectionName,
-          commitName, meta.toNamedList());
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to create snapshot on shards " + failedShards);
-    }
-  }
-}
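
Client side, this command is reached through the Collections API
CREATESNAPSHOT action; a SolrJ sketch, assuming an already-built SolrClient
named solr and the CollectionAdminRequest.createSnapshot factory:

    // snapshot collection "techproducts" under the name "commit1"
    CollectionAdminRequest.createSnapshot("techproducts", "commit1").process(solr);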

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteAliasCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteAliasCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteAliasCmd.java
deleted file mode 100644
index 6cc2eec..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteAliasCmd.java
+++ /dev/null
@@ -1,43 +0,0 @@
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.api.collections;
-
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-public class DeleteAliasCmd implements OverseerCollectionMessageHandler.Cmd {
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public DeleteAliasCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    String aliasName = message.getStr(NAME);
-
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    zkStateReader.aliasesManager.applyModificationAndExportToZk(a -> a.cloneWithCollectionAlias(aliasName, null));
-  }
-
-}
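
Note that cloneWithCollectionAlias(aliasName, null) removes the alias entry.
The matching SolrJ call, as a sketch assuming a configured SolrClient solr:

    CollectionAdminRequest.deleteAlias("myAlias").process(solr);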

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java
deleted file mode 100644
index f1767ee..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java
+++ /dev/null
@@ -1,207 +0,0 @@
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.api.collections;
-
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.cloud.Overseer;
-import org.apache.solr.common.NonExistentCoreException;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.Aliases;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.SolrInfoBean;
-import org.apache.solr.core.snapshots.SolrSnapshotManager;
-import org.apache.solr.handler.admin.MetricsHistoryHandler;
-import org.apache.solr.metrics.SolrMetricManager;
-import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CollectionAdminParams.COLOCATED_WITH;
-import static org.apache.solr.common.params.CollectionAdminParams.WITH_COLLECTION;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-public class DeleteCollectionCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-  private final TimeSource timeSource;
-
-  public DeleteCollectionCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-    this.timeSource = ocmh.cloudManager.getTimeSource();
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    final String collection = message.getStr(NAME);
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-
-    checkNotReferencedByAlias(zkStateReader, collection);
-    checkNotColocatedWith(zkStateReader, collection);
-
-    final boolean deleteHistory = message.getBool(CoreAdminParams.DELETE_METRICS_HISTORY, true);
-
-    boolean removeCounterNode = true;
-    try {
-      // Remove the snapshots meta-data for this collection in ZK. Deleting actual index files
-      // should be taken care of as part of collection delete operation.
-      SolrZkClient zkClient = zkStateReader.getZkClient();
-      SolrSnapshotManager.cleanupCollectionLevelSnapshots(zkClient, collection);
-
-      if (zkStateReader.getClusterState().getCollectionOrNull(collection) == null) {
-        if (zkStateReader.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection, true)) {
-          // if the collection is not in the clusterstate, but is listed in zk, do nothing, it will just
-          // be removed in the finally - we cannot continue, because the below code will error if the collection
-          // is not in the clusterstate
-          return;
-        }
-      }
-      // remove collection-level metrics history
-      if (deleteHistory) {
-        MetricsHistoryHandler historyHandler = ocmh.overseer.getCoreContainer().getMetricsHistoryHandler();
-        if (historyHandler != null) {
-          String registry = SolrMetricManager.getRegistryName(SolrInfoBean.Group.collection, collection);
-          historyHandler.removeHistory(registry);
-        }
-      }
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.UNLOAD.toString());
-      params.set(CoreAdminParams.DELETE_INSTANCE_DIR, true);
-      params.set(CoreAdminParams.DELETE_DATA_DIR, true);
-      params.set(CoreAdminParams.DELETE_METRICS_HISTORY, deleteHistory);
-
-      String asyncId = message.getStr(ASYNC);
-      Map<String, String> requestMap = null;
-      if (asyncId != null) {
-        requestMap = new HashMap<>();
-      }
-
-      Set<String> okayExceptions = new HashSet<>(1);
-      okayExceptions.add(NonExistentCoreException.class.getName());
-
-      List<Replica> failedReplicas = ocmh.collectionCmd(message, params, results, null, asyncId, requestMap, okayExceptions);
-      for (Replica failedReplica : failedReplicas) {
-        boolean isSharedFS = failedReplica.getBool(ZkStateReader.SHARED_STORAGE_PROP, false) && failedReplica.get("dataDir") != null;
-        if (isSharedFS) {
-          // if the replica uses a shared FS and did not receive the unload message, the counter node should not be removed,
-          // because when a new collection with the same name is created, new replicas may reuse the old dataDir
-          removeCounterNode = false;
-          break;
-        }
-      }
-
-      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, DELETE.toLower(), NAME, collection);
-      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
-
-      // wait for a while until we don't see the collection
-      TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
-      boolean removed = false;
-      while (! timeout.hasTimedOut()) {
-        timeout.sleep(100);
-        removed = !zkStateReader.getClusterState().hasCollection(collection);
-        if (removed) {
-          timeout.sleep(500); // just a bit of time so it's more likely other
-          // readers see on return
-          break;
-        }
-      }
-      if (!removed) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "Could not fully remove collection: " + collection);
-      }
-    } finally {
-
-      try {
-        String collectionPath =  ZkStateReader.getCollectionPathRoot(collection);
-        if (zkStateReader.getZkClient().exists(collectionPath, true)) {
-          if (removeCounterNode) {
-            zkStateReader.getZkClient().clean(collectionPath);
-          } else {
-            final String counterNodePath = Assign.getCounterNodePath(collection);
-            zkStateReader.getZkClient().clean(collectionPath, s -> !s.equals(counterNodePath));
-          }
-        }
-      } catch (InterruptedException e) {
-        SolrException.log(log, "Cleaning up collection in zk was interrupted:"
-            + collection, e);
-        Thread.currentThread().interrupt();
-      } catch (KeeperException e) {
-        SolrException.log(log, "Problem cleaning up collection in zk:"
-            + collection, e);
-      }
-    }
-  }
-
-  private void checkNotReferencedByAlias(ZkStateReader zkStateReader, String collection) throws Exception {
-    String alias = referencedByAlias(collection, zkStateReader.getAliases());
-    if (alias != null) {
-      zkStateReader.aliasesManager.update(); // aliases may have been stale; get latest from ZK
-      alias = referencedByAlias(collection, zkStateReader.getAliases());
-      if (alias != null) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "Collection : " + collection + " is part of alias " + alias + " remove or modify the alias before removing this collection.");
-      }
-    }
-  }
-
-  private String referencedByAlias(String collection, Aliases aliases) {
-    Objects.requireNonNull(aliases);
-    return aliases.getCollectionAliasListMap().entrySet().stream()
-        .filter(e -> e.getValue().contains(collection))
-        .map(Map.Entry::getKey) // alias name
-        .findFirst().orElse(null);
-  }
-
-  private void checkNotColocatedWith(ZkStateReader zkStateReader, String collection) throws Exception {
-    DocCollection docCollection = zkStateReader.getClusterState().getCollectionOrNull(collection);
-    if (docCollection != null)  {
-      String colocatedWith = docCollection.getStr(COLOCATED_WITH);
-      if (colocatedWith != null) {
-        DocCollection colocatedCollection = zkStateReader.getClusterState().getCollectionOrNull(colocatedWith);
-        if (colocatedCollection != null && collection.equals(colocatedCollection.getStr(WITH_COLLECTION))) {
-          // todo how do we clean up if reverse-link is not present?
-          // can't delete this collection because it is still co-located with another collection
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "Collection: " + collection + " is co-located with collection: " + colocatedWith
-                  + " remove the link using modify collection API or delete the co-located collection: " + colocatedWith);
-        }
-      }
-    }
-  }
-}
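
A SolrJ sketch of driving this command, either synchronously or through the
ASYNC path handled above (SolrClient solr and the request id are assumed):

    // synchronous delete
    CollectionAdminRequest.deleteCollection("techproducts").process(solr);

    // or asynchronous: returns immediately, progress is polled via REQUESTSTATUS
    String requestId = CollectionAdminRequest.deleteCollection("techproducts")
        .processAsync("delete-techproducts-1", solr);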

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteNodeCmd.java
deleted file mode 100644
index 5f6e29c..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteNodeCmd.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.api.collections;
-
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Locale;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-public class DeleteNodeCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public DeleteNodeCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    ocmh.checkRequired(message, "node");
-    String node = message.getStr("node");
-    List<ZkNodeProps> sourceReplicas = ReplaceNodeCmd.getReplicasOfNode(node, state);
-    List<String> singleReplicas = verifyReplicaAvailability(sourceReplicas, state);
-    if (!singleReplicas.isEmpty()) {
-      results.add("failure", "Can't delete the only existing non-PULL replica(s) on node " + node + ": " + singleReplicas.toString());
-    } else {
-      cleanupReplicas(results, state, sourceReplicas, ocmh, node, message.getStr(ASYNC));
-    }
-  }
-
-  // collect names of replicas that cannot be deleted
-  static List<String> verifyReplicaAvailability(List<ZkNodeProps> sourceReplicas, ClusterState state) {
-    List<String> res = new ArrayList<>();
-    for (ZkNodeProps sourceReplica : sourceReplicas) {
-      String coll = sourceReplica.getStr(COLLECTION_PROP);
-      String shard = sourceReplica.getStr(SHARD_ID_PROP);
-      String replicaName = sourceReplica.getStr(ZkStateReader.REPLICA_PROP);
-      DocCollection collection = state.getCollection(coll);
-      Slice slice = collection.getSlice(shard);
-      if (slice.getReplicas().size() < 2) {
-        // can't delete the only replica in existence
-        res.add(coll + "/" + shard + "/" + replicaName + ", type=" + sourceReplica.getStr(ZkStateReader.REPLICA_TYPE));
-      } else { // check replica types
-        int otherNonPullReplicas = 0;
-        for (Replica r : slice.getReplicas()) {
-          if (!r.getName().equals(replicaName) && !r.getType().equals(Replica.Type.PULL)) {
-            otherNonPullReplicas++;
-          }
-        }
-        // can't delete - there are no other non-pull replicas
-        if (otherNonPullReplicas == 0) {
-          res.add(coll + "/" + shard + "/" + replicaName + ", type=" + sourceReplica.getStr(ZkStateReader.REPLICA_TYPE));
-        }
-      }
-    }
-    return res;
-  }
-
-  static void cleanupReplicas(NamedList results,
-                              ClusterState clusterState,
-                              List<ZkNodeProps> sourceReplicas,
-                              OverseerCollectionMessageHandler ocmh,
-                              String node,
-                              String async) throws InterruptedException {
-    CountDownLatch cleanupLatch = new CountDownLatch(sourceReplicas.size());
-    for (ZkNodeProps sourceReplica : sourceReplicas) {
-      String coll = sourceReplica.getStr(COLLECTION_PROP);
-      String shard = sourceReplica.getStr(SHARD_ID_PROP);
-      String type = sourceReplica.getStr(ZkStateReader.REPLICA_TYPE);
-      log.info("Deleting replica type={} for collection={} shard={} on node={}", type, coll, shard, node);
-      NamedList deleteResult = new NamedList();
-      try {
-        if (async != null) sourceReplica = sourceReplica.plus(ASYNC, async);
-        ((DeleteReplicaCmd)ocmh.commandMap.get(DELETEREPLICA)).deleteReplica(clusterState, sourceReplica.plus("parallel", "true"), deleteResult, () -> {
-          cleanupLatch.countDown();
-          if (deleteResult.get("failure") != null) {
-            synchronized (results) {
-
-              results.add("failure", String.format(Locale.ROOT, "Failed to delete replica for collection=%s shard=%s" +
-                  " on node=%s", coll, shard, node));
-            }
-          }
-        });
-      } catch (KeeperException e) {
-        log.warn("Error deleting ", e);
-        cleanupLatch.countDown();
-      } catch (Exception e) {
-        log.warn("Error deleting ", e);
-        cleanupLatch.countDown();
-        throw e;
-      }
-    }
-    log.debug("Waiting for delete node action to complete");
-    cleanupLatch.await(5, TimeUnit.MINUTES);
-  }
-
-
-}
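
The matching Collections API call, as a SolrJ sketch (SolrClient solr is
assumed; node names use the host:port_solr form):

    CollectionAdminRequest.deleteNode("node1:8983_solr").process(solr);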

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java
deleted file mode 100644
index 4dbc059..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.api.collections;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.Cmd;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionAdminParams.COUNT_PROP;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-
-public class DeleteReplicaCmd implements Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public DeleteReplicaCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    deleteReplica(clusterState, message, results, null);
-  }
-
-
-  @SuppressWarnings("unchecked")
-  void deleteReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
-          throws KeeperException, InterruptedException {
-    log.debug("deleteReplica() : {}", Utils.toJSONString(message));
-    boolean parallel = message.getBool("parallel", false);
-
-    //If a count is specified, the strategy needs to be different
-    if (message.getStr(COUNT_PROP) != null) {
-      deleteReplicaBasedOnCount(clusterState, message, results, onComplete, parallel);
-      return;
-    }
-
-
-    ocmh.checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP);
-    String collectionName = message.getStr(COLLECTION_PROP);
-    String shard = message.getStr(SHARD_ID_PROP);
-    String replicaName = message.getStr(REPLICA_PROP);
-
-    DocCollection coll = clusterState.getCollection(collectionName);
-    Slice slice = coll.getSlice(shard);
-    if (slice == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "Invalid shard name : " +  shard + " in collection : " +  collectionName);
-    }
-
-    deleteCore(slice, collectionName, replicaName, message, shard, results, onComplete,  parallel);
-
-  }
-
-
-  /**
-   * Delete replicas based on count for a given collection. If a shard is passed, deletes replicas
-   * only from that shard; otherwise deletes the given number of replicas from every shard of the collection.
-   */
-  void deleteReplicaBasedOnCount(ClusterState clusterState,
-                                 ZkNodeProps message,
-                                 NamedList results,
-                                 Runnable onComplete,
-                                 boolean parallel)
-          throws KeeperException, InterruptedException {
-    ocmh.checkRequired(message, COLLECTION_PROP, COUNT_PROP);
-    int count = Integer.parseInt(message.getStr(COUNT_PROP));
-    String collectionName = message.getStr(COLLECTION_PROP);
-    String shard = message.getStr(SHARD_ID_PROP);
-    DocCollection coll = clusterState.getCollection(collectionName);
-    Slice slice = null;
-    //Validate the shard name if one was passed.
-    if (shard != null) {
-      slice = coll.getSlice(shard);
-      if (slice == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-                "Invalid shard name : " +  shard +  " in collection : " + collectionName);
-      }
-    }
-
-    Map<Slice, Set<String>> shardToReplicasMapping = new HashMap<>();
-    if (slice != null) {
-      Set<String> replicasToBeDeleted = pickReplicasTobeDeleted(slice, shard, collectionName, count);
-      shardToReplicasMapping.put(slice,replicasToBeDeleted);
-    } else {
-
-      //No specific shard given: pick replicas to delete from every shard of the collection.
-      Collection<Slice> allSlices = coll.getSlices();
-      for (Slice individualSlice : allSlices) {
-        Set<String> replicasToBeDeleted = pickReplicasTobeDeleted(individualSlice, individualSlice.getName(), collectionName, count);
-        shardToReplicasMapping.put(individualSlice, replicasToBeDeleted);
-      }
-    }
-
-    for (Slice shardSlice: shardToReplicasMapping.keySet()) {
-      String shardId = shardSlice.getName();
-      Set<String> replicas = shardToReplicasMapping.get(shardSlice);
-      //delete each replica picked for this shard
-      for (String replica: replicas) {
-        log.debug("Deleting replica {}  for shard {} based on count {}", replica, shardId, count);
-        deleteCore(shardSlice, collectionName, replica, message, shard, results, onComplete, parallel);
-      }
-      results.add("shard_id", shardId);
-      results.add("replicas_deleted", replicas);
-    }
-
-  }
-
-
-  /**
-   * Pick replicas to be deleted. Avoid picking the leader.
-   */
-  private Set<String> pickReplicasTobeDeleted(Slice slice, String shard, String collectionName, int count) {
-    validateReplicaAvailability(slice, shard, collectionName, count);
-    Collection<Replica> allReplicas = slice.getReplicas();
-    Set<String> replicasToBeRemoved = new HashSet<>();
-    Replica leader = slice.getLeader();
-    for (Replica replica: allReplicas) {
-      if (count == 0) {
-        break;
-      }
-      //Avoid picking the leader, to minimize disruption to the cluster.
-      if (leader.getCoreName().equals(replica.getCoreName())) {
-        continue;
-      }
-      replicasToBeRemoved.add(replica.getName());
-      count --;
-    }
-    return replicasToBeRemoved;
-  }
-
-  /**
-   * Validate that enough replicas are available to satisfy the delete request. Also error out
-   * if there is only one replica available.
-   */
-  private void validateReplicaAvailability(Slice slice, String shard, String collectionName, int count) {
-    //If a specific shard was passed, validate that it has replicas and more than one left
-    if (slice != null) {
-      Collection<Replica> allReplicasForShard = slice.getReplicas();
-      if (allReplicasForShard == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No replicas found in shard/collection: " +
-                shard + "/" + collectionName);
-      }
-
-
-      if (allReplicasForShard.size() == 1) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There is only one replica available in shard/collection: " +
-                shard + "/" + collectionName + ". Cannot delete that.");
-      }
-
-      if (allReplicasForShard.size() <= count) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cannot delete the requested number of replicas in shard/collection: " +
-                shard + "/" + collectionName + "; at least one replica must remain. Requested: " + count + " Available: " + allReplicasForShard.size() + ".");
-      }
-    }
-  }
-
-  void deleteCore(Slice slice, String collectionName, String replicaName, ZkNodeProps message, String shard, NamedList results, Runnable onComplete, boolean parallel) throws KeeperException, InterruptedException {
-
-    Replica replica = slice.getReplica(replicaName);
-    if (replica == null) {
-      ArrayList<String> l = new ArrayList<>();
-      for (Replica r : slice.getReplicas())
-        l.add(r.getName());
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid replica : " +  replicaName + " in shard/collection : " +
-              shard  + "/" + collectionName + " available replicas are " +  StrUtils.join(l, ','));
-    }
-
-    // If users are being safe and only want to remove a replica if it is down, they can specify onlyIfDown=true
-    // on the command.
-    if (Boolean.parseBoolean(message.getStr(OverseerCollectionMessageHandler.ONLY_IF_DOWN)) && replica.getState() != Replica.State.DOWN) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "Attempted to remove replica : " + collectionName + "/"  + shard + "/" + replicaName +
-              " with onlyIfDown='true', but state is '" + replica.getStr(ZkStateReader.STATE_PROP) + "'");
-    }
-
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-    String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
-    String asyncId = message.getStr(ASYNC);
-    AtomicReference<Map<String, String>> requestMap = new AtomicReference<>(null);
-    if (asyncId != null) {
-      requestMap.set(new HashMap<>(1, 1.0f));
-    }
-
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.add(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.UNLOAD.toString());
-    params.add(CoreAdminParams.CORE, core);
-
-    params.set(CoreAdminParams.DELETE_INDEX, message.getBool(CoreAdminParams.DELETE_INDEX, true));
-    params.set(CoreAdminParams.DELETE_INSTANCE_DIR, message.getBool(CoreAdminParams.DELETE_INSTANCE_DIR, true));
-    params.set(CoreAdminParams.DELETE_DATA_DIR, message.getBool(CoreAdminParams.DELETE_DATA_DIR, true));
-    params.set(CoreAdminParams.DELETE_METRICS_HISTORY, message.getBool(CoreAdminParams.DELETE_METRICS_HISTORY, true));
-
-    boolean isLive = ocmh.zkStateReader.getClusterState().getLiveNodes().contains(replica.getNodeName());
-    if (isLive) {
-      ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap.get());
-    }
-
-    Callable<Boolean> callable = () -> {
-      try {
-        if (isLive) {
-          ocmh.processResponses(results, shardHandler, false, null, asyncId, requestMap.get());
-
-          //check if the core unload removed the corenode zk entry
-          if (ocmh.waitForCoreNodeGone(collectionName, shard, replicaName, 5000)) return Boolean.TRUE;
-        }
-
-        // try and ensure core info is removed from cluster state
-        ocmh.deleteCoreNode(collectionName, replicaName, replica, core);
-        if (ocmh.waitForCoreNodeGone(collectionName, shard, replicaName, 30000)) return Boolean.TRUE;
-        return Boolean.FALSE;
-      } catch (Exception e) {
-        results.add("failure", "Could not complete delete: " + e.getMessage());
-        throw e;
-      } finally {
-        if (onComplete != null) onComplete.run();
-      }
-    };
-
-    if (!parallel) {
-      try {
-        if (!callable.call())
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-                  "Could not remove replica : " + collectionName + "/" + shard + "/" + replicaName);
-      } catch (InterruptedException | KeeperException e) {
-        throw e;
-      } catch (Exception ex) {
-        throw new SolrException(SolrException.ErrorCode.UNKNOWN, "Error waiting for corenode gone", ex);
-      }
-
-    } else {
-      ocmh.tpe.submit(callable);
-    }
-
-  }
-
-}
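As a usage sketch, the DELETEREPLICA code paths above are normally reached through the SolrJ Collections API. Assuming SolrJ 7.x and a SolrCloud cluster reachable through the given ZooKeeper address (the collection, shard, and replica names below are placeholders):

    import java.util.Collections;
    import java.util.Optional;

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class DeleteReplicaExample {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder(
            Collections.singletonList("localhost:2181"), Optional.empty()).build()) {
          // Delete a single replica by name (the deleteCore path above).
          CollectionAdminRequest.deleteReplica("myColl", "shard1", "core_node3")
              .process(client);
          // Delete two replicas chosen by the overseer (the count-based path above).
          CollectionAdminRequest.deleteReplicasFromShard("myColl", "shard1", 2)
              .process(client);
        }
      }
    }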

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteShardCmd.java
deleted file mode 100644
index 2ef2955..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteShardCmd.java
+++ /dev/null
@@ -1,178 +0,0 @@
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.api.collections;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.client.solrj.cloud.DistributedQueue;
-import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-public class DeleteShardCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-  private final TimeSource timeSource;
-
-  public DeleteShardCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-    this.timeSource = ocmh.cloudManager.getTimeSource();
-  }
-
-  @Override
-  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
-    String sliceId = message.getStr(ZkStateReader.SHARD_ID_PROP);
-
-    log.info("Delete shard invoked");
-    Slice slice = clusterState.getCollection(collectionName).getSlice(sliceId);
-    if (slice == null) throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-        "No shard with name " + sliceId + " exists for collection " + collectionName);
-
-    // For now, only allow deletion of inactive slices or custom hashes (range==null).
-    // TODO: Add check for range gaps on Slice deletion
-    final Slice.State state = slice.getState();
-    if (!(slice.getRange() == null || state == Slice.State.INACTIVE || state == Slice.State.RECOVERY
-        || state == Slice.State.CONSTRUCTION) || state == Slice.State.RECOVERY_FAILED) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The slice: " + slice.getName() + " is currently " + state
-          + ". Only non-active (or custom-hashed) slices can be deleted.");
-    }
-
-    if (state == Slice.State.RECOVERY)  {
-      // mark the slice as 'construction' and only then try to delete the cores
-      // see SOLR-9455
-      DistributedQueue inQueue = Overseer.getStateUpdateQueue(ocmh.zkStateReader.getZkClient());
-      Map<String, Object> propMap = new HashMap<>();
-      propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-      propMap.put(sliceId, Slice.State.CONSTRUCTION.toString());
-      propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
-      ZkNodeProps m = new ZkNodeProps(propMap);
-      inQueue.offer(Utils.toJSON(m));
-    }
-
-    String asyncId = message.getStr(ASYNC);
-
-    try {
-      List<ZkNodeProps> replicas = getReplicasForSlice(collectionName, slice);
-      CountDownLatch cleanupLatch = new CountDownLatch(replicas.size());
-      for (ZkNodeProps r : replicas) {
-        final ZkNodeProps replica = r.plus(message.getProperties()).plus("parallel", "true").plus(ASYNC, asyncId);
-        log.info("Deleting replica for collection={} shard={} on node={}", replica.getStr(COLLECTION_PROP), replica.getStr(SHARD_ID_PROP), replica.getStr(CoreAdminParams.NODE));
-        NamedList deleteResult = new NamedList();
-        try {
-          ((DeleteReplicaCmd)ocmh.commandMap.get(DELETEREPLICA)).deleteReplica(clusterState, replica, deleteResult, () -> {
-            cleanupLatch.countDown();
-            if (deleteResult.get("failure") != null) {
-              synchronized (results) {
-                results.add("failure", String.format(Locale.ROOT, "Failed to delete replica for collection=%s shard=%s" +
-                    " on node=%s", replica.getStr(COLLECTION_PROP), replica.getStr(SHARD_ID_PROP), replica.getStr(NODE_NAME_PROP)));
-              }
-            }
-            SimpleOrderedMap success = (SimpleOrderedMap) deleteResult.get("success");
-            if (success != null) {
-              synchronized (results)  {
-                results.add("success", success);
-              }
-            }
-          });
-        } catch (KeeperException e) {
-          log.warn("Error deleting replica: " + r, e);
-          cleanupLatch.countDown();
-        } catch (Exception e) {
-          log.warn("Error deleting replica: " + r, e);
-          cleanupLatch.countDown();
-          throw e;
-        }
-      }
-      log.debug("Waiting for delete shard action to complete");
-      cleanupLatch.await(5, TimeUnit.MINUTES);
-
-      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, DELETESHARD.toLower(), ZkStateReader.COLLECTION_PROP,
-          collectionName, ZkStateReader.SHARD_ID_PROP, sliceId);
-      ZkStateReader zkStateReader = ocmh.zkStateReader;
-      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
-
-      // wait a while until the shard disappears from the cluster state
-      TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
-      boolean removed = false;
-      while (!timeout.hasTimedOut()) {
-        timeout.sleep(100);
-        DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
-        removed = collection.getSlice(sliceId) == null;
-        if (removed) {
-          timeout.sleep(100); // wait a bit longer so other readers are likely to see the removal on return
-          break;
-        }
-      }
-      if (!removed) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "Could not fully remove collection: " + collectionName + " shard: " + sliceId);
-      }
-
-      log.info("Successfully deleted collection: " + collectionName + ", shard: " + sliceId);
-    } catch (SolrException e) {
-      throw e;
-    } catch (Exception e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Error executing delete operation for collection: " + collectionName + " shard: " + sliceId, e);
-    }
-  }
-
-  private List<ZkNodeProps> getReplicasForSlice(String collectionName, Slice slice) {
-    List<ZkNodeProps> sourceReplicas = new ArrayList<>();
-    for (Replica replica : slice.getReplicas()) {
-      ZkNodeProps props = new ZkNodeProps(
-          COLLECTION_PROP, collectionName,
-          SHARD_ID_PROP, slice.getName(),
-          ZkStateReader.CORE_NAME_PROP, replica.getCoreName(),
-          ZkStateReader.REPLICA_PROP, replica.getName(),
-          CoreAdminParams.NODE, replica.getNodeName());
-      sourceReplicas.add(props);
-    }
-    return sourceReplicas;
-  }
-}
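For reference, a hedged SolrJ sketch of invoking this command; note that the state check above means the target shard must be inactive (or custom-hashed, range==null) for the call to succeed. Names and addresses are placeholders:

    import java.util.Collections;
    import java.util.Optional;

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class DeleteShardExample {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder(
            Collections.singletonList("localhost:2181"), Optional.empty()).build()) {
          // Deletes shard1 of myColl; the overseer routes this to DeleteShardCmd.
          // The shard must be INACTIVE (e.g. after a split) or custom-hashed (range==null).
          CollectionAdminRequest.deleteShard("myColl", "shard1").process(client);
        }
      }
    }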

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteSnapshotCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteSnapshotCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteSnapshotCmd.java
deleted file mode 100644
index cf0a234..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteSnapshotCmd.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.api.collections;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.Replica.State;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.SnapshotStatus;
-import org.apache.solr.core.snapshots.SolrSnapshotManager;
-import org.apache.solr.handler.component.ShardHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class implements the functionality of deleting a collection-level snapshot.
- */
-public class DeleteSnapshotCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public DeleteSnapshotCmd (OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    String collectionName =  message.getStr(COLLECTION_PROP);
-    String commitName =  message.getStr(CoreAdminParams.COMMIT_NAME);
-    String asyncId = message.getStr(ASYNC);
-    Map<String, String> requestMap = new HashMap<>();
-    NamedList shardRequestResults = new NamedList();
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-    SolrZkClient zkClient = ocmh.zkStateReader.getZkClient();
-
-    Optional<CollectionSnapshotMetaData> meta = SolrSnapshotManager.getCollectionLevelSnapshot(zkClient, collectionName, commitName);
-    if (!meta.isPresent()) { // Snapshot not found. Nothing to do.
-      return;
-    }
-
-    log.info("Deleting a snapshot for collection={} with commitName={}", collectionName, commitName);
-
-    Set<String> existingCores = new HashSet<>();
-    for (Slice s : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getSlices()) {
-      for (Replica r : s.getReplicas()) {
-        existingCores.add(r.getCoreName());
-      }
-    }
-
-    Set<String> coresWithSnapshot = new HashSet<>();
-    for (CoreSnapshotMetaData m : meta.get().getReplicaSnapshots()) {
-      if (existingCores.contains(m.getCoreName())) {
-        coresWithSnapshot.add(m.getCoreName());
-      }
-    }
-
-    log.info("Existing cores with snapshot for collection={} are {}", collectionName, existingCores);
-    for (Slice slice : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getSlices()) {
-      for (Replica replica : slice.getReplicas()) {
-        if (replica.getState() == State.DOWN) {
-          continue; // The replica is down, so there is no point sending a request.
-        }
-
-        // Note - when a snapshot is found in the in_progress state, it is the result of an overseer
-        // failure while handling the snapshot creation. Since we don't know the exact set of
-        // replicas to contact at this point, we try on all replicas.
-        if (meta.get().getStatus() == SnapshotStatus.InProgress || coresWithSnapshot.contains(replica.getCoreName())) {
-          String coreName = replica.getStr(CORE_NAME_PROP);
-
-          ModifiableSolrParams params = new ModifiableSolrParams();
-          params.set(CoreAdminParams.ACTION, CoreAdminAction.DELETESNAPSHOT.toString());
-          params.set(NAME, slice.getName());
-          params.set(CORE_NAME_PROP, coreName);
-          params.set(CoreAdminParams.COMMIT_NAME, commitName);
-
-          log.info("Sending deletesnapshot request to core={} with commitName={}", coreName, commitName);
-          ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap);
-        }
-      }
-    }
-
-    ocmh.processResponses(shardRequestResults, shardHandler, false, null, asyncId, requestMap);
-    NamedList success = (NamedList) shardRequestResults.get("success");
-    List<CoreSnapshotMetaData> replicas = new ArrayList<>();
-    if (success != null) {
-      for ( int i = 0 ; i < success.size() ; i++) {
-        NamedList resp = (NamedList)success.getVal(i);
-        // Unfortunately async processing logic doesn't provide the "core" name automatically.
-        String coreName = (String)resp.get("core");
-        coresWithSnapshot.remove(coreName);
-      }
-    }
-
-    if (!coresWithSnapshot.isEmpty()) { // One or more failures.
-      log.warn("Failed to delete a snapshot for collection {} with commitName = {}. Snapshot could not be deleted for following cores {}",
-          collectionName, commitName, coresWithSnapshot);
-
-      List<CoreSnapshotMetaData> replicasWithSnapshot = new ArrayList<>();
-      for (CoreSnapshotMetaData m : meta.get().getReplicaSnapshots()) {
-        if (coresWithSnapshot.contains(m.getCoreName())) {
-          replicasWithSnapshot.add(m);
-        }
-      }
-
-      // Update the ZK meta-data to include only cores with the snapshot. This will enable users to figure out
-      // which cores still contain the named snapshot.
-      CollectionSnapshotMetaData newResult = new CollectionSnapshotMetaData(meta.get().getName(), SnapshotStatus.Failed,
-          meta.get().getCreationDate(), replicasWithSnapshot);
-      SolrSnapshotManager.updateCollectionLevelSnapshot(zkClient, collectionName, newResult);
-      log.info("Saved snapshot information for collection={} with commitName={} in Zookeeper as follows", collectionName, commitName,
-          Utils.toJSON(newResult));
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to delete snapshot on cores " + coresWithSnapshot);
-
-    } else {
-      // Delete the ZK path so that we eliminate the references of this snapshot from collection level meta-data.
-      SolrSnapshotManager.deleteCollectionLevelSnapshot(zkClient, collectionName, commitName);
-      log.info("Deleted Zookeeper snapshot metdata for collection={} with commitName={}", collectionName, commitName);
-      log.info("Successfully deleted snapshot for collection={} with commitName={}", collectionName, commitName);
-    }
-  }
-}
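A sketch of triggering this command through the Collections API over HTTP from SolrJ; the base URL, collection, and snapshot name are placeholders:

    import org.apache.solr.client.solrj.SolrRequest;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.GenericSolrRequest;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class DeleteSnapshotExample {
      public static void main(String[] args) throws Exception {
        try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
          ModifiableSolrParams params = new ModifiableSolrParams();
          params.set("action", "DELETESNAPSHOT");
          params.set("collection", "myColl");
          params.set("commitName", "weekly");
          // The Collections API endpoint; the overseer dispatches this to DeleteSnapshotCmd.
          new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/collections", params)
              .process(client);
        }
      }
    }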

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/LeaderRecoveryWatcher.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/LeaderRecoveryWatcher.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/LeaderRecoveryWatcher.java
deleted file mode 100644
index a80fdc0..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/LeaderRecoveryWatcher.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.api.collections;
-
-import java.util.Set;
-
-import org.apache.solr.common.SolrCloseableLatch;
-import org.apache.solr.common.cloud.CollectionStateWatcher;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkStateReader;
-
-/**
- * We use this watcher to wait for any eligible replica in a shard to become active so that it can become a leader.
- */
-public class LeaderRecoveryWatcher implements CollectionStateWatcher {
-  String collectionId;
-  String shardId;
-  String replicaId;
-  String targetCore;
-  SolrCloseableLatch latch;
-
-  /**
-   * Watch for recovery of a replica
-   *
-   * @param collectionId   collection name
-   * @param shardId        shard id
-   * @param replicaId      source replica name (coreNodeName)
-   * @param targetCore     specific target core name - if null then any active replica will do
-   * @param latch countdown when recovered
-   */
-  LeaderRecoveryWatcher(String collectionId, String shardId, String replicaId, String targetCore, SolrCloseableLatch latch) {
-    this.collectionId = collectionId;
-    this.shardId = shardId;
-    this.replicaId = replicaId;
-    this.targetCore = targetCore;
-    this.latch = latch;
-  }
-
-  @Override
-  public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
-    if (collectionState == null) { // collection has been deleted - don't wait
-      latch.countDown();
-      return true;
-    }
-    Slice slice = collectionState.getSlice(shardId);
-    if (slice == null) { // shard has been removed - don't wait
-      latch.countDown();
-      return true;
-    }
-    for (Replica replica : slice.getReplicas()) {
-      // check if another replica exists - it doesn't have to be the one we're moving,
-      // as long as it's active and can become a leader; in that case we don't have to wait
-      // for recovery of the specific replica that we've just added
-      if (!replica.getName().equals(replicaId)) {
-        if (replica.getType().equals(Replica.Type.PULL)) { // not eligible for leader election
-          continue;
-        }
-        // check its state
-        String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
-        if (targetCore != null && !targetCore.equals(coreName)) {
-          continue;
-        }
-        if (replica.isActive(liveNodes)) { // recovered - stop waiting
-          latch.countDown();
-          return true;
-        }
-      }
-    }
-    // set the watch again to wait for the new replica to recover
-    return false;
-  }
-}
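A simplified sketch of how such a watcher is used: register a CollectionStateWatcher on a ZkStateReader and block on a latch until an eligible replica (active, non-PULL, not the one being moved) shows up. Collection, shard, and replica names are placeholders, and a plain CountDownLatch stands in for SolrCloseableLatch:

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    import org.apache.solr.common.cloud.Replica;
    import org.apache.solr.common.cloud.Slice;
    import org.apache.solr.common.cloud.ZkStateReader;

    public class WatcherExample {

      // Blocks (up to 60s) until shard1 of myColl has an active, non-PULL replica
      // other than core_node3, i.e. a viable leader candidate.
      static boolean waitForLeaderCandidate(ZkStateReader zkStateReader) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);
        zkStateReader.registerCollectionStateWatcher("myColl", (liveNodes, state) -> {
          if (state == null) { // collection deleted - stop watching
            latch.countDown();
            return true;
          }
          Slice slice = state.getSlice("shard1");
          if (slice == null) { // shard removed - stop watching
            latch.countDown();
            return true;
          }
          for (Replica r : slice.getReplicas()) {
            if (!r.getName().equals("core_node3")
                && r.getType() != Replica.Type.PULL
                && r.isActive(liveNodes)) {
              latch.countDown();
              return true; // returning true deregisters the watcher
            }
          }
          return false; // keep watching
        });
        return latch.await(60, TimeUnit.SECONDS);
      }
    }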

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/MaintainRoutedAliasCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/MaintainRoutedAliasCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/MaintainRoutedAliasCmd.java
deleted file mode 100644
index e5c5de6..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/MaintainRoutedAliasCmd.java
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.api.collections;
-
-import java.lang.invoke.MethodHandles;
-import java.text.ParseException;
-import java.time.Instant;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.OverseerSolrResponse;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.Aliases;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.handler.admin.CollectionsHandler;
-import org.apache.solr.request.LocalSolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.util.DateMathParser;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CollectionAdminParams.COLL_CONF;
-import static org.apache.solr.cloud.api.collections.TimeRoutedAlias.CREATE_COLLECTION_PREFIX;
-import static org.apache.solr.cloud.api.collections.TimeRoutedAlias.ROUTED_ALIAS_NAME_CORE_PROP;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-/**
- * (Internal) For "time routed aliases", both deletes old collections and creates new collections
- * associated with routed aliases.
- *
- * Note: this logic is within an Overseer because we want to leverage the mutual exclusion
- * property afforded by the lock it obtains on the alias name.
- *
- * @since 7.3
- * @lucene.internal
- */
-public class MaintainRoutedAliasCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String IF_MOST_RECENT_COLL_NAME = "ifMostRecentCollName"; //TODO rename to createAfter
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public MaintainRoutedAliasCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  /**
-   * Invokes this command from the client.  If there's a problem it will throw an exception.
-   * Please note that it is important to never add async to this invocation. This method must
-   * block (up to the standard OCP timeout) to prevent large batches of adds from sending a message
-   * to the overseer for every document added in TimeRoutedAliasUpdateProcessor.
-   */
-  public static NamedList remoteInvoke(CollectionsHandler collHandler, String aliasName, String mostRecentCollName)
-      throws Exception {
-    final String operation = CollectionParams.CollectionAction.MAINTAINROUTEDALIAS.toLower();
-    Map<String, Object> msg = new HashMap<>();
-    msg.put(Overseer.QUEUE_OPERATION, operation);
-    msg.put(CollectionParams.NAME, aliasName);
-    msg.put(MaintainRoutedAliasCmd.IF_MOST_RECENT_COLL_NAME, mostRecentCollName);
-    final SolrResponse rsp = collHandler.sendToOCPQueue(new ZkNodeProps(msg));
-    if (rsp.getException() != null) {
-      throw rsp.getException();
-    }
-    return rsp.getResponse();
-  }
-
-  @Override
-  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    //---- PARSE PRIMARY MESSAGE PARAMS
-    // important that we use NAME for the alias as that is what the Overseer will get a lock on before calling us
-    final String aliasName = message.getStr(NAME);
-    // the client believes this is the mostRecent collection name.  We assert this if provided.
-    final String ifMostRecentCollName = message.getStr(IF_MOST_RECENT_COLL_NAME); // optional
-
-    // TODO collection param (or intervalDateMath override?), useful for data capped collections
-
-    //---- PARSE ALIAS INFO FROM ZK
-    final ZkStateReader.AliasesManager aliasesManager = ocmh.zkStateReader.aliasesManager;
-    final Aliases aliases = aliasesManager.getAliases();
-    final Map<String, String> aliasMetadata = aliases.getCollectionAliasProperties(aliasName);
-    if (aliasMetadata == null) {
-      throw newAliasMustExistException(aliasName); // if it did exist, we'd have a non-null map
-    }
-    final TimeRoutedAlias timeRoutedAlias = new TimeRoutedAlias(aliasName, aliasMetadata);
-
-    final List<Map.Entry<Instant, String>> parsedCollections =
-        timeRoutedAlias.parseCollections(aliases, () -> newAliasMustExistException(aliasName));
-
-    //---- GET MOST RECENT COLL
-    final Map.Entry<Instant, String> mostRecentEntry = parsedCollections.get(0);
-    final Instant mostRecentCollTimestamp = mostRecentEntry.getKey();
-    final String mostRecentCollName = mostRecentEntry.getValue();
-    if (ifMostRecentCollName != null) {
-      if (!mostRecentCollName.equals(ifMostRecentCollName)) {
-        // Possibly due to race conditions in URPs on multiple leaders calling us at the same time
-        String msg = IF_MOST_RECENT_COLL_NAME + " expected " + ifMostRecentCollName + " but it's " + mostRecentCollName;
-        if (parsedCollections.stream().map(Map.Entry::getValue).noneMatch(ifMostRecentCollName::equals)) {
-          msg += ". Furthermore, this collection isn't in the list of collections referenced by the alias.";
-        }
-        log.info(msg);
-        results.add("message", msg);
-        return;
-      }
-    } else if (mostRecentCollTimestamp.isAfter(Instant.now())) {
-      final String msg = "Most recent collection is in the future, so we won't create another.";
-      log.info(msg);
-      results.add("message", msg);
-      return;
-    }
-
-    //---- COMPUTE NEXT COLLECTION NAME
-    final Instant nextCollTimestamp = timeRoutedAlias.computeNextCollTimestamp(mostRecentCollTimestamp);
-    final String createCollName = TimeRoutedAlias.formatCollectionNameFromInstant(aliasName, nextCollTimestamp);
-
-    //---- DELETE OLDEST COLLECTIONS AND REMOVE FROM ALIAS (if configured)
-    NamedList deleteResults = deleteOldestCollectionsAndUpdateAlias(timeRoutedAlias, aliasesManager, nextCollTimestamp);
-    if (deleteResults != null) {
-      results.add("delete", deleteResults);
-    }
-
-    //---- CREATE THE COLLECTION
-    NamedList createResults = createCollectionAndWait(clusterState, aliasName, aliasMetadata,
-        createCollName, ocmh);
-    if (createResults != null) {
-      results.add("create", createResults);
-    }
-
-    //---- UPDATE THE ALIAS WITH NEW COLLECTION
-    aliasesManager.applyModificationAndExportToZk(curAliases -> {
-      final List<String> curTargetCollections = curAliases.getCollectionAliasListMap().get(aliasName);
-      if (curTargetCollections.contains(createCollName)) {
-        return curAliases;
-      } else {
-        List<String> newTargetCollections = new ArrayList<>(curTargetCollections.size() + 1);
-        // prepend it on purpose (thus reverse sorted). Solr alias resolution defaults to the first collection in a list
-        newTargetCollections.add(createCollName);
-        newTargetCollections.addAll(curTargetCollections);
-        return curAliases.cloneWithCollectionAlias(aliasName, StrUtils.join(newTargetCollections, ','));
-      }
-    });
-
-  }
-
-  /**
-   * Deletes some of the oldest collection(s) based on {@link TimeRoutedAlias#getAutoDeleteAgeMath()}. If not present
-   * then does nothing.  Returns non-null results if something was deleted (or if we tried to).
-   * {@code now} is the date to which the math is relative.
-   */
-  NamedList deleteOldestCollectionsAndUpdateAlias(TimeRoutedAlias timeRoutedAlias,
-                                                  ZkStateReader.AliasesManager aliasesManager,
-                                                  Instant now) throws Exception {
-    final String autoDeleteAgeMathStr = timeRoutedAlias.getAutoDeleteAgeMath();
-    if (autoDeleteAgeMathStr == null) {
-      return null;
-    }
-    final Instant delBefore;
-    try {
-      delBefore = new DateMathParser(Date.from(now), timeRoutedAlias.getTimeZone()).parseMath(autoDeleteAgeMathStr).toInstant();
-    } catch (ParseException e) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e); // note: should not happen by this point
-    }
-
-    String aliasName = timeRoutedAlias.getAliasName();
-
-    Collection<String> collectionsToDelete = new LinkedHashSet<>();
-
-    // First update the alias (there may be no change to make!)
-    aliasesManager.applyModificationAndExportToZk(curAliases -> {
-      // note: we could re-parse the TimeRoutedAlias object from curAliases but I don't think there's a point to it.
-
-      final List<Map.Entry<Instant, String>> parsedCollections =
-          timeRoutedAlias.parseCollections(curAliases, () -> newAliasMustExistException(aliasName));
-
-      //iterating from newest to oldest, find the first collection that has a time <= "before".  We keep this collection
-      // (and all newer ones to its left) but we delete the older collections, which are the ones that follow.
-      // This logic will always keep the first collection, which we can't delete.
-      int numToKeep = 0;
-      for (Map.Entry<Instant, String> parsedCollection : parsedCollections) {
-        numToKeep++;
-        final Instant colInstant = parsedCollection.getKey();
-        if (colInstant.isBefore(delBefore) || colInstant.equals(delBefore)) {
-          break;
-        }
-      }
-      if (numToKeep == parsedCollections.size()) {
-        log.debug("No old time routed collections to delete.");
-        return curAliases;
-      }
-
-      final List<String> targetList = curAliases.getCollectionAliasListMap().get(aliasName);
-      // remember to delete these... (oldest to newest)
-      for (int i = targetList.size() - 1; i >= numToKeep; i--) {
-        collectionsToDelete.add(targetList.get(i));
-      }
-      // new alias list has only "numToKeep" first items
-      final List<String> collectionsToKeep = targetList.subList(0, numToKeep);
-      final String collectionsToKeepStr = StrUtils.join(collectionsToKeep, ',');
-      return curAliases.cloneWithCollectionAlias(aliasName, collectionsToKeepStr);
-    });
-
-    if (collectionsToDelete.isEmpty()) {
-      return null;
-    }
-
-    log.info("Removing old time routed collections: {}", collectionsToDelete);
-    // Should this be done asynchronously?  If we got "ASYNC" then probably.
-    //   It would shorten the time the Overseer holds a lock on the alias name
-    //   (deleting the collections will be done later and not use that lock).
-    //   Don't bother about parallel; it's unusual to have more than 1.
-    // Note we don't throw an exception here under most cases; instead the response will have information about
-    //   how each delete request went, possibly including a failure message.
-    final CollectionsHandler collHandler = ocmh.overseer.getCoreContainer().getCollectionsHandler();
-    NamedList results = new NamedList();
-    for (String collection : collectionsToDelete) {
-      final SolrParams reqParams = CollectionAdminRequest.deleteCollection(collection).getParams();
-      SolrQueryResponse rsp = new SolrQueryResponse();
-      collHandler.handleRequestBody(new LocalSolrQueryRequest(null, reqParams), rsp);
-      results.add(collection, rsp.getValues());
-    }
-    return results;
-  }
-
-  /**
-   * Creates a collection (for use in a routed alias), waiting for it to be ready before returning.
-   * If the collection already exists then this is not an error.
-   * IMPORTANT: Only call this from an {@link OverseerCollectionMessageHandler.Cmd}.
-   */
-  static NamedList createCollectionAndWait(ClusterState clusterState, String aliasName, Map<String, String> aliasMetadata,
-                                           String createCollName, OverseerCollectionMessageHandler ocmh) throws Exception {
-    // Map alias metadata starting with a prefix to a create-collection API request
-    final ModifiableSolrParams createReqParams = new ModifiableSolrParams();
-    for (Map.Entry<String, String> e : aliasMetadata.entrySet()) {
-      if (e.getKey().startsWith(CREATE_COLLECTION_PREFIX)) {
-        createReqParams.set(e.getKey().substring(CREATE_COLLECTION_PREFIX.length()), e.getValue());
-      }
-    }
-    if (createReqParams.get(COLL_CONF) == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "We require an explicit " + COLL_CONF );
-    }
-    createReqParams.set(NAME, createCollName);
-    createReqParams.set("property." + ROUTED_ALIAS_NAME_CORE_PROP, aliasName);
-    // a CollectionOperation reads params and produces a message (Map) that is supposed to be sent to the Overseer.
-    //   Although we could create the Map without it, there are a fair number of rules we don't want to reproduce.
-    final Map<String, Object> createMsgMap = CollectionsHandler.CollectionOperation.CREATE_OP.execute(
-        new LocalSolrQueryRequest(null, createReqParams),
-        null,
-        ocmh.overseer.getCoreContainer().getCollectionsHandler());
-    createMsgMap.put(Overseer.QUEUE_OPERATION, "create");
-
-    NamedList results = new NamedList();
-    try {
-      // Since we are running in the Overseer here, send the message directly to the Overseer CreateCollectionCmd.
-      // note: there doesn't seem to be any point in locking on the collection name, so we don't. We currently should
-      //   already have a lock on the alias name which should be sufficient.
-      ocmh.commandMap.get(CollectionParams.CollectionAction.CREATE).call(clusterState, new ZkNodeProps(createMsgMap), results);
-    } catch (SolrException e) {
-      // The collection might already exist, and that's okay -- we can adopt it.
-      if (!e.getMessage().contains("collection already exists")) {
-        throw e;
-      }
-    }
-
-    CollectionsHandler.waitForActiveCollection(createCollName, ocmh.overseer.getCoreContainer(),
-        new OverseerSolrResponse(results));
-    return results;
-  }
-
-  private SolrException newAliasMustExistException(String aliasName) {
-    return new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-        "Alias " + aliasName + " does not exist.");
-  }
-
-}
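To make the auto-delete cutoff above concrete, here is a small sketch of the same DateMathParser computation used in deleteOldestCollectionsAndUpdateAlias; the "-90DAYS" value is only an example of what a routed alias's autoDeleteAge property might hold, and UTC stands in for the alias's configured time zone:

    import java.time.Instant;
    import java.util.Date;
    import java.util.TimeZone;

    import org.apache.solr.util.DateMathParser;

    public class AutoDeleteCutoff {
      public static void main(String[] args) throws Exception {
        Instant now = Instant.now();
        String autoDeleteAgeMath = "-90DAYS"; // example autoDeleteAge value
        // Same expression as in deleteOldestCollectionsAndUpdateAlias above:
        // apply the date math relative to "now" in the alias's time zone.
        Instant delBefore = new DateMathParser(Date.from(now), TimeZone.getTimeZone("UTC"))
            .parseMath(autoDeleteAgeMath).toInstant();
        System.out.println("Routed-alias collections wholly older than " + delBefore
            + " would be deleted");
      }
    }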


[12/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/MetricsHistoryHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/MetricsHistoryHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/MetricsHistoryHandler.java
deleted file mode 100644
index b569fe8..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/MetricsHistoryHandler.java
+++ /dev/null
@@ -1,964 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import javax.imageio.ImageIO;
-import java.awt.Color;
-import java.awt.image.BufferedImage;
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.Closeable;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Set;
-import java.util.TimeZone;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.concurrent.atomic.DoubleAdder;
-import java.util.function.Function;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.solr.api.Api;
-import org.apache.solr.api.ApiBag;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.cloud.NodeStateProvider;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.ReplicaInfo;
-import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
-import org.apache.solr.client.solrj.impl.HttpClientUtil;
-import org.apache.solr.cloud.LeaderElector;
-import org.apache.solr.cloud.Overseer;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.Base64;
-import org.apache.solr.common.util.JavaBinCodec;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.Pair;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.metrics.SolrMetricManager;
-import org.apache.solr.metrics.rrd.SolrRrdBackendFactory;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.security.AuthorizationContext;
-import org.apache.solr.security.PermissionNameProvider;
-import org.apache.solr.util.DefaultSolrThreadFactory;
-import org.apache.zookeeper.KeeperException;
-import org.rrd4j.ConsolFun;
-import org.rrd4j.DsType;
-import org.rrd4j.core.ArcDef;
-import org.rrd4j.core.Archive;
-import org.rrd4j.core.Datasource;
-import org.rrd4j.core.DsDef;
-import org.rrd4j.core.FetchData;
-import org.rrd4j.core.FetchRequest;
-import org.rrd4j.core.RrdDb;
-import org.rrd4j.core.RrdDef;
-import org.rrd4j.core.Sample;
-import org.rrd4j.graph.RrdGraph;
-import org.rrd4j.graph.RrdGraphDef;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static java.util.stream.Collectors.toMap;
-import static org.apache.solr.common.params.CommonParams.ID;
-
-/**
- * Collects metrics from nodes and cores across the cluster and stores them as
- * time series in round-robin databases (RRD) persisted in the .system collection.
- */
-public class MetricsHistoryHandler extends RequestHandlerBase implements PermissionNameProvider, Closeable {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final List<String> DEFAULT_CORE_COUNTERS = new ArrayList<>();
-  public static final List<String> DEFAULT_CORE_GAUGES = new ArrayList<>();
-  public static final List<String> DEFAULT_NODE_GAUGES = new ArrayList<>();
-  public static final List<String> DEFAULT_JVM_GAUGES = new ArrayList<>();
-
-  public static final String NUM_SHARDS_KEY = "numShards";
-  public static final String NUM_REPLICAS_KEY = "numReplicas";
-  public static final String NUM_NODES_KEY = "numNodes";
-
-  public static final List<String> DEFAULT_COLLECTION_GAUGES = new ArrayList<>();
-
-  static {
-    DEFAULT_JVM_GAUGES.add("memory.heap.used");
-    DEFAULT_JVM_GAUGES.add("os.processCpuLoad");
-    DEFAULT_JVM_GAUGES.add("os.systemLoadAverage");
-
-    DEFAULT_NODE_GAUGES.add("CONTAINER.fs.coreRoot.usableSpace");
-
-    DEFAULT_CORE_GAUGES.add("INDEX.sizeInBytes");
-
-    DEFAULT_CORE_COUNTERS.add("QUERY./select.requests");
-    DEFAULT_CORE_COUNTERS.add("UPDATE./update.requests");
-
-    DEFAULT_COLLECTION_GAUGES.add(NUM_SHARDS_KEY);
-    DEFAULT_COLLECTION_GAUGES.add(NUM_REPLICAS_KEY);
-  }
-
-  public static final String COLLECT_PERIOD_PROP = "collectPeriod";
-  public static final String SYNC_PERIOD_PROP = "syncPeriod";
-  public static final String ENABLE_PROP = "enable";
-  public static final String ENABLE_REPLICAS_PROP = "enableReplicas";
-  public static final String ENABLE_NODES_PROP = "enableNodes";
-
-  public static final int DEFAULT_COLLECT_PERIOD = 60;
-  public static final String URI_PREFIX = "solr:";
-
-  private final SolrRrdBackendFactory factory;
-  private final String nodeName;
-  private final SolrClient solrClient;
-  private final MetricsHandler metricsHandler;
-  private final SolrCloudManager cloudManager;
-  private final TimeSource timeSource;
-  private final int collectPeriod;
-  private final Map<String, List<String>> counters = new HashMap<>();
-  private final Map<String, List<String>> gauges = new HashMap<>();
-  private final String overseerUrlScheme;
-
-  private final Map<String, RrdDb> knownDbs = new ConcurrentHashMap<>();
-
-  private ScheduledThreadPoolExecutor collectService;
-  private boolean logMissingCollection = true;
-  private boolean enable;
-  private boolean enableReplicas;
-  private boolean enableNodes;
-  private String versionString;
-
-  public MetricsHistoryHandler(String nodeName, MetricsHandler metricsHandler,
-        SolrClient solrClient, SolrCloudManager cloudManager, Map<String, Object> pluginArgs) {
-
-    Map<String, Object> args = new HashMap<>();
-    // init from optional solr.xml config
-    if (pluginArgs != null) {
-      args.putAll(pluginArgs);
-    }
-    // override from ZK if available
-    if (cloudManager != null) {
-      Map<String, Object> props = (Map<String, Object>)cloudManager.getClusterStateProvider()
-          .getClusterProperty("metrics", Collections.emptyMap())
-          .getOrDefault("history", Collections.emptyMap());
-      args.putAll(props);
-
-      overseerUrlScheme = cloudManager.getClusterStateProvider().getClusterProperty("urlScheme", "http");
-    } else {
-      overseerUrlScheme = "http";
-    }
-
-    this.nodeName = nodeName;
-    this.enable = Boolean.parseBoolean(String.valueOf(args.getOrDefault(ENABLE_PROP, "true")));
-    // default to false - don't collect local per-replica metrics
-    this.enableReplicas = Boolean.parseBoolean(String.valueOf(args.getOrDefault(ENABLE_REPLICAS_PROP, "false")));
-    this.enableNodes = Boolean.parseBoolean(String.valueOf(args.getOrDefault(ENABLE_NODES_PROP, "false")));
-    this.collectPeriod = Integer.parseInt(String.valueOf(args.getOrDefault(COLLECT_PERIOD_PROP, DEFAULT_COLLECT_PERIOD)));
-    int syncPeriod = Integer.parseInt(String.valueOf(args.getOrDefault(SYNC_PERIOD_PROP, SolrRrdBackendFactory.DEFAULT_SYNC_PERIOD)));
-
-    this.solrClient = solrClient;
-    this.metricsHandler = metricsHandler;
-    this.cloudManager = cloudManager;
-    this.timeSource = cloudManager != null ? cloudManager.getTimeSource() : TimeSource.NANO_TIME;
-    factory = new SolrRrdBackendFactory(solrClient, CollectionAdminParams.SYSTEM_COLL,
-            syncPeriod, this.timeSource);
-
-    counters.put(Group.core.toString(), DEFAULT_CORE_COUNTERS);
-    counters.put(Group.node.toString(), Collections.emptyList());
-    counters.put(Group.jvm.toString(), Collections.emptyList());
-    counters.put(Group.collection.toString(), Collections.emptyList());
-    gauges.put(Group.core.toString(), DEFAULT_CORE_GAUGES);
-    gauges.put(Group.node.toString(), DEFAULT_NODE_GAUGES);
-    gauges.put(Group.jvm.toString(), DEFAULT_JVM_GAUGES);
-    gauges.put(Group.collection.toString(), DEFAULT_COLLECTION_GAUGES);
-
-    versionString = this.getClass().getPackage().getImplementationVersion();
-    if (versionString == null) {
-      versionString = "?.?.?";
-    }
-    if (versionString.length() > 24) {
-      versionString = versionString.substring(0, 24) + "...";
-    }
-
-    if (enable) {
-      collectService = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1,
-          new DefaultSolrThreadFactory("MetricsHistoryHandler"));
-      collectService.setRemoveOnCancelPolicy(true);
-      collectService.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
-      collectService.scheduleWithFixedDelay(() -> collectMetrics(),
-          timeSource.convertDelay(TimeUnit.SECONDS, collectPeriod, TimeUnit.MILLISECONDS),
-          timeSource.convertDelay(TimeUnit.SECONDS, collectPeriod, TimeUnit.MILLISECONDS),
-          TimeUnit.MILLISECONDS);
-      checkSystemCollection();
-    }
-  }
-
-  // check that .system exists
-  public void checkSystemCollection() {
-    if (cloudManager != null) {
-      try {
-        if (cloudManager.isClosed() || Thread.interrupted()) {
-          factory.setPersistent(false);
-          return;
-        }
-        ClusterState clusterState = cloudManager.getClusterStateProvider().getClusterState();
-        DocCollection systemColl = clusterState.getCollectionOrNull(CollectionAdminParams.SYSTEM_COLL);
-        if (systemColl == null) {
-          if (logMissingCollection) {
-            log.info("No " + CollectionAdminParams.SYSTEM_COLL + " collection, keeping metrics history in memory.");
-            logMissingCollection = false;
-          }
-          factory.setPersistent(false);
-          return;
-        } else {
-          boolean ready = false;
-          for (Replica r : systemColl.getReplicas()) {
-            if (r.isActive(clusterState.getLiveNodes())) {
-              ready = true;
-              break;
-            }
-          }
-          if (!ready) {
-            log.debug(CollectionAdminParams.SYSTEM_COLL + " collection not ready yet, keeping metrics history in memory");
-            factory.setPersistent(false);
-            return;
-          }
-        }
-      } catch (Exception e) {
-        if (logMissingCollection) {
-          log.warn("Error getting cluster state, keeping metrics history in memory", e);
-        }
-        logMissingCollection = false;
-        factory.setPersistent(false);
-        return;
-      }
-      logMissingCollection = true;
-      factory.setPersistent(true);
-    } else {
-      try {
-        solrClient.query(CollectionAdminParams.SYSTEM_COLL, new SolrQuery(CommonParams.Q, "*:*", CommonParams.ROWS, "0"));
-        factory.setPersistent(true);
-        logMissingCollection = true;
-      } catch (Exception e) {
-        if (logMissingCollection) {
-          log.info("No " + CollectionAdminParams.SYSTEM_COLL + " collection, keeping metrics history in memory.");
-        }
-        logMissingCollection = false;
-        factory.setPersistent(false);
-      }
-    }
-  }
-
-  public SolrClient getSolrClient() {
-    return solrClient;
-  }
-
-  public void removeHistory(String registry) throws IOException {
-    registry = SolrMetricManager.enforcePrefix(registry);
-    knownDbs.remove(registry);
-    factory.remove(registry);
-  }
-
-  @VisibleForTesting
-  public SolrRrdBackendFactory getFactory() {
-    return factory;
-  }
-
-  private String getOverseerLeader() {
-    // non-ZK node has no Overseer
-    if (cloudManager == null) {
-      return null;
-    }
-    ZkNodeProps props = null;
-    try {
-      VersionedData data = cloudManager.getDistribStateManager().getData(
-          Overseer.OVERSEER_ELECT + "/leader");
-      if (data != null && data.getData() != null) {
-        props = ZkNodeProps.load(data.getData());
-      }
-    } catch (KeeperException | IOException | NoSuchElementException e) {
-      log.warn("Could not obtain overseer's address, skipping.", e);
-      return null;
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      return null;
-    }
-    if (props == null) {
-      return null;
-    }
-    String oid = props.getStr(ID);
-    if (oid == null) {
-      return null;
-    }
-    String nodeName = null;
-    try {
-      nodeName = LeaderElector.getNodeName(oid);
-    } catch (Exception e) {
-      log.warn("Unknown format of leader id, skipping: " + oid, e);
-      return null;
-    }
-    return nodeName;
-  }
-
-  private boolean amIOverseerLeader() {
-    return amIOverseerLeader(null);
-  }
-
-  private boolean amIOverseerLeader(String leader) {
-    if (leader == null) {
-      leader = getOverseerLeader();
-    }
-    if (leader == null) {
-      return false;
-    } else {
-      return nodeName.equals(leader);
-    }
-  }
-
-  private void collectMetrics() {
-    log.debug("-- collectMetrics");
-    try {
-      checkSystemCollection();
-    } catch (Exception e) {
-      log.warn("Error checking for .system collection, keeping metrics history in memory", e);
-      factory.setPersistent(false);
-    }
-    // get metrics
-    collectLocalReplicaMetrics();
-    collectGlobalMetrics();
-  }
-
-  private void collectLocalReplicaMetrics() {
-    List<Group> groups = new ArrayList<>();
-    if (enableNodes) {
-      groups.add(Group.jvm);
-      groups.add(Group.node);
-    }
-    if (enableReplicas) {
-      groups.add(Group.core);
-    }
-    for (Group group : groups) {
-      if (Thread.interrupted()) {
-        return;
-      }
-      log.debug("--  collecting local " + group + "...");
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.add(MetricsHandler.GROUP_PARAM, group.toString());
-      params.add(MetricsHandler.COMPACT_PARAM, "true");
-      counters.get(group.toString()).forEach(c -> params.add(MetricsHandler.PREFIX_PARAM, c));
-      gauges.get(group.toString()).forEach(c -> params.add(MetricsHandler.PREFIX_PARAM, c));
-      AtomicReference<Object> result = new AtomicReference<>();
-      try {
-        metricsHandler.handleRequest(params, (k, v) -> {
-          if (k.equals("metrics")) {
-            result.set(v);
-          }
-        });
-        NamedList nl = (NamedList)result.get();
-        if (nl != null) {
-          for (Iterator<Map.Entry<String, Object>> it = nl.iterator(); it.hasNext(); ) {
-            Map.Entry<String, Object> entry = it.next();
-            String registry = entry.getKey();
-            if (group != Group.core) { // add nodeName suffix
-              registry = registry + "." + nodeName;
-            }
-
-            RrdDb db = getOrCreateDb(registry, group);
-            if (db == null) {
-              continue;
-            }
-            // set the timestamp
-            Sample s = db.createSample(TimeUnit.SECONDS.convert(timeSource.getEpochTimeNs(), TimeUnit.NANOSECONDS));
-            NamedList<Object> values = (NamedList<Object>)entry.getValue();
-            AtomicBoolean dirty = new AtomicBoolean(false);
-            counters.get(group.toString()).forEach(c -> {
-              Number val = (Number)values.get(c);
-              if (val != null) {
-                dirty.set(true);
-                s.setValue(c, val.doubleValue());
-              }
-            });
-            gauges.get(group.toString()).forEach(c -> {
-              Number val = (Number)values.get(c);
-              if (val != null) {
-                dirty.set(true);
-                s.setValue(c, val.doubleValue());
-              }
-            });
-            if (dirty.get()) {
-              s.update();
-            }
-          }
-        }
-      } catch (Exception e) {
-        log.warn("Exception collecting local " + group + " metrics", e);
-      }
-    }
-  }
-
-  private void collectGlobalMetrics() {
-    if (!amIOverseerLeader()) {
-      return;
-    }
-    Set<String> nodes = new HashSet<>(cloudManager.getClusterStateProvider().getLiveNodes());
-    NodeStateProvider nodeStateProvider = cloudManager.getNodeStateProvider();
-    Set<String> collTags = new HashSet<>();
-    collTags.addAll(counters.get(Group.core.toString()));
-    collTags.addAll(gauges.get(Group.core.toString()));
-
-    Set<String> nodeTags = new HashSet<>();
-    String nodePrefix = "metrics:" + SolrMetricManager.getRegistryName(Group.node) + ":";
-    counters.get(Group.node.toString()).forEach(name -> {
-      nodeTags.add(nodePrefix + name);
-    });
-    gauges.get(Group.node.toString()).forEach(name -> {
-      nodeTags.add(nodePrefix + name);
-    });
-    String jvmPrefix = "metrics:" + SolrMetricManager.getRegistryName(Group.jvm) + ":";
-    counters.get(Group.jvm.toString()).forEach(name -> {
-      nodeTags.add(jvmPrefix + name);
-    });
-    gauges.get(Group.jvm.toString()).forEach(name -> {
-      nodeTags.add(jvmPrefix + name);
-    });
-
-    // per-registry totals
-    // XXX at the moment the type of metrics that we collect allows
-    // adding all partial values. At some point it may be necessary to implement
-    // other aggregation functions.
-    // group : registry : name : value
-    Map<Group, Map<String, Map<String, Number>>> totals = new HashMap<>();
-
-    // collect and aggregate per-collection totals
-    for (String node : nodes) {
-      if (cloudManager.isClosed() || Thread.interrupted()) {
-        return;
-      }
-      // add core-level stats
-      Map<String, Map<String, List<ReplicaInfo>>> infos = nodeStateProvider.getReplicaInfo(node, collTags);
-      infos.forEach((coll, shards) -> {
-        shards.forEach((sh, replicas) -> {
-          String registry = SolrMetricManager.getRegistryName(Group.collection, coll);
-          Map<String, Number> perReg = totals
-              .computeIfAbsent(Group.collection, g -> new HashMap<>())
-              .computeIfAbsent(registry, r -> new HashMap<>());
-          replicas.forEach(ri -> {
-            collTags.forEach(tag -> {
-              double value = ((Number)ri.getVariable(tag, 0.0)).doubleValue();
-              DoubleAdder adder = (DoubleAdder)perReg.computeIfAbsent(tag, t -> new DoubleAdder());
-              adder.add(value);
-            });
-          });
-        });
-      });
-      // add node-level stats
-      Map<String, Object> nodeValues = nodeStateProvider.getNodeValues(node, nodeTags);
-      for (Group g : Arrays.asList(Group.node, Group.jvm)) {
-        String registry = SolrMetricManager.getRegistryName(g);
-        Map<String, Number> perReg = totals
-            .computeIfAbsent(g, gr -> new HashMap<>())
-            .computeIfAbsent(registry, r -> new HashMap<>());
-        Set<String> names = new HashSet<>();
-        names.addAll(counters.get(g.toString()));
-        names.addAll(gauges.get(g.toString()));
-        names.forEach(name -> {
-          String tag = "metrics:" + registry + ":" + name;
-          double value = ((Number)nodeValues.getOrDefault(tag, 0.0)).doubleValue();
-          DoubleAdder adder = (DoubleAdder)perReg.computeIfAbsent(name, t -> new DoubleAdder());
-          adder.add(value);
-        });
-      }
-    }
-
-    // add numNodes
-    String nodeReg = SolrMetricManager.getRegistryName(Group.node);
-    Map<String, Number> perNodeReg = totals
-        .computeIfAbsent(Group.node, gr -> new HashMap<>())
-        .computeIfAbsent(nodeReg, r -> new HashMap<>());
-    perNodeReg.put(NUM_NODES_KEY, nodes.size());
-
-    // add some global collection-level stats
-    try {
-      ClusterState state = cloudManager.getClusterStateProvider().getClusterState();
-      state.forEachCollection(coll -> {
-        String registry = SolrMetricManager.getRegistryName(Group.collection, coll.getName());
-        Map<String, Number> perReg = totals
-            .computeIfAbsent(Group.collection, g -> new HashMap<>())
-            .computeIfAbsent(registry, r -> new HashMap<>());
-        Slice[] slices = coll.getActiveSlicesArr();
-        perReg.put(NUM_SHARDS_KEY, slices.length);
-        DoubleAdder numActiveReplicas = new DoubleAdder();
-        for (Slice s : slices) {
-          s.forEach(r -> {
-            if (r.isActive(state.getLiveNodes())) {
-              numActiveReplicas.add(1.0);
-            }
-          });
-        }
-        perReg.put(NUM_REPLICAS_KEY, numActiveReplicas);
-      });
-    } catch (IOException e) {
-      log.warn("Exception getting cluster state", e);
-    }
-
-    // now update the db-s
-    totals.forEach((group, perGroup) -> {
-      perGroup.forEach((reg, perReg) -> {
-        RrdDb db = getOrCreateDb(reg, group);
-        if (db == null) {
-          return;
-        }
-        try {
-          // set the timestamp
-          Sample s = db.createSample(TimeUnit.SECONDS.convert(timeSource.getEpochTimeNs(), TimeUnit.NANOSECONDS));
-          AtomicBoolean dirty = new AtomicBoolean(false);
-          List<Group> groups = new ArrayList<>();
-          groups.add(group);
-          if (group == Group.collection) {
-            groups.add(Group.core);
-          }
-          for (Group g : groups) {
-            counters.get(g.toString()).forEach(c -> {
-              Number val = perReg.get(c);
-              if (val != null) {
-                dirty.set(true);
-                s.setValue(c, val.doubleValue());
-              }
-            });
-            gauges.get(g.toString()).forEach(c -> {
-              Number val = perReg.get(c);
-              if (val != null) {
-                dirty.set(true);
-                s.setValue(c, val.doubleValue());
-              }
-            });
-          }
-          if (dirty.get()) {
-            s.update();
-          }
-        } catch (Exception e) {
-          log.warn("Exception updating metrics history for registry " + reg, e);
-        }
-      });
-    });
-  }
-
-  private RrdDef createDef(String registry, Group group) {
-    registry = SolrMetricManager.enforcePrefix(registry);
-
-    // base sampling period is collectPeriod - samples more frequent than
-    // that will be dropped, samples less frequent will be interpolated
-    RrdDef def = new RrdDef(URI_PREFIX + registry, collectPeriod);
-    // set the start time early enough so that the first sample is always later
-    // than the start of the archive
-    def.setStartTime(TimeUnit.SECONDS.convert(timeSource.getEpochTimeNs(), TimeUnit.NANOSECONDS) - def.getStep());
-
-    // add datasources
-    List<Group> groups = new ArrayList<>();
-    groups.add(group);
-    if (group == Group.collection) {
-      groups.add(Group.core);
-    }
-    for (Group g : groups) {
-      // use NaN when more than 1 sample is missing
-      counters.get(g.toString()).forEach(name ->
-          def.addDatasource(name, DsType.COUNTER, collectPeriod * 2, Double.NaN, Double.NaN));
-      gauges.get(g.toString()).forEach(name ->
-          def.addDatasource(name, DsType.GAUGE, collectPeriod * 2, Double.NaN, Double.NaN));
-    }
-    if (groups.contains(Group.node)) {
-      // add numNodes gauge
-      def.addDatasource(NUM_NODES_KEY, DsType.GAUGE, collectPeriod * 2, Double.NaN, Double.NaN);
-    }
-
-    // add archives
-
-    // use AVERAGE consolidation,
-    // use NaN when >50% samples are missing
-    def.addArchive(ConsolFun.AVERAGE, 0.5, 1, 240); // 4 hours
-    def.addArchive(ConsolFun.AVERAGE, 0.5, 10, 288); // 48 hours
-    def.addArchive(ConsolFun.AVERAGE, 0.5, 60, 336); // 2 weeks
-    def.addArchive(ConsolFun.AVERAGE, 0.5, 240, 180); // 1 month (30 days)
-    def.addArchive(ConsolFun.AVERAGE, 0.5, 1440, 365); // 1 year
-    return def;
-  }
-
-  private RrdDb getOrCreateDb(String registry, Group group) {
-    RrdDb db = knownDbs.computeIfAbsent(registry, r -> {
-      RrdDef def = createDef(r, group);
-      try {
-        RrdDb newDb = new RrdDb(def, factory);
-        return newDb;
-      } catch (IOException e) {
-        return null;
-      }
-    });
-    return db;
-  }
-
-  @Override
-  public void close() {
-    log.debug("Closing " + hashCode());
-    if (collectService != null) {
-      collectService.shutdownNow();
-    }
-    if (factory != null) {
-      factory.close();
-    }
-    knownDbs.clear();
-  }
-
-  public enum Cmd {
-    LIST, STATUS, GET, DELETE;
-
-    static final Map<String, Cmd> actions = Collections.unmodifiableMap(
-        Stream.of(Cmd.values())
-            .collect(toMap(Cmd::toLower, Function.identity())));
-
-    public static Cmd get(String p) {
-      return p == null ? null : actions.get(p.toLowerCase(Locale.ROOT));
-    }
-
-    public String toLower() {
-      return toString().toLowerCase(Locale.ROOT);
-    }
-  }
-
-  public enum Format {
-    LIST, STRING, GRAPH;
-
-    static final Map<String, Format> formats = Collections.unmodifiableMap(
-        Stream.of(Format.values())
-            .collect(toMap(Format::toLower, Function.identity())));
-
-    public static Format get(String p) {
-      return p == null ? null : formats.get(p.toLowerCase(Locale.ROOT));
-    }
-
-    public String toLower() {
-      return toString().toLowerCase(Locale.ROOT);
-    }
-  }
-
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    String actionStr = req.getParams().get(CommonParams.ACTION);
-    if (actionStr == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'action' is a required param");
-    }
-    Cmd cmd = Cmd.get(actionStr);
-    if (cmd == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "unknown 'action' param '" + actionStr + "', supported actions: " + Cmd.actions);
-    }
-    final SimpleOrderedMap<Object> res = new SimpleOrderedMap<>();
-    rsp.add("metrics", res);
-    switch (cmd) {
-      case LIST:
-        int rows = req.getParams().getInt(CommonParams.ROWS, SolrRrdBackendFactory.DEFAULT_MAX_DBS);
-        List<Pair<String, Long>> lst = factory.list(rows);
-        lst.forEach(p -> {
-          SimpleOrderedMap<Object> data = new SimpleOrderedMap<>();
-          // RrdDb always uses seconds - convert here for compatibility
-          data.add("lastModified", TimeUnit.SECONDS.convert(p.second(), TimeUnit.MILLISECONDS));
-          data.add("node", nodeName);
-          res.add(p.first(), data);
-        });
-        break;
-      case GET:
-        String name = req.getParams().get(CommonParams.NAME);
-        if (name == null) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'name' is a required param");
-        }
-        String[] dsNames = req.getParams().getParams("ds");
-        String formatStr = req.getParams().get("format", Format.LIST.toString());
-        Format format = Format.get(formatStr);
-        if (format == null) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "unknown 'format' param '" + formatStr + "', supported formats: " + Format.formats);
-        }
-        if (factory.exists(name)) {
-          // get a throwaway copy (safe to close and discard)
-          RrdDb db = new RrdDb(URI_PREFIX + name, true, factory);
-          SimpleOrderedMap<Object> data = new SimpleOrderedMap<>();
-          data.add("data", getDbData(db, dsNames, format, req.getParams()));
-          data.add("lastModified", db.getLastUpdateTime());
-          data.add("node", nodeName);
-          res.add(name, data);
-          db.close();
-        }
-        break;
-      case STATUS:
-        name = req.getParams().get(CommonParams.NAME);
-        if (name == null) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'name' is a required param");
-        }
-        if (factory.exists(name)) {
-          // get a throwaway copy (safe to close and discard)
-          RrdDb db = new RrdDb(URI_PREFIX + name, true, factory);
-          SimpleOrderedMap<Object> status = new SimpleOrderedMap<>();
-          status.add("status", getDbStatus(db));
-          status.add("node", nodeName);
-          res.add(name, status);
-          db.close();
-        }
-        break;
-      case DELETE:
-        name = req.getParams().get(CommonParams.NAME);
-        if (name == null) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'name' is a required param");
-        }
-        if (name.equalsIgnoreCase("all") || name.equals("*")) {
-          factory.removeAll();
-        } else {
-          factory.remove(name);
-        }
-        rsp.add("success", "ok");
-        break;
-    }
-    // when using in-memory DBs non-overseer node has no access to overseer DBs - in this case
-    // forward the request to Overseer leader if available
-    if (!factory.isPersistent()) {
-      String leader = getOverseerLeader();
-      if (leader != null && !amIOverseerLeader(leader)) {
-        // get & merge remote response
-        NamedList<Object> remoteRes = handleRemoteRequest(leader, req);
-        mergeRemoteRes(rsp, remoteRes);
-      }
-    }
-    SimpleOrderedMap<Object> apiState = new SimpleOrderedMap<>();
-    apiState.add("enableReplicas", enableReplicas);
-    apiState.add("enableNodes", enableNodes);
-    apiState.add("mode", enable ? (factory.isPersistent() ? "index" : "memory") : "inactive");
-    if (!factory.isPersistent()) {
-      apiState.add("message", "WARNING: metrics history is not being persisted. Create .system collection to start persisting history.");
-    }
-    rsp.add("state", apiState);
-    rsp.getResponseHeader().add("zkConnected", cloudManager != null);
-  }
-
-  private NamedList<Object> handleRemoteRequest(String nodeName, SolrQueryRequest req) {
-    String baseUrl = Utils.getBaseUrlForNodeName(nodeName, overseerUrlScheme);
-    String url;
-    try {
-      URL u = new URL(baseUrl);
-      u = new URL(u.getProtocol(), u.getHost(), u.getPort(), "/api/cluster/metrics/history");
-      url = u.toString();
-    } catch (MalformedURLException e) {
-      log.warn("Invalid Overseer url '" + baseUrl + "', unable to fetch remote metrics history", e);
-      return null;
-    }
-    // always use javabin
-    ModifiableSolrParams params = new ModifiableSolrParams(req.getParams());
-    params.set(CommonParams.WT, "javabin");
-    url = url + "?" + params.toString();
-    try {
-      byte[] data = cloudManager.httpRequest(url, SolrRequest.METHOD.GET, null, null, HttpClientUtil.DEFAULT_CONNECT_TIMEOUT, true);
-      // response is always a NamedList
-      try (JavaBinCodec codec = new JavaBinCodec()) {
-        return (NamedList<Object>)codec.unmarshal(new ByteArrayInputStream(data));
-      }
-    } catch (IOException e) {
-      log.warn("Exception forwarding request to Overseer at " + url, e);
-      return null;
-    }
-  }
-
-  private void mergeRemoteRes(SolrQueryResponse rsp, NamedList<Object> remoteRes) {
-    if (remoteRes == null || remoteRes.get("metrics") == null) {
-      return;
-    }
-    NamedList<Object> remoteMetrics = (NamedList<Object>)remoteRes.get("metrics");
-    SimpleOrderedMap localMetrics = (SimpleOrderedMap) rsp.getValues().get("metrics");
-    remoteMetrics.forEach((k, v) -> localMetrics.add(k, v));
-  }
-
-  private NamedList<Object> getDbStatus(RrdDb db) throws IOException {
-    NamedList<Object> res = new SimpleOrderedMap<>();
-    res.add("lastModified", db.getLastUpdateTime());
-    RrdDef def = db.getRrdDef();
-    res.add("step", def.getStep());
-    res.add("datasourceCount", db.getDsCount());
-    res.add("archiveCount", db.getArcCount());
-    res.add("datasourceNames", Arrays.asList(db.getDsNames()));
-    List<Object> dss = new ArrayList<>(db.getDsCount());
-    res.add("datasources", dss);
-    for (DsDef dsDef : def.getDsDefs()) {
-      Map<String, Object> map = new LinkedHashMap<>();
-      map.put("datasource", dsDef.dump());
-      Datasource ds = db.getDatasource(dsDef.getDsName());
-      map.put("lastValue", ds.getLastValue());
-      dss.add(map);
-    }
-    List<Object> archives = new ArrayList<>(db.getArcCount());
-    res.add("archives", archives);
-    ArcDef[] arcDefs = def.getArcDefs();
-    for (int i = 0; i < db.getArcCount(); i++) {
-      Archive a = db.getArchive(i);
-      Map<String, Object> map = new LinkedHashMap<>();
-      map.put("archive", arcDefs[i].dump());
-      map.put("steps", a.getSteps());
-      map.put("consolFun", a.getConsolFun().name());
-      map.put("xff", a.getXff());
-      map.put("startTime", a.getStartTime());
-      map.put("endTime", a.getEndTime());
-      map.put("rows", a.getRows());
-      archives.add(map);
-    }
-
-    return res;
-  }
-
-  private NamedList<Object> getDbData(RrdDb db, String[] dsNames, Format format, SolrParams params) throws IOException {
-    NamedList<Object> res = new SimpleOrderedMap<>();
-    if (dsNames == null || dsNames.length == 0) {
-      dsNames = db.getDsNames();
-    }
-    StringBuilder str = new StringBuilder();
-    RrdDef def = db.getRrdDef();
-    ArcDef[] arcDefs = def.getArcDefs();
-    for (ArcDef arcDef : arcDefs) {
-      SimpleOrderedMap<Object> map = new SimpleOrderedMap<>();
-      res.add(arcDef.dump(), map);
-      Archive a = db.getArchive(arcDef.getConsolFun(), arcDef.getSteps());
-      // startTime / endTime, arcStep are in seconds
-      FetchRequest fr = db.createFetchRequest(arcDef.getConsolFun(),
-          a.getStartTime() - a.getArcStep(),
-          a.getEndTime() + a.getArcStep());
-      FetchData fd = fr.fetchData();
-      if (format != Format.GRAPH) {
-        // add timestamps separately from values
-        long[] timestamps = fd.getTimestamps();
-        if (format == Format.LIST) {
-          // Arrays.asList works only on arrays of Objects
-          map.add("timestamps", Arrays.stream(timestamps).boxed().collect(Collectors.toList()));
-        } else {
-          str.setLength(0);
-          for (int i = 0; i < timestamps.length; i++) {
-            if (i > 0) {
-              str.append('\n');
-            }
-            str.append(String.valueOf(timestamps[i]));
-          }
-          map.add("timestamps", str.toString());
-        }
-      }
-      SimpleOrderedMap<Object> values = new SimpleOrderedMap<>();
-      map.add("values", values);
-      for (String name : dsNames) {
-        double[] vals = fd.getValues(name);
-        switch (format) {
-          case GRAPH:
-            RrdGraphDef graphDef = new RrdGraphDef();
-            graphDef.setTitle(name);
-            graphDef.datasource(name, fd);
-            graphDef.setStartTime(a.getStartTime() - a.getArcStep());
-            graphDef.setEndTime(a.getEndTime() + a.getArcStep());
-            graphDef.setPoolUsed(false);
-            graphDef.setAltAutoscale(true);
-            graphDef.setAltYGrid(true);
-            graphDef.setAltYMrtg(true);
-            graphDef.setSignature("Apache Solr " + versionString);
-            graphDef.setNoLegend(true);
-            graphDef.setAntiAliasing(true);
-            graphDef.setTextAntiAliasing(true);
-            graphDef.setWidth(500);
-            graphDef.setHeight(175);
-            graphDef.setTimeZone(TimeZone.getDefault());
-            graphDef.setLocale(Locale.ROOT);
-            // redraw immediately
-            graphDef.setLazy(false);
-            // area with a border
-            graphDef.area(name, new Color(0xffb860), null);
-            graphDef.line(name, Color.RED, null, 1.0f);
-            RrdGraph graph = new RrdGraph(graphDef);
-            BufferedImage bi = new BufferedImage(
-                graph.getRrdGraphInfo().getWidth(),
-                graph.getRrdGraphInfo().getHeight(),
-                BufferedImage.TYPE_INT_RGB);
-            graph.render(bi.getGraphics());
-            ByteArrayOutputStream baos = new ByteArrayOutputStream();
-            ImageIO.write(bi, "png", baos);
-            values.add(name, Base64.byteArrayToBase64(baos.toByteArray()));
-            break;
-          case STRING:
-            str.setLength(0);
-            for (int i = 0; i < vals.length; i++) {
-              if (i > 0) {
-                str.append('\n');
-              }
-              str.append(String.valueOf(vals[i]));
-            }
-            values.add(name, str.toString());
-            break;
-          case LIST:
-            values.add(name, Arrays.stream(vals).boxed().collect(Collectors.toList()));
-            break;
-        }
-      }
-    }
-    return res;
-  }
-
-  @Override
-  public String getDescription() {
-    return "A handler for metrics history";
-  }
-
-  @Override
-  public Name getPermissionName(AuthorizationContext request) {
-    return Name.METRICS_HISTORY_READ_PERM;
-  }
-
-  @Override
-  public Boolean registerV2() {
-    return Boolean.TRUE;
-  }
-
-  @Override
-  public Collection<Api> getApis() {
-    return ApiBag.wrapRequestHandlers(this, "metrics.history");
-  }
-
-}
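
The archive definitions in createDef() above control how much history is
retained at each resolution: an archive keeps step * steps * rows seconds of
data, where step is the collectPeriod. A minimal standalone sketch (assuming
only the default collectPeriod of 60 seconds; the class name is illustrative)
that reproduces the retention windows noted in the comments:

    import java.util.concurrent.TimeUnit;

    public class RrdRetentionSketch {
      public static void main(String[] args) {
        long stepSecs = 60; // DEFAULT_COLLECT_PERIOD, in seconds
        // {consolidation steps, rows} pairs matching createDef() above
        long[][] archives = {{1, 240}, {10, 288}, {60, 336}, {240, 180}, {1440, 365}};
        for (long[] a : archives) {
          long retentionSecs = stepSecs * a[0] * a[1];
          System.out.println(a[0] + " x " + a[1] + " rows -> "
              + TimeUnit.SECONDS.toHours(retentionSecs) + " hours");
        }
        // prints 4, 48, 336, 720 and 8760 hours,
        // i.e. 4 hours, 48 hours, 2 weeks, 30 days and 1 year
      }
    }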

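The handler is conventionally exposed at /admin/metrics/history (the v2 path
/api/cluster/metrics/history appears in handleRemoteRequest above). A hedged
SolrJ sketch of fetching one registry's history - the node address and the
"solr.jvm" registry name are assumptions for illustration:

    import org.apache.solr.client.solrj.SolrRequest;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.GenericSolrRequest;
    import org.apache.solr.common.params.ModifiableSolrParams;
    import org.apache.solr.common.util.NamedList;

    public class MetricsHistorySketch {
      public static void main(String[] args) throws Exception {
        try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
          ModifiableSolrParams params = new ModifiableSolrParams();
          params.set("action", "get");      // Cmd.GET, see the Cmd enum above
          params.set("name", "solr.jvm");   // a registry name, as returned by action=list
          params.set("format", "list");     // Format.LIST, STRING or GRAPH
          NamedList<Object> rsp = client.request(
              new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/metrics/history", params));
          System.out.println(rsp.get("metrics"));
        }
      }
    }
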
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/PluginInfoHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/PluginInfoHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/PluginInfoHandler.java
deleted file mode 100644
index 8bdc478..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/PluginInfoHandler.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import java.util.Map;
-
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.SolrInfoBean;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-/**
- * @since solr 1.2
- */
-public class PluginInfoHandler extends RequestHandlerBase
-{
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception 
-  {
-    SolrParams params = req.getParams();
-    
-    boolean stats = params.getBool( "stats", false );
-    rsp.add( "plugins", getSolrInfoBeans( req.getCore(), stats ) );
-    rsp.setHttpCaching(false);
-  }
-  
-  private static SimpleOrderedMap<Object> getSolrInfoBeans( SolrCore core, boolean stats )
-  {
-    SimpleOrderedMap<Object> list = new SimpleOrderedMap<>();
-    for (SolrInfoBean.Category cat : SolrInfoBean.Category.values())
-    {
-      SimpleOrderedMap<Object> category = new SimpleOrderedMap<>();
-      list.add( cat.name(), category );
-      Map<String, SolrInfoBean> reg = core.getInfoRegistry();
-      for (Map.Entry<String,SolrInfoBean> entry : reg.entrySet()) {
-        SolrInfoBean m = entry.getValue();
-        if (m.getCategory() != cat) continue;
-
-        String na = "Not Declared";
-        SimpleOrderedMap<Object> info = new SimpleOrderedMap<>();
-        category.add( entry.getKey(), info );
-
-        info.add( NAME,          (m.getName()       !=null ? m.getName()        : na) );
-        info.add( "description", (m.getDescription()!=null ? m.getDescription() : na) );
-
-        if (stats) {
-          info.add( "stats", m.getMetricsSnapshot());
-        }
-      }
-    }
-    return list;
-  }
-  
-  
-  //////////////////////// SolrInfoMBeans methods //////////////////////
-
-  @Override
-  public String getDescription() {
-    return "Registry";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-}
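
PluginInfoHandler walks the core's info registry and groups the beans by
category, optionally attaching a metrics snapshot per bean. A hedged SolrJ
sketch of invoking it (this assumes the handler's conventional /admin/plugins
registration and a hypothetical core named "techproducts"):

    import java.util.Collections;

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.SolrRequest;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.GenericSolrRequest;
    import org.apache.solr.common.params.MapSolrParams;
    import org.apache.solr.common.util.NamedList;

    public class PluginInfoSketch {
      public static void main(String[] args) throws Exception {
        try (SolrClient client =
                 new HttpSolrClient.Builder("http://localhost:8983/solr/techproducts").build()) {
          GenericSolrRequest req = new GenericSolrRequest(SolrRequest.METHOD.GET,
              "/admin/plugins", new MapSolrParams(Collections.singletonMap("stats", "true")));
          NamedList<Object> rsp = client.request(req);
          // "plugins" is keyed by category (QUERY, UPDATE, ADMIN, ...), then by registry key
          System.out.println(rsp.get("plugins"));
        }
      }
    }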

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/PrepRecoveryOp.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/PrepRecoveryOp.java b/solr/core/src/java/org/apache/solr/handler/admin/PrepRecoveryOp.java
deleted file mode 100644
index d064e78..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/PrepRecoveryOp.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.admin;
-
-import java.lang.invoke.MethodHandles;
-import java.util.Objects;
-
-import org.apache.solr.cloud.CloudDescriptor;
-import org.apache.solr.cloud.ZkShardTerms;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.handler.admin.CoreAdminHandler.CallInfo;
-import org.apache.solr.util.TestInjection;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-class PrepRecoveryOp implements CoreAdminHandler.CoreAdminOp {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  @Override
-  public void execute(CallInfo it) throws Exception {
-    assert TestInjection.injectPrepRecoveryOpPauseForever();
-
-    final SolrParams params = it.req.getParams();
-
-    String cname = params.get(CoreAdminParams.CORE);
-    if (cname == null) {
-      cname = "";
-    }
-
-    String nodeName = params.get("nodeName");
-    String coreNodeName = params.get("coreNodeName");
-    Replica.State waitForState = Replica.State.getState(params.get(ZkStateReader.STATE_PROP));
-    Boolean checkLive = params.getBool("checkLive");
-    Boolean onlyIfLeader = params.getBool("onlyIfLeader");
-    Boolean onlyIfLeaderActive = params.getBool("onlyIfLeaderActive");
-
-
-    CoreContainer coreContainer = it.handler.coreContainer;
-    // wait long enough for the leader conflict to work itself out plus a little extra
-    int conflictWaitMs = coreContainer.getZkController().getLeaderConflictResolveWait();
-    int maxTries = (int) Math.round(conflictWaitMs / 1000.0) + 3;
-    log.info("Going to wait for coreNodeName: {}, state: {}, checkLive: {}, onlyIfLeader: {}, onlyIfLeaderActive: {}, maxTime: {} s",
-        coreNodeName, waitForState, checkLive, onlyIfLeader, onlyIfLeaderActive, maxTries);
-    
-    Replica.State state = null;
-    boolean live = false;
-    int retry = 0;
-    while (true) {
-      try (SolrCore core = coreContainer.getCore(cname)) {
-        if (core == null && retry == Math.min(30, maxTries)) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "core not found:"
-              + cname);
-        }
-        if (core != null) {
-          if (onlyIfLeader != null && onlyIfLeader) {
-            if (!core.getCoreDescriptor().getCloudDescriptor().isLeader()) {
-              throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "We are not the leader");
-            }
-          }
-
-          // wait until we are sure the recovering node is ready
-          // to accept updates
-          CloudDescriptor cloudDescriptor = core.getCoreDescriptor()
-              .getCloudDescriptor();
-          String collectionName = cloudDescriptor.getCollectionName();
-
-          if (retry % 15 == 0) {
-            if (retry > 0 && log.isInfoEnabled())
-              log.info("After " + retry + " seconds, core " + cname + " (" +
-                  cloudDescriptor.getShardId() + " of " +
-                  cloudDescriptor.getCollectionName() + ") still does not have state: " +
-                  waitForState + "; forcing ClusterState update from ZooKeeper");
-
-            // force a cluster state update
-            coreContainer.getZkController().getZkStateReader().forceUpdateCollection(collectionName);
-          }
-
-          ClusterState clusterState = coreContainer.getZkController().getClusterState();
-          DocCollection collection = clusterState.getCollection(collectionName);
-          Slice slice = collection.getSlice(cloudDescriptor.getShardId());
-          if (slice != null) {
-            final Replica replica = slice.getReplicasMap().get(coreNodeName);
-            if (replica != null) {
-              state = replica.getState();
-              live = clusterState.liveNodesContain(nodeName);
-
-              final Replica.State localState = cloudDescriptor.getLastPublished();
-
-              // TODO: This is funky but I've seen this in testing where the replica asks the
-              // leader to be in recovery? Need to track down how that happens ... in the meantime,
-              // this is a safeguard
-              boolean leaderDoesNotNeedRecovery = (onlyIfLeader != null &&
-                  onlyIfLeader &&
-                  core.getName().equals(replica.getStr("core")) &&
-                  waitForState == Replica.State.RECOVERING &&
-                  localState == Replica.State.ACTIVE &&
-                  state == Replica.State.ACTIVE);
-
-              if (leaderDoesNotNeedRecovery) {
-                log.warn("Leader " + core.getName() + " ignoring request to be in the recovering state because it is live and active.");
-              }
-
-              ZkShardTerms shardTerms = coreContainer.getZkController().getShardTerms(collectionName, slice.getName());
-              // if the replica is waiting for leader to see recovery state, the leader should refresh its terms
-              if (waitForState == Replica.State.RECOVERING && shardTerms.registered(coreNodeName) && shardTerms.skipSendingUpdatesTo(coreNodeName)) {
-                // The replica changed its term, then published itself as RECOVERING.
-                // This core already sees the replica as RECOVERING,
-                // so it is guaranteed that one fetch of the latest terms will be enough for this core to see the max published term.
-                shardTerms.refreshTerms();
-              }
-
-              boolean onlyIfActiveCheckResult = onlyIfLeaderActive != null && onlyIfLeaderActive && localState != Replica.State.ACTIVE;
-              log.info("In WaitForState(" + waitForState + "): collection=" + collectionName + ", shard=" + slice.getName() +
-                  ", thisCore=" + core.getName() + ", leaderDoesNotNeedRecovery=" + leaderDoesNotNeedRecovery +
-                  ", isLeader? " + core.getCoreDescriptor().getCloudDescriptor().isLeader() +
-                  ", live=" + live + ", checkLive=" + checkLive + ", currentState=" + state.toString() + ", localState=" + localState + ", nodeName=" + nodeName +
-                  ", coreNodeName=" + coreNodeName + ", onlyIfActiveCheckResult=" + onlyIfActiveCheckResult + ", nodeProps: " + replica);
-
-              if (!onlyIfActiveCheckResult && (state == waitForState || leaderDoesNotNeedRecovery)) { // replica is known to be non-null here
-                if (checkLive == null) {
-                  break;
-                } else if (checkLive && live) {
-                  break;
-                } else if (!checkLive && !live) {
-                  break;
-                }
-              }
-            }
-          }
-        }
-
-        if (retry++ == maxTries) {
-          String collection = null;
-          String leaderInfo = null;
-          String shardId = null;
-          
-          try {
-            CloudDescriptor cloudDescriptor =
-                core.getCoreDescriptor().getCloudDescriptor();
-            collection = cloudDescriptor.getCollectionName();
-            shardId = cloudDescriptor.getShardId();
-            leaderInfo = coreContainer.getZkController().
-                getZkStateReader().getLeaderUrl(collection, shardId, 5000);
-          } catch (Exception exc) {
-            leaderInfo = "Not available due to: " + exc;
-          }
-
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "I was asked to wait on state " + waitForState + " for "
-                  + shardId + " in " + collection + " on " + nodeName
-                  + " but I still do not see the requested state. I see state: "
-                  + Objects.toString(state) + " live:" + live + " leader from ZK: " + leaderInfo);
-        }
-
-        if (coreContainer.isShutDown()) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "Solr is shutting down");
-        }
-      }
-      Thread.sleep(1000);
-    }
-
-    log.info("Waited coreNodeName: " + coreNodeName + ", state: " + waitForState
-        + ", checkLive: " + checkLive + ", onlyIfLeader: " + onlyIfLeader + " for: " + retry + " seconds.");
-  }
-}
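
The heart of PrepRecoveryOp is a bounded poll: re-check cluster state once a
second, break as soon as the requested state (and liveness) is observed, and
fail after maxTries. A stripped-down sketch of the same wait pattern - the
predicate and class names are placeholders, not Solr API:

    import java.util.concurrent.TimeUnit;
    import java.util.function.BooleanSupplier;

    public class WaitForStateSketch {
      // Polls once per second until the condition holds or maxTries is exhausted,
      // mirroring the retry loop in PrepRecoveryOp.execute().
      static boolean waitFor(BooleanSupplier condition, int maxTries) throws InterruptedException {
        for (int retry = 0; retry <= maxTries; retry++) {
          if (condition.getAsBoolean()) {
            return true; // observed the requested state
          }
          TimeUnit.SECONDS.sleep(1);
        }
        return false; // timed out; PrepRecoveryOp throws a SolrException instead
      }

      public static void main(String[] args) throws InterruptedException {
        long deadline = System.currentTimeMillis() + 3000;
        // toy condition standing in for "replica reached the requested state"
        System.out.println(waitFor(() -> System.currentTimeMillis() > deadline, 10));
      }
    }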

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/PropertiesRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/PropertiesRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/PropertiesRequestHandler.java
deleted file mode 100644
index 57a7492..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/PropertiesRequestHandler.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import java.io.IOException;
-import java.util.Enumeration;
-
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.util.RedactionUtils;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-/**
- *
- * @since solr 1.2
- */
-public class PropertiesRequestHandler extends RequestHandlerBase
-{
-
-  public static final String REDACT_STRING = RedactionUtils.getRedactString();
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException 
-  {
-    NamedList<String> props = new SimpleOrderedMap<>();
-    String name = req.getParams().get(NAME);
-    if( name != null ) {
-      String property = getSecuredPropertyValue(name);
-      props.add( name, property);
-    }
-    else {
-      Enumeration<?> enumeration = System.getProperties().propertyNames();
-      while(enumeration.hasMoreElements()){
-        name = (String) enumeration.nextElement();
-        props.add(name, getSecuredPropertyValue(name));
-      }
-    }
-    rsp.add( "system.properties", props );
-    rsp.setHttpCaching(false);
-  }
-
-  private String getSecuredPropertyValue(String name) {
-    if(RedactionUtils.isSystemPropertySensitive(name)){
-      return REDACT_STRING;
-    }
-    return System.getProperty(name);
-  }
-
-  //////////////////////// SolrInfoMBeans methods //////////////////////
-
-  @Override
-  public String getDescription() {
-    return "Get System Properties";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-}
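
The handler redacts the values of sensitive system properties rather than
omitting the keys, so clients can still see which properties are set. A
self-contained sketch of the same idea (the name pattern below is an
illustrative assumption; RedactionUtils applies its own configurable rule):

    import java.util.Properties;
    import java.util.regex.Pattern;

    public class RedactionSketch {
      // hypothetical pattern; not the rule RedactionUtils actually uses
      private static final Pattern SENSITIVE =
          Pattern.compile(".*(password|secret|credential).*", Pattern.CASE_INSENSITIVE);

      public static void main(String[] args) {
        Properties props = System.getProperties();
        for (String name : props.stringPropertyNames()) {
          String value = SENSITIVE.matcher(name).matches()
              ? "--REDACTED--" : props.getProperty(name);
          System.out.println(name + "=" + value);
        }
      }
    }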

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java b/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
deleted file mode 100644
index f0819bd..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.solr.cloud.LeaderElector;
-import org.apache.solr.cloud.OverseerTaskProcessor;
-import org.apache.solr.cloud.overseer.SliceMutator;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.Overseer.QUEUE_OPERATION;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NODE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.ELECTION_NODE_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.LEADER_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.MAX_AT_ONCE_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.MAX_WAIT_SECONDS_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REJOIN_AT_HEAD_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.REBALANCELEADERS;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-class RebalanceLeaders {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  final SolrQueryRequest req;
-  final SolrQueryResponse rsp;
-  final CollectionsHandler collectionsHandler;
-  final CoreContainer coreContainer;
-
-  RebalanceLeaders(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler collectionsHandler) {
-    this.req = req;
-    this.rsp = rsp;
-    this.collectionsHandler = collectionsHandler;
-    coreContainer = collectionsHandler.getCoreContainer();
-  }
-
-  void execute() throws KeeperException, InterruptedException {
-    req.getParams().required().check(COLLECTION_PROP);
-
-    String collectionName = req.getParams().get(COLLECTION_PROP);
-    if (StringUtils.isBlank(collectionName)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "The " + COLLECTION_PROP + " parameter is required for the Rebalance Leaders command.");
-    }
-    coreContainer.getZkController().getZkStateReader().forceUpdateCollection(collectionName);
-    ClusterState clusterState = coreContainer.getZkController().getClusterState();
-    DocCollection dc = clusterState.getCollection(collectionName);
-    if (dc == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' does not exist, no action taken.");
-    }
-    Map<String, String> currentRequests = new HashMap<>();
-    int max = req.getParams().getInt(MAX_AT_ONCE_PROP, Integer.MAX_VALUE);
-    if (max <= 0) max = Integer.MAX_VALUE;
-    int maxWaitSecs = req.getParams().getInt(MAX_WAIT_SECONDS_PROP, 60);
-    NamedList<Object> results = new NamedList<>();
-
-    boolean keepGoing = true;
-    for (Slice slice : dc.getSlices()) {
-      ensurePreferredIsLeader(results, slice, currentRequests);
-      if (currentRequests.size() == max) {
-        log.info("Queued " + max + " leader reassignments, waiting for some to complete.");
-        keepGoing = waitForLeaderChange(currentRequests, maxWaitSecs, false, results);
-        if (!keepGoing) {
-          break; // If we've waited longer than specified, don't continue to wait!
-        }
-      }
-    }
-    if (keepGoing) {
-      keepGoing = waitForLeaderChange(currentRequests, maxWaitSecs, true, results);
-    }
-    if (keepGoing) {
-      log.info("All leader reassignments completed.");
-    } else {
-      log.warn("Exceeded specified timeout of ." + maxWaitSecs + "' all leaders may not have been reassigned");
-    }
-
-    rsp.getValues().addAll(results);
-  }
-
-  private void ensurePreferredIsLeader(NamedList<Object> results,
-                                       Slice slice, Map<String, String> currentRequests) throws KeeperException, InterruptedException {
-    final String inactivePreferreds = "inactivePreferreds";
-    final String alreadyLeaders = "alreadyLeaders";
-    String collectionName = req.getParams().get(COLLECTION_PROP);
-
-    for (Replica replica : slice.getReplicas()) {
-      // Tell the replica to become the leader if we're the preferred leader AND active AND not the leader already
-      if (!replica.getBool(SliceMutator.PREFERRED_LEADER_PROP, false)) {
-        continue;
-      }
-      // OK, we are the preferred leader, are we the actual leader?
-      if (replica.getBool(LEADER_PROP, false)) {
-        //We're a preferred leader, but we're _also_ the leader, don't need to do anything.
-        NamedList<Object> noops = (NamedList<Object>) results.get(alreadyLeaders);
-        if (noops == null) {
-          noops = new NamedList<>();
-          results.add(alreadyLeaders, noops);
-        }
-        NamedList<Object> res = new NamedList<>();
-        res.add("status", "success");
-        res.add("msg", "Already leader");
-        res.add("shard", slice.getName());
-        res.add("nodeName", replica.getNodeName());
-        noops.add(replica.getName(), res);
-        return; // already the leader, do nothing.
-      }
-
-      // We're the preferred leader, but someone else is leader. Only become leader if we're active.
-      if (replica.getState() != Replica.State.ACTIVE) {
-        NamedList<Object> inactives = (NamedList<Object>) results.get(inactivePreferreds);
-        if (inactives == null) {
-          inactives = new NamedList<>();
-          results.add(inactivePreferreds, inactives);
-        }
-        NamedList<Object> res = new NamedList<>();
-        res.add("status", "skipped");
-        res.add("msg", "Node is a referredLeader, but it's inactive. Skipping");
-        res.add("shard", slice.getName());
-        res.add("nodeName", replica.getNodeName());
-        inactives.add(replica.getName(), res);
-        return; // Don't try to become the leader if we're not active!
-      }
-
-      // Replica is the preferred leader but not the actual leader, do something about that.
-      // "Something" is
-      // 1> if the preferred leader isn't first in line, tell it to re-queue itself.
-      // 2> tell the actual leader to re-queue itself.
-
-      ZkStateReader zkStateReader = coreContainer.getZkController().getZkStateReader();
-
-      List<String> electionNodes = OverseerTaskProcessor.getSortedElectionNodes(zkStateReader.getZkClient(),
-          ZkStateReader.getShardLeadersElectPath(collectionName, slice.getName()));
-
-      if (electionNodes.size() < 2) { // if there's only one node in the queue, it should already be the leader and we shouldn't be here anyway.
-        log.info("Rebalancing leaders: slice " + slice.getName() + " has fewer than two elements in the leader " +
-            "election queue, but replica " + replica.getName() + " doesn't think it's the leader.");
-        return;
-      }
-
-      // OK, the sorting for election nodes is a bit strange. If the sequence numbers are the same, then the whole
-      // string is used, but that sorts nodes with the same sequence number by their session IDs from ZK.
-      // While this is deterministic, it's not quite what we need, so re-queue nodes that aren't us and are
-      // watching the leader node.
-
-      String firstWatcher = electionNodes.get(1);
-
-      if (LeaderElector.getNodeName(firstWatcher).equals(replica.getName()) == false) {
-        makeReplicaFirstWatcher(collectionName, slice, replica);
-      }
-
-      String coreName = slice.getReplica(LeaderElector.getNodeName(electionNodes.get(0))).getStr(CORE_NAME_PROP);
-      rejoinElection(collectionName, slice, electionNodes.get(0), coreName, false);
-      waitForNodeChange(collectionName, slice, electionNodes.get(0));
-
-
-      return; // Done with this slice, skip the rest of the replicas.
-    }
-  }
-  // Put the replica at the head of the queue and send all nodes with the same sequence number to the back of the list.
-  void makeReplicaFirstWatcher(String collectionName, Slice slice, Replica replica)
-      throws KeeperException, InterruptedException {
-
-    ZkStateReader zkStateReader = coreContainer.getZkController().getZkStateReader();
-    List<String> electionNodes = OverseerTaskProcessor.getSortedElectionNodes(zkStateReader.getZkClient(),
-        ZkStateReader.getShardLeadersElectPath(collectionName, slice.getName()));
-
-    // First, queue up the preferred leader at the head of the queue.
-    int newSeq = -1;
-    for (String electionNode : electionNodes) {
-      if (LeaderElector.getNodeName(electionNode).equals(replica.getName())) {
-        String coreName = slice.getReplica(LeaderElector.getNodeName(electionNode)).getStr(CORE_NAME_PROP);
-        rejoinElection(collectionName, slice, electionNode, coreName, true);
-        newSeq = waitForNodeChange(collectionName, slice, electionNode);
-        break;
-      }
-    }
-    if (newSeq == -1) {
-      return; // let's not continue if we didn't get what we expect. Possibly we're offline, etc.
-    }
-
-    // Now find other nodes that have the same sequence number as this node and re-queue them at the end of the queue.
-    electionNodes = OverseerTaskProcessor.getSortedElectionNodes(zkStateReader.getZkClient(),
-        ZkStateReader.getShardLeadersElectPath(collectionName, slice.getName()));
-
-    for (String thisNode : electionNodes) {
-      if (LeaderElector.getSeq(thisNode) > newSeq) {
-        break;
-      }
-      if (LeaderElector.getNodeName(thisNode).equals(replica.getName())) {
-        continue;
-      }
-      if (LeaderElector.getSeq(thisNode) == newSeq) {
-        String coreName = slice.getReplica(LeaderElector.getNodeName(thisNode)).getStr(CORE_NAME_PROP);
-        rejoinElection(collectionName, slice, thisNode, coreName, false);
-        waitForNodeChange(collectionName, slice, thisNode);
-      }
-    }
-  }
-
-  int waitForNodeChange(String collectionName, Slice slice, String electionNode) throws InterruptedException, KeeperException {
-    String nodeName = LeaderElector.getNodeName(electionNode);
-    int oldSeq = LeaderElector.getSeq(electionNode);
-    for (int idx = 0; idx < 600; ++idx) {
-      ZkStateReader zkStateReader = coreContainer.getZkController().getZkStateReader();
-      List<String> electionNodes = OverseerTaskProcessor.getSortedElectionNodes(zkStateReader.getZkClient(),
-          ZkStateReader.getShardLeadersElectPath(collectionName, slice.getName()));
-      for (String testNode : electionNodes) {
-        if (LeaderElector.getNodeName(testNode).equals(nodeName) && oldSeq != LeaderElector.getSeq(testNode)) {
-          return LeaderElector.getSeq(testNode);
-        }
-      }
-
-      Thread.sleep(100);
-    }
-    return -1;
-  }
-  
-  private void rejoinElection(String collectionName, Slice slice, String electionNode, String core,
-                              boolean rejoinAtHead) throws KeeperException, InterruptedException {
-    Replica replica = slice.getReplica(LeaderElector.getNodeName(electionNode));
-    Map<String, Object> propMap = new HashMap<>();
-    propMap.put(COLLECTION_PROP, collectionName);
-    propMap.put(SHARD_ID_PROP, slice.getName());
-    propMap.put(QUEUE_OPERATION, REBALANCELEADERS.toLower());
-    propMap.put(CORE_NAME_PROP, core);
-    propMap.put(CORE_NODE_NAME_PROP, replica.getName());
-    propMap.put(ZkStateReader.BASE_URL_PROP, replica.getProperties().get(ZkStateReader.BASE_URL_PROP));
-    propMap.put(REJOIN_AT_HEAD_PROP, Boolean.toString(rejoinAtHead)); // Get ourselves to be first in line.
-    propMap.put(ELECTION_NODE_PROP, electionNode);
-    String asyncId = REBALANCELEADERS.toLower() + "_" + core + "_" + Math.abs(System.nanoTime());
-    propMap.put(ASYNC, asyncId);
-    collectionsHandler.sendToOCPQueue(new ZkNodeProps(propMap)); // ignore response; we construct our own
-  }
-
-  // currentAsyncIds - map of request IDs and reporting data (value)
-  // maxWaitSecs - How long are we going to wait? Defaults to 30 seconds.
-  // waitForAll - if true, do not return until all assignments have been made.
-  // results - a place to stash results for reporting back to the user.
-  //
-  private boolean waitForLeaderChange(Map<String, String> currentAsyncIds, final int maxWaitSecs,
-                                      Boolean waitForAll, NamedList<Object> results)
-      throws KeeperException, InterruptedException {
-
-    if (currentAsyncIds.size() == 0) return true;
-
-    for (int idx = 0; idx < maxWaitSecs * 10; ++idx) {
-      Iterator<Map.Entry<String, String>> iter = currentAsyncIds.entrySet().iterator();
-      boolean foundChange = false;
-      while (iter.hasNext()) {
-        Map.Entry<String, String> pair = iter.next();
-        String asyncId = pair.getKey();
-        if (coreContainer.getZkController().getOverseerFailureMap().contains(asyncId)) {
-          coreContainer.getZkController().getOverseerFailureMap().remove(asyncId);
-          coreContainer.getZkController().clearAsyncId(asyncId);
-          NamedList<Object> fails = (NamedList<Object>) results.get("failures");
-          if (fails == null) {
-            fails = new NamedList<>();
-            results.add("failures", fails);
-          }
-          NamedList<Object> res = new NamedList<>();
-          res.add("status", "failed");
-          res.add("msg", "Failed to assign '" + pair.getValue() + "' to be leader");
-          fails.add(asyncId.substring(REBALANCELEADERS.toLower().length()), res);
-          iter.remove();
-          foundChange = true;
-        } else if (coreContainer.getZkController().getOverseerCompletedMap().contains(asyncId)) {
-          coreContainer.getZkController().getOverseerCompletedMap().remove(asyncId);
-          coreContainer.getZkController().clearAsyncId(asyncId);
-          NamedList<Object> successes = (NamedList<Object>) results.get("successes");
-          if (successes == null) {
-            successes = new NamedList<>();
-            results.add("successes", successes);
-          }
-          NamedList<Object> res = new NamedList<>();
-          res.add("status", "success");
-          res.add("msg", "Assigned '" + pair.getValue() + "' to be leader");
-          successes.add(asyncId.substring(REBALANCELEADERS.toLower().length()), res);
-          iter.remove();
-          foundChange = true;
-        }
-      }
-      // We're done if we're processing a few at a time or all requests are processed.
-      if ((foundChange && waitForAll == false) || currentAsyncIds.size() == 0) {
-        return true;
-      }
-      Thread.sleep(100); //TODO: Is there a better thing to do than sleep here?
-    }
-    return false;
-  }
-
-
-}
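
For context, the handler above backs the REBALANCELEADERS collections API action. Below is a minimal SolrJ sketch of invoking it, assuming a node at localhost:8983 and a hypothetical collection named "test"; the "action", "collection" and "maxWaitSeconds" parameter names match the code above, everything else is illustrative:

    import org.apache.solr.client.solrj.SolrRequest;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.GenericSolrRequest;
    import org.apache.solr.common.params.ModifiableSolrParams;
    import org.apache.solr.common.util.NamedList;

    public class RebalanceLeadersExample {
      public static void main(String[] args) throws Exception {
        try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
          ModifiableSolrParams params = new ModifiableSolrParams();
          params.set("action", "REBALANCELEADERS");
          params.set("collection", "test");   // hypothetical: collection whose preferredLeader replicas should lead
          params.set("maxWaitSeconds", "60"); // upper bound on the polling loop shown above
          NamedList<Object> rsp = client.request(
              new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/collections", params));
          System.out.println(rsp);
        }
      }
    }

The response groups per-replica outcomes under keys such as "successes", "failures", "alreadyLeaders" and "inactivePreferreds", as built by ensurePreferredIsLeader() and waitForLeaderChange().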

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/RequestApplyUpdatesOp.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/RequestApplyUpdatesOp.java b/solr/core/src/java/org/apache/solr/handler/admin/RequestApplyUpdatesOp.java
deleted file mode 100644
index fbb484d..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/RequestApplyUpdatesOp.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.admin;
-
-import java.util.concurrent.Future;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.update.UpdateLog;
-
-class RequestApplyUpdatesOp implements CoreAdminHandler.CoreAdminOp {
-  @Override
-  public void execute(CoreAdminHandler.CallInfo it) throws Exception {
-    SolrParams params = it.req.getParams();
-    String cname = params.required().get(CoreAdminParams.NAME);
-    CoreAdminOperation.log().info("Applying buffered updates on core: " + cname);
-    CoreContainer coreContainer = it.handler.coreContainer;
-    try (SolrCore core = coreContainer.getCore(cname)) {
-      if (core == null)
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Core [" + cname + "] not found");
-      UpdateLog updateLog = core.getUpdateHandler().getUpdateLog();
-      if (updateLog.getState() != UpdateLog.State.BUFFERING) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Core " + cname + " not in buffering state");
-      }
-      Future<UpdateLog.RecoveryInfo> future = updateLog.applyBufferedUpdates();
-      if (future == null) {
-        CoreAdminOperation.log().info("No buffered updates available. core=" + cname);
-        it.rsp.add("core", cname);
-        it.rsp.add("status", "EMPTY_BUFFER");
-        return;
-      }
-      UpdateLog.RecoveryInfo report = future.get();
-      if (report.failed) {
-        SolrException.log(CoreAdminOperation.log(), "Replay failed");
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Replay failed");
-      }
-      coreContainer.getZkController().publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
-      it.rsp.add("core", cname);
-      it.rsp.add("status", "BUFFER_APPLIED");
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      CoreAdminOperation.log().warn("Recovery was interrupted", e);
-    } catch (Exception e) {
-      if (e instanceof SolrException)
-        throw (SolrException) e;
-      else
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not apply buffered updates", e);
-    } finally {
-      if (it.req != null) it.req.close();
-    }
-  }
-}
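
This op implements the REQUESTAPPLYUPDATES core admin action; it only succeeds when the target core's update log is in BUFFERING state (typically during recovery, after buffering has been started). A minimal sketch of invoking it, with a hypothetical core name:

    import org.apache.solr.client.solrj.SolrRequest;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.GenericSolrRequest;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class ApplyBufferedUpdatesExample {
      public static void main(String[] args) throws Exception {
        try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
          ModifiableSolrParams params = new ModifiableSolrParams();
          params.set("action", "REQUESTAPPLYUPDATES");
          params.set("name", "myCore"); // hypothetical; the op requires the "name" parameter
          System.out.println(client.request(
              new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/cores", params)));
        }
      }
    }

On success the response carries status BUFFER_APPLIED, or EMPTY_BUFFER when there was nothing to replay.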

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/RequestSyncShardOp.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/RequestSyncShardOp.java b/solr/core/src/java/org/apache/solr/handler/admin/RequestSyncShardOp.java
deleted file mode 100644
index 55eb70a..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/RequestSyncShardOp.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.admin;
-
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.solr.cloud.SyncStrategy;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.handler.admin.CoreAdminHandler.CallInfo;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.util.RefCounted;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-class RequestSyncShardOp implements CoreAdminHandler.CoreAdminOp {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  @Override
-  public void execute(CallInfo it) throws Exception {
-    final SolrParams params = it.req.getParams();
-
-    log.info("I have been requested to sync up my shard");
-
-    String cname = params.required().get(CoreAdminParams.CORE);
-
-    ZkController zkController = it.handler.coreContainer.getZkController();
-    if (zkController == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Only valid for SolrCloud");
-    }
-
-    SyncStrategy syncStrategy = null;
-    try (SolrCore core = it.handler.coreContainer.getCore(cname)) {
-
-      if (core != null) {
-        syncStrategy = new SyncStrategy(core.getCoreContainer());
-
-        Map<String, Object> props = new HashMap<>();
-        props.put(ZkStateReader.BASE_URL_PROP, zkController.getBaseUrl());
-        props.put(ZkStateReader.CORE_NAME_PROP, cname);
-        props.put(ZkStateReader.NODE_NAME_PROP, zkController.getNodeName());
-
-        boolean success = syncStrategy.sync(zkController, core, new ZkNodeProps(props), true).isSuccess();
-        // solrcloud_debug
-        if (log.isDebugEnabled()) {
-          try {
-            RefCounted<SolrIndexSearcher> searchHolder = core
-                .getNewestSearcher(false);
-            SolrIndexSearcher searcher = searchHolder.get();
-            try {
-              log.debug(core.getCoreContainer()
-                  .getZkController().getNodeName()
-                  + " synched "
-                  + searcher.count(new MatchAllDocsQuery()));
-            } finally {
-              searchHolder.decref();
-            }
-          } catch (Exception e) {
-            log.debug("Error in solrcloud_debug block", e);
-          }
-        }
-        if (!success) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Sync Failed");
-        }
-      } else {
-        SolrException.log(log, "Could not find core to call sync:" + cname);
-      }
-    } finally {
-      // no recoveryStrat close for now
-      if (syncStrategy != null) {
-        syncStrategy.close();
-      }
-    }
-  }
-}
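
The op above is the REQUESTSYNCSHARD core admin action and is valid only in SolrCloud mode. A sketch using the stock SolrJ CoreAdminRequest, which sets the required "core" parameter via setCoreName(); the core name is hypothetical:

    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.CoreAdminRequest;
    import org.apache.solr.client.solrj.response.CoreAdminResponse;
    import org.apache.solr.common.params.CoreAdminParams;

    public class RequestSyncShardExample {
      public static void main(String[] args) throws Exception {
        try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
          CoreAdminRequest req = new CoreAdminRequest();
          req.setAction(CoreAdminParams.CoreAdminAction.REQUESTSYNCSHARD);
          req.setCoreName("myCore"); // hypothetical; maps to the required "core" parameter read above
          CoreAdminResponse rsp = req.process(client);
          System.out.println(rsp);
        }
      }
    }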

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/RestoreCoreOp.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/RestoreCoreOp.java b/solr/core/src/java/org/apache/solr/handler/admin/RestoreCoreOp.java
deleted file mode 100644
index a53324a..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/RestoreCoreOp.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.admin;
-
-import java.net.URI;
-import java.util.Optional;
-
-import org.apache.solr.cloud.CloudDescriptor;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.backup.repository.BackupRepository;
-import org.apache.solr.handler.RestoreCore;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-
-class RestoreCoreOp implements CoreAdminHandler.CoreAdminOp {
-  @Override
-  public void execute(CoreAdminHandler.CallInfo it) throws Exception {
-    final SolrParams params = it.req.getParams();
-    String cname = params.required().get(CoreAdminParams.CORE);
-    String name = params.required().get(NAME);
-
-    ZkController zkController = it.handler.coreContainer.getZkController();
-    if (zkController == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Only valid for SolrCloud");
-    }
-
-    String repoName = params.get(CoreAdminParams.BACKUP_REPOSITORY);
-    BackupRepository repository = it.handler.coreContainer.newBackupRepository(Optional.ofNullable(repoName));
-
-    String location = repository.getBackupLocation(params.get(CoreAdminParams.BACKUP_LOCATION));
-    if (location == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
-          + " parameter or as a default repository property");
-    }
-
-    URI locationUri = repository.createURI(location);
-    try (SolrCore core = it.handler.coreContainer.getCore(cname)) {
-      CloudDescriptor cd = core.getCoreDescriptor().getCloudDescriptor();
-      // This core must be the only replica in its shard; otherwise we cannot guarantee consistency
-      // between replicas, because data added (or an index restored) to this replica would not reach the others.
-      Slice slice = zkController.getClusterState().getCollection(cd.getCollectionName()).getSlice(cd.getShardId());
-      if (slice.getReplicas().size() != 1) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "Failed to restore core=" + core.getName() + ", the core must be the only replica in its shard");
-      }
-      RestoreCore restoreCore = new RestoreCore(repository, core, locationUri, name);
-      boolean success = restoreCore.doRestore();
-      if (!success) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Failed to restore core=" + core.getName());
-      }
-      // Replicas created later will know they are out of date by comparing
-      // their term (0) to the term of this core (1).
-      zkController.getShardTerms(cd.getCollectionName(), cd.getShardId()).ensureHighestTermsAreNotZero();
-    }
-  }
-}
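
RestoreCoreOp backs the RESTORECORE core admin action. Note the guard above: the shard must contain exactly one replica, since restoring into one replica of many would leave the others inconsistent. A sketch with hypothetical core, backup name and location:

    import org.apache.solr.client.solrj.SolrRequest;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.GenericSolrRequest;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class RestoreCoreExample {
      public static void main(String[] args) throws Exception {
        try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
          ModifiableSolrParams params = new ModifiableSolrParams();
          params.set("action", "RESTORECORE");
          params.set("core", "myCore");            // required "core" parameter; hypothetical
          params.set("name", "nightly_backup");    // backup name; hypothetical
          params.set("location", "/backups/solr"); // hypothetical path; may also come from the repository config
          System.out.println(client.request(
              new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/cores", params)));
        }
      }
    }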


[17/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/RequestHandlerUtils.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/RequestHandlerUtils.java b/solr/core/src/java/org/apache/solr/handler/RequestHandlerUtils.java
deleted file mode 100644
index 4441024..0000000
--- a/solr/core/src/java/org/apache/solr/handler/RequestHandlerUtils.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.IOException;
-import java.util.*;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.MapSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.params.UpdateParams;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.update.CommitUpdateCommand;
-import org.apache.solr.update.RollbackUpdateCommand;
-import org.apache.solr.update.processor.UpdateRequestProcessor;
-
-/**
- * Common helper functions for RequestHandlers
- * 
- *
- * @since solr 1.2
- */
-public class RequestHandlerUtils
-{
-  /**
-   * A common way to mark the response format as experimental
-   */
-  public static void addExperimentalFormatWarning( SolrQueryResponse rsp )
-  {
-    rsp.add( "WARNING", "This response format is experimental.  It is likely to change in the future." ); 
-  }
-
-
-  /**
-   * Check the request parameters and decide if it should commit or optimize.
-   * If it does, it will check other related parameters such as "waitFlush" and "waitSearcher"
-   */
-  public static boolean handleCommit(SolrQueryRequest req, UpdateRequestProcessor processor, SolrParams params, boolean force ) throws IOException
-  {
-    if( params == null) {
-      params = new MapSolrParams( new HashMap<String, String>() ); 
-    }
-    
-    boolean optimize = params.getBool( UpdateParams.OPTIMIZE, false );
-    boolean commit   = params.getBool( UpdateParams.COMMIT,   false );
-    boolean softCommit = params.getBool( UpdateParams.SOFT_COMMIT,   false );
-    boolean prepareCommit = params.getBool( UpdateParams.PREPARE_COMMIT,   false );
-
-
-    if( optimize || commit || softCommit || prepareCommit || force ) {
-      CommitUpdateCommand cmd = new CommitUpdateCommand(req, optimize );
-      updateCommit(cmd, params);
-      processor.processCommit( cmd );
-      return true;
-    }
-    
-    
-    return false;
-  }
-
-  
-  private static Set<String> commitParams = new HashSet<>(Arrays.asList(new String[]{UpdateParams.OPEN_SEARCHER, UpdateParams.WAIT_SEARCHER, UpdateParams.SOFT_COMMIT, UpdateParams.EXPUNGE_DELETES, UpdateParams.MAX_OPTIMIZE_SEGMENTS, UpdateParams.PREPARE_COMMIT}));
-
-  public static void validateCommitParams(SolrParams params) {
-    Iterator<String> i = params.getParameterNamesIterator();
-    while (i.hasNext()) {
-      String key = i.next();
-      if (!commitParams.contains(key)) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown commit parameter '" + key + "'");
-      }
-    }
-  }
-  
-  /**
-   * Modify UpdateCommand based on request parameters
-   */
-  public static void updateCommit(CommitUpdateCommand cmd, SolrParams params) {
-    if( params == null ) return;
-
-    cmd.openSearcher = params.getBool( UpdateParams.OPEN_SEARCHER, cmd.openSearcher );
-    cmd.waitSearcher = params.getBool( UpdateParams.WAIT_SEARCHER, cmd.waitSearcher );
-    cmd.softCommit = params.getBool( UpdateParams.SOFT_COMMIT, cmd.softCommit );
-    cmd.expungeDeletes = params.getBool( UpdateParams.EXPUNGE_DELETES, cmd.expungeDeletes );
-    cmd.maxOptimizeSegments = params.getInt( UpdateParams.MAX_OPTIMIZE_SEGMENTS, cmd.maxOptimizeSegments );
-    cmd.prepareCommit = params.getBool( UpdateParams.PREPARE_COMMIT,   cmd.prepareCommit );
-  }
-
-
-  /**
-   * @since Solr 1.4
-   */
-  public static boolean handleRollback(SolrQueryRequest req, UpdateRequestProcessor processor, SolrParams params, boolean force ) throws IOException
-  {
-    if( params == null ) {
-      params = new MapSolrParams( new HashMap<String, String>() ); 
-    }
-    
-    boolean rollback = params.getBool( UpdateParams.ROLLBACK, false );
-    
-    if( rollback || force ) {
-      RollbackUpdateCommand cmd = new RollbackUpdateCommand(req);
-      processor.processRollback( cmd );
-      return true;
-    }
-    return false;
-  }
-
-  /**
-   * @since 6.7
-   */
-  public static void setWt(SolrQueryRequest req, String wt) {
-    SolrParams params = req.getParams();
-    if (params.get(CommonParams.WT) != null) return;//wt is set by user
-    Map<String, String> map = new HashMap<>(1);
-    map.put(CommonParams.WT, wt);
-    map.put("indent", "true");
-    req.setParams(SolrParams.wrapDefaults(params, new MapSolrParams(map)));
-  }
-}
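
validateCommitParams() rejects any parameter outside the small whitelist in commitParams. A minimal sketch of that behavior, assuming this class is on the classpath; the parameter values are illustrative:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.solr.common.params.MapSolrParams;
    import org.apache.solr.handler.RequestHandlerUtils;

    public class CommitParamsExample {
      public static void main(String[] args) {
        Map<String, String> ok = new HashMap<>();
        ok.put("softCommit", "true");
        ok.put("waitSearcher", "false");
        // Accepted: both keys are in the commitParams whitelist above.
        RequestHandlerUtils.validateCommitParams(new MapSolrParams(ok));

        Map<String, String> bad = new HashMap<>();
        bad.put("waitFlush", "true"); // not in the whitelist
        try {
          RequestHandlerUtils.validateCommitParams(new MapSolrParams(bad));
        } catch (Exception e) {
          System.out.println(e.getMessage()); // Unknown commit parameter 'waitFlush'
        }
      }
    }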

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/RestoreCore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/RestoreCore.java b/solr/core/src/java/org/apache/solr/handler/RestoreCore.java
deleted file mode 100644
index e750631..0000000
--- a/solr/core/src/java/org/apache/solr/handler/RestoreCore.java
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.Locale;
-import java.util.concurrent.Callable;
-import java.util.concurrent.Future;
-
-import org.apache.lucene.codecs.CodecUtil;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexInput;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.core.DirectoryFactory;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.backup.repository.BackupRepository;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class RestoreCore implements Callable<Boolean> {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final String backupName;
-  private final URI backupLocation;
-  private final SolrCore core;
-  private final BackupRepository backupRepo;
-
-  public RestoreCore(BackupRepository backupRepo, SolrCore core, URI location, String name) {
-    this.backupRepo = backupRepo;
-    this.core = core;
-    this.backupLocation = location;
-    this.backupName = name;
-  }
-
-  @Override
-  public Boolean call() throws Exception {
-    return doRestore();
-  }
-
-  public boolean doRestore() throws Exception {
-
-    URI backupPath = backupRepo.resolve(backupLocation, backupName);
-    SimpleDateFormat dateFormat = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT);
-    String restoreIndexName = "restore." + dateFormat.format(new Date());
-    String restoreIndexPath = core.getDataDir() + restoreIndexName;
-
-    String indexDirPath = core.getIndexDir();
-    Directory restoreIndexDir = null;
-    Directory indexDir = null;
-    try {
-
-      restoreIndexDir = core.getDirectoryFactory().get(restoreIndexPath,
-          DirectoryFactory.DirContext.DEFAULT, core.getSolrConfig().indexConfig.lockType);
-
-      //Prefer local copy.
-      indexDir = core.getDirectoryFactory().get(indexDirPath,
-          DirectoryFactory.DirContext.DEFAULT, core.getSolrConfig().indexConfig.lockType);
-
-      //Move all files from backupDir to restoreIndexDir
-      for (String filename : backupRepo.listAll(backupPath)) {
-        checkInterrupted();
-        log.info("Copying file {} to restore directory ", filename);
-        try (IndexInput indexInput = backupRepo.openInput(backupPath, filename, IOContext.READONCE)) {
-          Long checksum = null;
-          try {
-            checksum = CodecUtil.retrieveChecksum(indexInput);
-          } catch (Exception e) {
-            log.warn("Could not read checksum from index file: " + filename, e);
-          }
-          long length = indexInput.length();
-          IndexFetcher.CompareResult compareResult = IndexFetcher.compareFile(indexDir, filename, length, checksum);
-          if (!compareResult.equal ||
-              (IndexFetcher.filesToAlwaysDownloadIfNoChecksums(filename, length, compareResult))) {
-            backupRepo.copyFileTo(backupPath, filename, restoreIndexDir);
-          } else {
-            //prefer local copy
-            restoreIndexDir.copyFrom(indexDir, filename, filename, IOContext.READONCE);
-          }
-        } catch (Exception e) {
-          log.warn("Exception while restoring the backup index ", e);
-          throw new SolrException(SolrException.ErrorCode.UNKNOWN, "Exception while restoring the backup index", e);
-        }
-      }
-      log.debug("Switching directories");
-      core.modifyIndexProps(restoreIndexName);
-
-      boolean success;
-      try {
-        core.getUpdateHandler().newIndexWriter(false);
-        openNewSearcher();
-        success = true;
-        log.info("Successfully restored to the backup index");
-      } catch (Exception e) {
-        //Rollback to the old index directory. Delete the restore index directory and mark the restore as failed.
-        log.warn("Could not switch to restored index. Rolling back to the current index", e);
-        Directory dir = null;
-        try {
-          dir = core.getDirectoryFactory().get(core.getDataDir(), DirectoryFactory.DirContext.META_DATA,
-              core.getSolrConfig().indexConfig.lockType);
-          dir.deleteFile(IndexFetcher.INDEX_PROPERTIES);
-        } finally {
-          if (dir != null) {
-            core.getDirectoryFactory().release(dir);
-          }
-        }
-
-        core.getDirectoryFactory().doneWithDirectory(restoreIndexDir);
-        core.getDirectoryFactory().remove(restoreIndexDir);
-        core.getUpdateHandler().newIndexWriter(false);
-        openNewSearcher();
-        throw new SolrException(SolrException.ErrorCode.UNKNOWN, "Exception while restoring the backup index", e);
-      }
-      if (success) {
-        core.getDirectoryFactory().doneWithDirectory(indexDir);
-        // Cleanup all index files not associated with any *named* snapshot.
-        core.deleteNonSnapshotIndexFiles(indexDirPath);
-      }
-
-      return true;
-    } finally {
-      if (restoreIndexDir != null) {
-        core.getDirectoryFactory().release(restoreIndexDir);
-      }
-      if (indexDir != null) {
-        core.getDirectoryFactory().release(indexDir);
-      }
-    }
-  }
-
-  private void checkInterrupted() throws InterruptedException {
-    if (Thread.currentThread().isInterrupted()) {
-      throw new InterruptedException("Stopping restore process. Thread was interrupted.");
-    }
-  }
-
-  private void openNewSearcher() throws Exception {
-    Future[] waitSearcher = new Future[1];
-    core.getSearcher(true, false, waitSearcher, true);
-    if (waitSearcher[0] != null) {
-      waitSearcher[0].get();
-    }
-  }
-}
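
The heart of doRestore() is the per-file decision: read the codec footer checksum of the backup copy via CodecUtil.retrieveChecksum() and, when it matches the local file, copy locally instead of from the (possibly remote) backup repository. A small standalone sketch of that primitive against an arbitrary index directory; the path is hypothetical:

    import java.nio.file.Paths;

    import org.apache.lucene.codecs.CodecUtil;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.store.IOContext;
    import org.apache.lucene.store.IndexInput;

    public class ChecksumExample {
      public static void main(String[] args) throws Exception {
        try (Directory dir = FSDirectory.open(Paths.get("/var/solr/data/myCore/index"))) {
          for (String file : dir.listAll()) {
            try (IndexInput in = dir.openInput(file, IOContext.READONCE)) {
              try {
                // The same footer checksum RestoreCore reads to decide whether
                // the local copy of a file matches the backup copy.
                System.out.println(file + " checksum=" + CodecUtil.retrieveChecksum(in));
              } catch (Exception e) {
                System.out.println(file + " has no checksum footer");
              }
            }
          }
        }
      }
    }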

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/SQLHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/SQLHandler.java b/solr/core/src/java/org/apache/solr/handler/SQLHandler.java
deleted file mode 100644
index 6b0330a..0000000
--- a/solr/core/src/java/org/apache/solr/handler/SQLHandler.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.sql.ResultSetMetaData;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.calcite.config.Lex;
-import org.apache.solr.client.solrj.io.Tuple;
-import org.apache.solr.client.solrj.io.comp.StreamComparator;
-import org.apache.solr.client.solrj.io.stream.ExceptionStream;
-import org.apache.solr.client.solrj.io.stream.TupleStream;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.handler.sql.CalciteSolrDriver;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.security.AuthorizationContext;
-import org.apache.solr.security.PermissionNameProvider;
-import org.apache.solr.util.plugin.SolrCoreAware;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class SQLHandler extends RequestHandlerBase implements SolrCoreAware, PermissionNameProvider {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private static String defaultZkhost = null;
-  private static String defaultWorkerCollection = null;
-
-  static final String sqlNonCloudErrorMsg = "/sql handler only works in Solr Cloud mode";
-
-  private boolean isCloud = false;
-
-  public void inform(SolrCore core) {
-    CoreContainer coreContainer = core.getCoreContainer();
-
-    if(coreContainer.isZooKeeperAware()) {
-      defaultZkhost = core.getCoreContainer().getZkController().getZkServerAddress();
-      defaultWorkerCollection = core.getCoreDescriptor().getCollectionName();
-      isCloud = true;
-    }
-  }
-
-  @Override
-  public PermissionNameProvider.Name getPermissionName(AuthorizationContext request) {
-    return PermissionNameProvider.Name.READ_PERM;
-  }
-
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    ModifiableSolrParams params = new ModifiableSolrParams(req.getParams());
-    params = adjustParams(params);
-    req.setParams(params);
-
-    String sql = params.get("stmt");
-    // Set defaults for parameters
-    params.set("numWorkers", params.getInt("numWorkers", 1));
-    params.set("workerCollection", params.get("workerCollection", defaultWorkerCollection));
-    params.set("workerZkhost", params.get("workerZkhost", defaultZkhost));
-    params.set("aggregationMode", params.get("aggregationMode", "facet"));
-
-    TupleStream tupleStream = null;
-    try {
-
-      if(!isCloud) {
-        throw new IllegalStateException(sqlNonCloudErrorMsg);
-      }
-
-      if(sql == null) {
-        throw new Exception("stmt parameter cannot be null");
-      }
-
-      String url = CalciteSolrDriver.CONNECT_STRING_PREFIX;
-
-      Properties properties = new Properties();
-      // Add all query parameters
-      Iterator<String> parameterNamesIterator = params.getParameterNamesIterator();
-      while(parameterNamesIterator.hasNext()) {
-        String param = parameterNamesIterator.next();
-        properties.setProperty(param, params.get(param));
-      }
-
-      // Set these last to ensure that they are set properly
-      properties.setProperty("lex", Lex.MYSQL.toString());
-      properties.setProperty("zk", defaultZkhost);
-
-      String driverClass = CalciteSolrDriver.class.getCanonicalName();
-
-      // JDBC driver requires metadata from the SQLHandler. Default to false since this adds a new Metadata stream.
-      boolean includeMetadata = params.getBool("includeMetadata", false);
-      tupleStream = new SqlHandlerStream(url, sql, null, properties, driverClass, includeMetadata);
-
-      tupleStream = new StreamHandler.TimerStream(new ExceptionStream(tupleStream));
-
-      rsp.add("result-set", tupleStream);
-    } catch(Exception e) {
-      //Catch the SQL parsing and query transformation exceptions.
-      if(tupleStream != null) {
-        tupleStream.close();
-      }
-      SolrException.log(log, e);
-      rsp.add("result-set", new StreamHandler.DummyErrorStream(e));
-    }
-  }
-
-  public String getDescription() {
-    return "SQLHandler";
-  }
-
-  public String getSource() {
-    return null;
-  }
-
-  /*
-   * Only necessary for SolrJ JDBC driver since metadata has to be passed back
-   */
-  private static class SqlHandlerStream extends CalciteJDBCStream {
-    private final boolean includeMetadata;
-    private boolean firstTuple = true;
-    List<String> metadataFields = new ArrayList<>();
-    Map<String, String> metadataAliases = new HashMap<>();
-
-    SqlHandlerStream(String connectionUrl, String sqlQuery, StreamComparator definedSort,
-                     Properties connectionProperties, String driverClassName, boolean includeMetadata)
-        throws IOException {
-      super(connectionUrl, sqlQuery, definedSort, connectionProperties, driverClassName);
-
-      this.includeMetadata = includeMetadata;
-    }
-
-    @Override
-    public Tuple read() throws IOException {
-      // Return a metadata tuple as the first tuple and then pass through to the JDBCStream.
-      if(firstTuple) {
-        try {
-          Map<String, Object> fields = new HashMap<>();
-
-          firstTuple = false;
-
-          ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
-
-          for(int i = 1; i <= resultSetMetaData.getColumnCount(); i++) {
-            String columnName = resultSetMetaData.getColumnName(i);
-            String columnLabel = resultSetMetaData.getColumnLabel(i);
-            metadataFields.add(columnName);
-            metadataAliases.put(columnName, columnLabel);
-          }
-
-          if(includeMetadata) {
-            fields.put("isMetadata", true);
-            fields.put("fields", metadataFields);
-            fields.put("aliases", metadataAliases);
-            return new Tuple(fields);
-          }
-        } catch (SQLException e) {
-          throw new IOException(e);
-        }
-      }
-
-      Tuple tuple = super.read();
-      if(!tuple.EOF) {
-        tuple.fieldNames = metadataFields;
-        tuple.fieldLabels = metadataAliases;
-      }
-      return tuple;
-    }
-  }
-
-  private ModifiableSolrParams adjustParams(SolrParams params) {
-    ModifiableSolrParams adjustedParams = new ModifiableSolrParams();
-    adjustedParams.add(params);
-    adjustedParams.add(CommonParams.OMIT_HEADER, "true");
-    return adjustedParams;
-  }
-}
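
Clients reach this handler by sending a statement in the "stmt" parameter to /sql and reading back a tuple stream. A minimal SolrJ sketch, assuming a hypothetical "techproducts" collection; SolrStream is the generic streaming client, not something specific to this handler:

    import org.apache.solr.client.solrj.io.Tuple;
    import org.apache.solr.client.solrj.io.stream.SolrStream;
    import org.apache.solr.common.params.CommonParams;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class SqlHandlerExample {
      public static void main(String[] args) throws Exception {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set(CommonParams.QT, "/sql");
        params.set("stmt", "SELECT id FROM techproducts LIMIT 10"); // hypothetical collection
        params.set("aggregationMode", "facet"); // the default set by the handler above
        SolrStream stream = new SolrStream("http://localhost:8983/solr/techproducts", params);
        try {
          stream.open();
          Tuple tuple = stream.read();
          while (!tuple.EOF) {
            System.out.println(tuple.getString("id"));
            tuple = stream.read();
          }
        } finally {
          stream.close();
        }
      }
    }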

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/SchemaHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/SchemaHandler.java b/solr/core/src/java/org/apache/solr/handler/SchemaHandler.java
deleted file mode 100644
index fb84e84..0000000
--- a/solr/core/src/java/org/apache/solr/handler/SchemaHandler.java
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.solr.api.Api;
-import org.apache.solr.api.ApiBag;
-import org.apache.solr.cloud.ZkSolrResourceLoader;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.MapSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.schema.IndexSchema;
-import org.apache.solr.schema.ManagedIndexSchema;
-import org.apache.solr.schema.SchemaManager;
-import org.apache.solr.schema.ZkIndexSchemaReader;
-import org.apache.solr.security.AuthorizationContext;
-import org.apache.solr.security.PermissionNameProvider;
-import org.apache.solr.util.plugin.SolrCoreAware;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static java.util.Collections.singletonMap;
-import static org.apache.solr.common.params.CommonParams.JSON;
-import static org.apache.solr.schema.IndexSchema.SchemaProps.Handler.COPY_FIELDS;
-import static org.apache.solr.schema.IndexSchema.SchemaProps.Handler.DYNAMIC_FIELDS;
-import static org.apache.solr.schema.IndexSchema.SchemaProps.Handler.FIELDS;
-import static org.apache.solr.schema.IndexSchema.SchemaProps.Handler.FIELD_TYPES;
-
-public class SchemaHandler extends RequestHandlerBase implements SolrCoreAware, PermissionNameProvider {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private boolean isImmutableConfigSet = false;
-
-  private static final Map<String, String> level2;
-
-  static {
-    Map s = Utils.makeMap(
-        FIELD_TYPES.nameLower, null,
-        FIELDS.nameLower, "fl",
-        DYNAMIC_FIELDS.nameLower, "fl",
-        COPY_FIELDS.nameLower, null
-    );
-
-    level2 = Collections.unmodifiableMap(s);
-  }
-
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    RequestHandlerUtils.setWt(req, JSON);
-    String httpMethod = (String) req.getContext().get("httpMethod");
-    if ("POST".equals(httpMethod)) {
-      if (isImmutableConfigSet) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "ConfigSet is immutable");
-      }
-      if (req.getContentStreams() == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "no stream");
-      }
-
-      try {
-        List errs = new SchemaManager(req).performOperations();
-        if (!errs.isEmpty())
-          throw new ApiBag.ExceptionWithErrObject(SolrException.ErrorCode.BAD_REQUEST,"error processing commands", errs);
-      } catch (IOException e) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Error reading input String " + e.getMessage(), e);
-      }
-    } else {
-      handleGET(req, rsp);
-    }
-  }
-
-  @Override
-  public PermissionNameProvider.Name getPermissionName(AuthorizationContext ctx) {
-    switch (ctx.getHttpMethod()) {
-      case "GET":
-        return PermissionNameProvider.Name.SCHEMA_READ_PERM;
-      case "POST":
-        return PermissionNameProvider.Name.SCHEMA_EDIT_PERM;
-      default:
-        return null;
-    }
-  }
-
-  private void handleGET(SolrQueryRequest req, SolrQueryResponse rsp) {
-    try {
-      String path = (String) req.getContext().get("path");
-      switch (path) {
-        case "/schema":
-          rsp.add(IndexSchema.SCHEMA, req.getSchema().getNamedPropertyValues());
-          break;
-        case "/schema/version":
-          rsp.add(IndexSchema.VERSION, req.getSchema().getVersion());
-          break;
-        case "/schema/uniquekey":
-          rsp.add(IndexSchema.UNIQUE_KEY, req.getSchema().getUniqueKeyField().getName());
-          break;
-        case "/schema/similarity":
-          rsp.add(IndexSchema.SIMILARITY, req.getSchema().getSimilarityFactory().getNamedPropertyValues());
-          break;
-        case "/schema/name": {
-          final String schemaName = req.getSchema().getSchemaName();
-          if (null == schemaName) {
-            String message = "Schema has no name";
-            throw new SolrException(SolrException.ErrorCode.NOT_FOUND, message);
-          }
-          rsp.add(IndexSchema.NAME, schemaName);
-          break;
-        }
-        case "/schema/zkversion": {
-          int refreshIfBelowVersion = -1;
-          Object refreshParam = req.getParams().get("refreshIfBelowVersion");
-          if (refreshParam != null)
-            refreshIfBelowVersion = (refreshParam instanceof Number) ? ((Number) refreshParam).intValue()
-                : Integer.parseInt(refreshParam.toString());
-          int zkVersion = -1;
-          IndexSchema schema = req.getSchema();
-          if (schema instanceof ManagedIndexSchema) {
-            ManagedIndexSchema managed = (ManagedIndexSchema) schema;
-            zkVersion = managed.getSchemaZkVersion();
-            if (refreshIfBelowVersion != -1 && zkVersion < refreshIfBelowVersion) {
-              log.info("REFRESHING SCHEMA (refreshIfBelowVersion=" + refreshIfBelowVersion +
-                  ", currentVersion=" + zkVersion + ") before returning version!");
-              ZkSolrResourceLoader zkSolrResourceLoader = (ZkSolrResourceLoader) req.getCore().getResourceLoader();
-              ZkIndexSchemaReader zkIndexSchemaReader = zkSolrResourceLoader.getZkIndexSchemaReader();
-              managed = zkIndexSchemaReader.refreshSchemaFromZk(refreshIfBelowVersion);
-              zkVersion = managed.getSchemaZkVersion();
-            }
-          }
-          rsp.add("zkversion", zkVersion);
-          break;
-        }
-        default: {
-          List<String> parts = StrUtils.splitSmart(path, '/');
-          if (parts.get(0).isEmpty()) parts.remove(0);
-          if (parts.size() > 1 && level2.containsKey(parts.get(1))) {
-            String realName = parts.get(1);
-            String fieldName = IndexSchema.nameMapping.get(realName);
-
-            String pathParam = level2.get(realName);
-            if (parts.size() > 2) {
-              req.setParams(SolrParams.wrapDefaults(new MapSolrParams(singletonMap(pathParam, parts.get(2))), req.getParams()));
-            }
-            Map propertyValues = req.getSchema().getNamedPropertyValues(realName, req.getParams());
-            Object o = propertyValues.get(fieldName);
-            if(parts.size()> 2) {
-              String name = parts.get(2);
-              if (o instanceof List) {
-                List list = (List) o;
-                for (Object obj : list) {
-                  if (obj instanceof SimpleOrderedMap) {
-                    SimpleOrderedMap simpleOrderedMap = (SimpleOrderedMap) obj;
-                    if(name.equals(simpleOrderedMap.get("name"))) {
-                      rsp.add(fieldName.substring(0, realName.length() - 1), simpleOrderedMap);
-                      return;
-                    }
-                  }
-                }
-              }
-              throw new SolrException(SolrException.ErrorCode.NOT_FOUND, "No such path " + path);
-            } else {
-              rsp.add(fieldName, o);
-            }
-            return;
-          }
-
-          throw new SolrException(SolrException.ErrorCode.NOT_FOUND, "No such path " + path);
-        }
-      }
-
-    } catch (Exception e) {
-      rsp.setException(e);
-    }
-  }
-
-  private static Set<String> subPaths = new HashSet<>(Arrays.asList(
-      "version",
-      "uniquekey",
-      "name",
-      "similarity",
-      "defaultsearchfield",
-      "solrqueryparser",
-      "zkversion"
-  ));
-  static {
-    subPaths.addAll(level2.keySet());
-  }
-
-  @Override
-  public SolrRequestHandler getSubHandler(String subPath) {
-    List<String> parts = StrUtils.splitSmart(subPath, '/');
-    if (parts.get(0).isEmpty()) parts.remove(0);
-    String prefix =  parts.get(0);
-    if(subPaths.contains(prefix)) return this;
-
-    return null;
-  }
-
-  @Override
-  public String getDescription() {
-    return "CRUD operations over the Solr schema";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-
-  @Override
-  public void inform(SolrCore core) {
-    isImmutableConfigSet = SolrConfigHandler.getImmutable(core);
-  }
-
-  @Override
-  public Collection<Api> getApis() {
-    return ApiBag.wrapRequestHandlers(this, "core.SchemaRead",
-        "core.SchemaRead.fields",
-        "core.SchemaRead.copyFields",
-        "core.SchemaEdit",
-        "core.SchemaRead.dynamicFields_fieldTypes"
-        );
-
-  }
-
-  @Override
-  public Boolean registerV2() {
-    return Boolean.TRUE;
-  }
-}
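
GET requests are answered directly from the paths handled in handleGET() above, while POSTs are routed through SchemaManager.performOperations(). A sketch using SolrJ's typed schema API against a hypothetical core; the field definition is illustrative:

    import java.util.LinkedHashMap;
    import java.util.Map;

    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.schema.SchemaRequest;
    import org.apache.solr.client.solrj.response.schema.SchemaResponse;

    public class SchemaHandlerExample {
      public static void main(String[] args) throws Exception {
        try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/myCore").build()) {
          // GET /schema/fields, served by the level-2 "fields" branch above
          SchemaResponse.FieldsResponse fields = new SchemaRequest.Fields().process(client);
          System.out.println(fields.getFields());

          // POST /schema, routed through SchemaManager.performOperations()
          Map<String, Object> fieldAttrs = new LinkedHashMap<>();
          fieldAttrs.put("name", "price_f"); // hypothetical field
          fieldAttrs.put("type", "pfloat");
          fieldAttrs.put("stored", true);
          new SchemaRequest.AddField(fieldAttrs).process(client);
        }
      }
    }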

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/SnapShooter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/SnapShooter.java b/solr/core/src/java/org/apache/solr/handler/SnapShooter.java
deleted file mode 100644
index 2c3c691..0000000
--- a/solr/core/src/java/org/apache/solr/handler/SnapShooter.java
+++ /dev/null
@@ -1,308 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.nio.file.Paths;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.List;
-import java.util.Locale;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.function.Consumer;
-
-import org.apache.lucene.index.IndexCommit;
-import org.apache.lucene.store.Directory;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.DirectoryFactory.DirContext;
-import org.apache.solr.core.IndexDeletionPolicyWrapper;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.backup.repository.BackupRepository;
-import org.apache.solr.core.backup.repository.BackupRepository.PathType;
-import org.apache.solr.core.backup.repository.LocalFileSystemRepository;
-import org.apache.solr.core.snapshots.SolrSnapshotMetaDataManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * <p> Provides functionality equivalent to the snapshooter script </p>
- * This is no longer used in standard replication.
- *
- *
- * @since solr 1.4
- */
-public class SnapShooter {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private SolrCore solrCore;
-  private String snapshotName = null;
-  private String directoryName = null;
-  private URI baseSnapDirPath = null;
-  private URI snapshotDirPath = null;
-  private BackupRepository backupRepo = null;
-  private String commitName; // can be null
-
-  @Deprecated
-  public SnapShooter(SolrCore core, String location, String snapshotName) {
-    String snapDirStr = null;
-    // Note - This logic is only applicable to the use case where a shared file system is exposed via
-    // a local file-system interface (primarily for backwards compatibility). For other use cases, users
-    // will be required to specify the "location" where the backup should be stored.
-    if (location == null) {
-      snapDirStr = core.getDataDir();
-    } else {
-      snapDirStr = core.getCoreDescriptor().getInstanceDir().resolve(location).normalize().toString();
-    }
-    initialize(new LocalFileSystemRepository(), core, Paths.get(snapDirStr).toUri(), snapshotName, null);
-  }
-
-  public SnapShooter(BackupRepository backupRepo, SolrCore core, URI location, String snapshotName, String commitName) {
-    initialize(backupRepo, core, location, snapshotName, commitName);
-  }
-
-  private void initialize(BackupRepository backupRepo, SolrCore core, URI location, String snapshotName, String commitName) {
-    this.solrCore = Objects.requireNonNull(core);
-    this.backupRepo = Objects.requireNonNull(backupRepo);
-    this.baseSnapDirPath = location;
-    this.snapshotName = snapshotName;
-    if (snapshotName != null) {
-      directoryName = "snapshot." + snapshotName;
-    } else {
-      SimpleDateFormat fmt = new SimpleDateFormat(DATE_FMT, Locale.ROOT);
-      directoryName = "snapshot." + fmt.format(new Date());
-    }
-    this.snapshotDirPath = backupRepo.resolve(location, directoryName);
-    this.commitName = commitName;
-  }
-
-  public BackupRepository getBackupRepository() {
-    return backupRepo;
-  }
-
-  /**
-   * Gets the parent directory of the snapshots. This is the {@code location}
-   * given in the constructor.
-   */
-  public URI getLocation() {
-    return this.baseSnapDirPath;
-  }
-
-  public void validateDeleteSnapshot() {
-    Objects.requireNonNull(this.snapshotName);
-
-    boolean dirFound = false;
-    String[] paths;
-    try {
-      paths = backupRepo.listAll(baseSnapDirPath);
-      for (String path : paths) {
-        if (path.equals(this.directoryName)
-            && backupRepo.getPathType(baseSnapDirPath.resolve(path)) == PathType.DIRECTORY) {
-          dirFound = true;
-          break;
-        }
-      }
-      if(dirFound == false) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Snapshot " + snapshotName + " cannot be found in directory: " + baseSnapDirPath);
-      }
-    } catch (IOException e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to find snapshot " + snapshotName + " in directory: " + baseSnapDirPath, e);
-    }
-  }
-
-  protected void deleteSnapAsync(final ReplicationHandler replicationHandler) {
-    new Thread(() -> deleteNamedSnapshot(replicationHandler)).start();
-  }
-
-  public void validateCreateSnapshot() throws IOException {
-    // Note - Removed the current behavior of creating the directory hierarchy.
-    // Do we really need to provide this support?
-    if (!backupRepo.exists(baseSnapDirPath)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "Directory does not exist: " + baseSnapDirPath);
-    }
-
-    if (backupRepo.exists(snapshotDirPath)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "Snapshot directory already exists: " + snapshotDirPath);
-    }
-  }
-
-  public NamedList createSnapshot() throws Exception {
-    IndexCommit indexCommit;
-    if (commitName != null) {
-      indexCommit = getIndexCommitFromName();
-      return createSnapshot(indexCommit);
-    } else {
-      indexCommit = getIndexCommit();
-      IndexDeletionPolicyWrapper deletionPolicy = solrCore.getDeletionPolicy();
-      deletionPolicy.saveCommitPoint(indexCommit.getGeneration());
-      try {
-        return createSnapshot(indexCommit);
-      } finally {
-        deletionPolicy.releaseCommitPoint(indexCommit.getGeneration());
-      }
-    }
-  }
-
-  private IndexCommit getIndexCommit() throws IOException {
-    IndexDeletionPolicyWrapper delPolicy = solrCore.getDeletionPolicy();
-    IndexCommit indexCommit = delPolicy.getLatestCommit();
-    if (indexCommit != null) {
-      return indexCommit;
-    }
-    return solrCore.withSearcher(searcher -> searcher.getIndexReader().getIndexCommit());
-  }
-
-  private IndexCommit getIndexCommitFromName() throws IOException {
-    assert commitName != null;
-    IndexCommit indexCommit;
-    SolrSnapshotMetaDataManager snapshotMgr = solrCore.getSnapshotMetaDataManager();
-    Optional<IndexCommit> commit = snapshotMgr.getIndexCommitByName(commitName);
-    if (commit.isPresent()) {
-      indexCommit = commit.get();
-    } else {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Unable to find an index commit with name " + commitName +
-          " for core " + solrCore.getName());
-    }
-    return indexCommit;
-  }
-
-  public void createSnapAsync(final int numberToKeep, Consumer<NamedList> result) throws IOException {
-    IndexCommit indexCommit;
-    if (commitName != null) {
-      indexCommit = getIndexCommitFromName();
-    } else {
-      indexCommit = getIndexCommit();
-    }
-    createSnapAsync(indexCommit, numberToKeep, result);
-  }
-
-  private void createSnapAsync(final IndexCommit indexCommit, final int numberToKeep, Consumer<NamedList> result) {
-    //TODO should use Solr's ExecutorUtil
-    new Thread(() -> {
-      try {
-        result.accept(createSnapshot(indexCommit));
-      } catch (Exception e) {
-        log.error("Exception while creating snapshot", e);
-        NamedList snapShootDetails = new NamedList<>();
-        snapShootDetails.add("exception", e.getMessage());
-        result.accept(snapShootDetails);
-      } finally {
-        solrCore.getDeletionPolicy().releaseCommitPoint(indexCommit.getGeneration());
-      }
-      if (snapshotName == null) {
-        try {
-          deleteOldBackups(numberToKeep);
-        } catch (IOException e) {
-          log.warn("Unable to delete old snapshots ", e);
-        }
-      }
-    }).start();
-
-  }
-
-  // note: remember to reserve the indexCommit first so it won't get deleted concurrently
-  protected NamedList createSnapshot(final IndexCommit indexCommit) throws Exception {
-    assert indexCommit != null;
-    log.info("Creating backup snapshot " + (snapshotName == null ? "<not named>" : snapshotName) + " at " + baseSnapDirPath);
-    boolean success = false;
-    try {
-      NamedList<Object> details = new NamedList<>();
-      details.add("startTime", new Date().toString());//bad; should be Instant.now().toString()
-
-      Collection<String> files = indexCommit.getFileNames();
-      Directory dir = solrCore.getDirectoryFactory().get(solrCore.getIndexDir(), DirContext.DEFAULT, solrCore.getSolrConfig().indexConfig.lockType);
-      try {
-        for(String fileName : files) {
-          backupRepo.copyFileFrom(dir, fileName, snapshotDirPath);
-        }
-      } finally {
-        solrCore.getDirectoryFactory().release(dir);
-      }
-
-      details.add("fileCount", files.size());
-      details.add("status", "success");
-      details.add("snapshotCompletedAt", new Date().toString());//bad; should be Instant.now().toString()
-      details.add("snapshotName", snapshotName);
-      log.info("Done creating backup snapshot: " + (snapshotName == null ? "<not named>" : snapshotName) +
-          " at " + baseSnapDirPath);
-      success = true;
-      return details;
-    } finally {
-      if (!success) {
-        try {
-          backupRepo.deleteDirectory(snapshotDirPath);
-        } catch (Exception excDuringDelete) {
-          log.warn("Failed to delete "+snapshotDirPath+" after snapshot creation failed due to: "+excDuringDelete);
-        }
-      }
-    }
-  }
-
-  private void deleteOldBackups(int numberToKeep) throws IOException {
-    String[] paths = backupRepo.listAll(baseSnapDirPath);
-    List<OldBackupDirectory> dirs = new ArrayList<>();
-    for (String f : paths) {
-      if (backupRepo.getPathType(baseSnapDirPath.resolve(f)) == PathType.DIRECTORY) {
-        OldBackupDirectory obd = new OldBackupDirectory(baseSnapDirPath, f);
-        if (obd.getTimestamp().isPresent()) {
-          dirs.add(obd);
-        }
-      }
-    }
-    if (numberToKeep > dirs.size() - 1) {
-      return;
-    }
-    Collections.sort(dirs);
-    int i = 1;
-    for (OldBackupDirectory dir : dirs) {
-      if (i++ > numberToKeep) {
-        backupRepo.deleteDirectory(dir.getPath());
-      }
-    }
-  }
-
-  protected void deleteNamedSnapshot(ReplicationHandler replicationHandler) {
-    log.info("Deleting snapshot: " + snapshotName);
-
-    NamedList<Object> details = new NamedList<>();
-
-    try {
-      URI path = baseSnapDirPath.resolve("snapshot." + snapshotName);
-      backupRepo.deleteDirectory(path);
-
-      details.add("status", "success");
-      details.add("snapshotDeletedAt", new Date().toString());
-
-    } catch (IOException e) {
-      details.add("status", "Unable to delete snapshot: " + snapshotName);
-      log.warn("Unable to delete snapshot: " + snapshotName, e);
-    }
-
-    replicationHandler.snapShootDetails = details;
-  }
-
-  public static final String DATE_FMT = "yyyyMMddHHmmssSSS";
-
-}

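For reference, the workflow the deleted SnapShooter exposed can be driven in a few lines. The following is a minimal sketch against the constructor and methods shown above, not code from the repository; the BackupRepository, SolrCore, and location URI are assumed to already exist in the caller, and the snapshot name "nightly" is illustrative:

    // A minimal sketch, assuming the caller already has a BackupRepository,
    // a live SolrCore, and a base location URI; none of these come from the deleted file.
    NamedList backupNow(BackupRepository repo, SolrCore core, URI location) throws Exception {
      SnapShooter snapShooter = new SnapShooter(repo, core, location, "nightly", null);
      snapShooter.validateCreateSnapshot();  // rejects a missing base dir or a pre-existing snapshot dir
      return snapShooter.createSnapshot();   // reserves the commit point, copies the index files, releases it
    }
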
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java b/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java
deleted file mode 100644
index 53d543f..0000000
--- a/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java
+++ /dev/null
@@ -1,898 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import org.apache.solr.api.Api;
-import org.apache.solr.api.ApiBag;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.cloud.ZkSolrResourceLoader;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.MapSolrParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.CommandOperation;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.ConfigOverlay;
-import org.apache.solr.core.PluginInfo;
-import org.apache.solr.core.RequestParams;
-import org.apache.solr.core.SolrConfig;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.solr.request.LocalSolrQueryRequest;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.schema.SchemaManager;
-import org.apache.solr.security.AuthorizationContext;
-import org.apache.solr.security.PermissionNameProvider;
-import org.apache.solr.util.DefaultSolrThreadFactory;
-import org.apache.solr.util.RTimer;
-import org.apache.solr.util.SolrPluginUtils;
-import org.apache.solr.util.plugin.SolrCoreAware;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static com.google.common.base.Strings.isNullOrEmpty;
-import static java.util.Collections.singletonList;
-import static org.apache.solr.common.params.CoreAdminParams.NAME;
-import static org.apache.solr.common.util.StrUtils.formatString;
-import static org.apache.solr.common.util.Utils.makeMap;
-import static org.apache.solr.core.ConfigOverlay.NOT_EDITABLE;
-import static org.apache.solr.core.ConfigOverlay.ZNODEVER;
-import static org.apache.solr.core.ConfigSetProperties.IMMUTABLE_CONFIGSET_ARG;
-import static org.apache.solr.core.PluginInfo.APPENDS;
-import static org.apache.solr.core.PluginInfo.DEFAULTS;
-import static org.apache.solr.core.PluginInfo.INVARIANTS;
-import static org.apache.solr.core.RequestParams.USEPARAM;
-import static org.apache.solr.core.SolrConfig.PluginOpts.REQUIRE_CLASS;
-import static org.apache.solr.core.SolrConfig.PluginOpts.REQUIRE_NAME;
-import static org.apache.solr.core.SolrConfig.PluginOpts.REQUIRE_NAME_IN_OVERLAY;
-import static org.apache.solr.schema.FieldType.CLASS_NAME;
-
-public class SolrConfigHandler extends RequestHandlerBase implements SolrCoreAware, PermissionNameProvider {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  public static final String CONFIGSET_EDITING_DISABLED_ARG = "disable.configEdit";
-  public static final boolean configEditing_disabled = Boolean.getBoolean(CONFIGSET_EDITING_DISABLED_ARG);
-  private static final Map<String, SolrConfig.SolrPluginInfo> namedPlugins;
-  private Lock reloadLock = new ReentrantLock(true);
-
-  public Lock getReloadLock() {
-    return reloadLock;
-  }
-
-  private boolean isImmutableConfigSet = false;
-
-  static {
-    Map<String, SolrConfig.SolrPluginInfo> map = new HashMap<>();
-    for (SolrConfig.SolrPluginInfo plugin : SolrConfig.plugins) {
-      if (plugin.options.contains(REQUIRE_NAME) || plugin.options.contains(REQUIRE_NAME_IN_OVERLAY)) {
-        map.put(plugin.getCleanTag().toLowerCase(Locale.ROOT), plugin);
-      }
-    }
-    namedPlugins = Collections.unmodifiableMap(map);
-  }
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-
-    RequestHandlerUtils.setWt(req, CommonParams.JSON);
-    String httpMethod = (String) req.getContext().get("httpMethod");
-    Command command = new Command(req, rsp, httpMethod);
-    if ("POST".equals(httpMethod)) {
-      if (configEditing_disabled || isImmutableConfigSet) {
-        final String reason = configEditing_disabled ? "due to " + CONFIGSET_EDITING_DISABLED_ARG : "because the ConfigSet is immutable";
-        throw new SolrException(SolrException.ErrorCode.FORBIDDEN, "solrconfig editing is not enabled " + reason);
-      }
-      try {
-        command.handlePOST();
-      } finally {
-        RequestHandlerUtils.addExperimentalFormatWarning(rsp);
-      }
-    } else {
-      command.handleGET();
-    }
-  }
-
-  @Override
-  public void inform(SolrCore core) {
-    isImmutableConfigSet = getImmutable(core);
-  }
-
-  public static boolean getImmutable(SolrCore core) {
-    NamedList configSetProperties = core.getConfigSetProperties();
-    if (configSetProperties == null) return false;
-    Object immutable = configSetProperties.get(IMMUTABLE_CONFIGSET_ARG);
-    return immutable != null && Boolean.parseBoolean(immutable.toString());
-  }
-
-
-  private class Command {
-    private final SolrQueryRequest req;
-    private final SolrQueryResponse resp;
-    private final String method;
-    private String path;
-    List<String> parts;
-
-    private Command(SolrQueryRequest req, SolrQueryResponse resp, String httpMethod) {
-      this.req = req;
-      this.resp = resp;
-      this.method = httpMethod;
-      path = (String) req.getContext().get("path");
-      if (path == null) path = getDefaultPath();
-      parts = StrUtils.splitSmart(path, '/');
-      if (parts.get(0).isEmpty()) parts.remove(0);
-    }
-
-    private String getDefaultPath() {
-      return "/config";
-    }
-
-    private void handleGET() {
-      if (parts.size() == 1) {
-        //this is the whole config. send out the whole payload
-        resp.add("config", getConfigDetails(null, req));
-      } else {
-        if (ConfigOverlay.NAME.equals(parts.get(1))) {
-          resp.add(ConfigOverlay.NAME, req.getCore().getSolrConfig().getOverlay());
-        } else if (RequestParams.NAME.equals(parts.get(1))) {
-          if (parts.size() == 3) {
-            RequestParams params = req.getCore().getSolrConfig().getRequestParams();
-            RequestParams.ParamSet p = params.getParams(parts.get(2));
-            Map m = new LinkedHashMap<>();
-            m.put(ZNODEVER, params.getZnodeVersion());
-            if (p != null) {
-              m.put(RequestParams.NAME, makeMap(parts.get(2), p.toMap(new LinkedHashMap<>())));
-            }
-            resp.add(SolrQueryResponse.NAME, m);
-          } else {
-            resp.add(SolrQueryResponse.NAME, req.getCore().getSolrConfig().getRequestParams());
-          }
-
-        } else {
-          if (ZNODEVER.equals(parts.get(1))) {
-            resp.add(ZNODEVER, Utils.makeMap(
-                ConfigOverlay.NAME, req.getCore().getSolrConfig().getOverlay().getZnodeVersion(),
-                RequestParams.NAME, req.getCore().getSolrConfig().getRequestParams().getZnodeVersion()));
-            boolean isStale = false;
-            int expectedVersion = req.getParams().getInt(ConfigOverlay.NAME, -1);
-            int actualVersion = req.getCore().getSolrConfig().getOverlay().getZnodeVersion();
-            if (expectedVersion > actualVersion) {
-              log.info("expecting overlay version {} but my version is {}", expectedVersion, actualVersion);
-              isStale = true;
-            } else if (expectedVersion != -1) {
-              log.info("I already have the expected version {} of config", expectedVersion);
-            }
-            expectedVersion = req.getParams().getInt(RequestParams.NAME, -1);
-            actualVersion = req.getCore().getSolrConfig().getRequestParams().getZnodeVersion();
-            if (expectedVersion > actualVersion) {
-              log.info("expecting params version {} but my version is {}", expectedVersion, actualVersion);
-              isStale = true;
-            } else if (expectedVersion != -1) {
-              log.info("I already have the expected version {} of params", expectedVersion);
-            }
-            if (isStale && req.getCore().getResourceLoader() instanceof ZkSolrResourceLoader) {
-              new Thread(() -> {
-                if (!reloadLock.tryLock()) {
-                  log.info("Another reload is in progress . Not doing anything");
-                  return;
-                }
-                try {
-                  log.info("Trying to update my configs");
-                  SolrCore.getConfListener(req.getCore(), (ZkSolrResourceLoader) req.getCore().getResourceLoader()).run();
-                } catch (Exception e) {
-                  log.error("Unable to refresh conf ", e);
-                } finally {
-                  reloadLock.unlock();
-                }
-              }, SolrConfigHandler.class.getSimpleName() + "-refreshconf").start();
-            } else {
-              log.info("isStale {} , resourceloader {}", isStale, req.getCore().getResourceLoader().getClass().getName());
-            }
-
-          } else {
-            Map<String, Object> m = getConfigDetails(parts.get(1), req);
-            Map<String, Object> val = makeMap(parts.get(1), m.get(parts.get(1)));
-            String componentName = req.getParams().get("componentName");
-            if (componentName != null) {
-              Map map = (Map) val.get(parts.get(1));
-              if (map != null) {
-                val.put(parts.get(1), makeMap(componentName, map.get(componentName)));
-              }
-            }
-
-            resp.add("config", val);
-          }
-        }
-      }
-    }
-
-    private Map<String, Object> getConfigDetails(String componentType, SolrQueryRequest req) {
-      String componentName = componentType == null ? null : req.getParams().get("componentName");
-      boolean showParams = req.getParams().getBool("expandParams", false);
-      Map<String, Object> map = this.req.getCore().getSolrConfig().toMap(new LinkedHashMap<>());
-      if (componentType != null && !SolrRequestHandler.TYPE.equals(componentType)) return map;
-      Map reqHandlers = (Map) map.get(SolrRequestHandler.TYPE);
-      if (reqHandlers == null) map.put(SolrRequestHandler.TYPE, reqHandlers = new LinkedHashMap<>());
-      List<PluginInfo> plugins = this.req.getCore().getImplicitHandlers();
-      for (PluginInfo plugin : plugins) {
-        if (SolrRequestHandler.TYPE.equals(plugin.type)) {
-          if (!reqHandlers.containsKey(plugin.name)) {
-            reqHandlers.put(plugin.name, plugin);
-          }
-        }
-      }
-      if (!showParams) return map;
-      for (Object o : reqHandlers.entrySet()) {
-        Map.Entry e = (Map.Entry) o;
-        if (componentName == null || e.getKey().equals(componentName)) {
-          Map<String, Object> m = expandUseParams(req, e.getValue());
-          e.setValue(m);
-        }
-      }
-
-      return map;
-    }
-
-    private Map<String, Object> expandUseParams(SolrQueryRequest req,
-                                                Object plugin) {
-
-      Map<String, Object> pluginInfo = null;
-      if (plugin instanceof Map) {
-        pluginInfo = (Map) plugin;
-      } else if (plugin instanceof PluginInfo) {
-        pluginInfo = ((PluginInfo) plugin).toMap(new LinkedHashMap<>());
-      }
-      String useParams = (String) pluginInfo.get(USEPARAM);
-      String useparamsInReq = req.getOriginalParams().get(USEPARAM);
-      if (useParams != null || useparamsInReq != null) {
-        Map m = new LinkedHashMap<>();
-        pluginInfo.put("_useParamsExpanded_", m);
-        List<String> params = new ArrayList<>();
-        if (useParams != null) params.addAll(StrUtils.splitSmart(useParams, ','));
-        if (useparamsInReq != null) params.addAll(StrUtils.splitSmart(useparamsInReq, ','));
-        for (String param : params) {
-          RequestParams.ParamSet p = this.req.getCore().getSolrConfig().getRequestParams().getParams(param);
-          if (p != null) {
-            m.put(param, p);
-          } else {
-            m.put(param, "[NOT AVAILABLE]");
-          }
-        }
-
-
-        LocalSolrQueryRequest r = new LocalSolrQueryRequest(req.getCore(), req.getOriginalParams());
-        r.getContext().put(USEPARAM, useParams);
-        NamedList nl = new PluginInfo(SolrRequestHandler.TYPE, pluginInfo).initArgs;
-        SolrPluginUtils.setDefaults(r,
-            getSolrParamsFromNamedList(nl, DEFAULTS),
-            getSolrParamsFromNamedList(nl, APPENDS),
-            getSolrParamsFromNamedList(nl, INVARIANTS));
-        //SolrParams.wrapDefaults(maskUseParams, req.getParams())
-
-        MapSolrParams mask = new MapSolrParams(ImmutableMap.<String, String>builder()
-            .put("componentName", "")
-            .put("expandParams", "")
-            .build());
-        pluginInfo.put("_effectiveParams_",
-            SolrParams.wrapDefaults(mask, r.getParams()));
-      }
-      return pluginInfo;
-    }
-
-
-    private void handlePOST() throws IOException {
-      List<CommandOperation> ops = CommandOperation.readCommands(req.getContentStreams(), resp.getValues());
-      if (ops == null) return;
-      try {
-        for (; ; ) {
-          ArrayList<CommandOperation> opsCopy = new ArrayList<>(ops.size());
-          for (CommandOperation op : ops) opsCopy.add(op.getCopy());
-          try {
-            if (parts.size() > 1 && RequestParams.NAME.equals(parts.get(1))) {
-              RequestParams params = RequestParams.getFreshRequestParams(req.getCore().getResourceLoader(), req.getCore().getSolrConfig().getRequestParams());
-              handleParams(opsCopy, params);
-            } else {
-              ConfigOverlay overlay = SolrConfig.getConfigOverlay(req.getCore().getResourceLoader());
-              handleCommands(opsCopy, overlay);
-            }
-            break; // succeeded, so no need to go over the loop again
-          } catch (ZkController.ResourceModifiedInZkException e) {
-            //retry
-            log.info("Race condition, the node is modified in ZK by someone else " + e.getMessage());
-          }
-        }
-      } catch (Exception e) {
-        resp.setException(e);
-        resp.add(CommandOperation.ERR_MSGS, singletonList(SchemaManager.getErrorStr(e)));
-      }
-
-    }
-
-
-    private void handleParams(ArrayList<CommandOperation> ops, RequestParams params) {
-      for (CommandOperation op : ops) {
-        switch (op.name) {
-          case SET:
-          case UPDATE: {
-            Map<String, Object> map = op.getDataMap();
-            if (op.hasError()) break;
-
-            for (Map.Entry<String, Object> entry : map.entrySet()) {
-
-              Map val;
-              String key = entry.getKey();
-              if (isNullOrEmpty(key)) {
-                op.addError("null key ");
-                continue;
-              }
-              key = key.trim();
-              String err = validateName(key);
-              if (err != null) {
-                op.addError(err);
-                continue;
-              }
-
-              try {
-                val = (Map) entry.getValue();
-              } catch (Exception e1) {
-                op.addError("invalid params for key : " + key);
-                continue;
-              }
-
-              if (val.containsKey("")) {
-                op.addError("Empty keys are not allowed in params");
-                continue;
-              }
-
-              RequestParams.ParamSet old = params.getParams(key);
-              if (op.name.equals(UPDATE)) {
-                if (old == null) {
-                  op.addError(formatString("unknown paramset {0} cannot update ", key));
-                  continue;
-                }
-                params = params.setParams(key, old.update(val));
-              } else {
-                Long version = old == null ? 0 : old.getVersion() + 1;
-                params = params.setParams(key, RequestParams.createParamSet(val, version));
-              }
-
-            }
-            break;
-
-          }
-          case "delete": {
-            List<String> name = op.getStrs(CommandOperation.ROOT_OBJ);
-            if (op.hasError()) break;
-            for (String s : name) {
-              if (params.getParams(s) == null) {
-                op.addError(formatString("can't delete . No such params ''{0}'' exist", s));
-              }
-              params = params.setParams(s, null);
-            }
-          }
-        }
-      }
-
-
-      List errs = CommandOperation.captureErrors(ops);
-      if (!errs.isEmpty()) {
-        throw new ApiBag.ExceptionWithErrObject(SolrException.ErrorCode.BAD_REQUEST,"error processing params", errs);
-      }
-
-      SolrResourceLoader loader = req.getCore().getResourceLoader();
-      if (loader instanceof ZkSolrResourceLoader) {
-        ZkSolrResourceLoader zkLoader = (ZkSolrResourceLoader) loader;
-        if (ops.isEmpty()) {
-          ZkController.touchConfDir(zkLoader);
-        } else {
-          log.debug("persisting params data : {}", Utils.toJSONString(params.toMap(new LinkedHashMap<>())));
-          int latestVersion = ZkController.persistConfigResourceToZooKeeper(zkLoader,
-              params.getZnodeVersion(), RequestParams.RESOURCE, params.toByteArray(), true);
-
-          log.debug("persisted to version : {} ", latestVersion);
-          waitForAllReplicasState(req.getCore().getCoreDescriptor().getCloudDescriptor().getCollectionName(),
-              req.getCore().getCoreContainer().getZkController(), RequestParams.NAME, latestVersion, 30);
-        }
-
-      } else {
-        SolrResourceLoader.persistConfLocally(loader, RequestParams.RESOURCE, params.toByteArray());
-        req.getCore().getSolrConfig().refreshRequestParams();
-      }
-
-    }
-
-    private void handleCommands(List<CommandOperation> ops, ConfigOverlay overlay) throws IOException {
-      for (CommandOperation op : ops) {
-        switch (op.name) {
-          case SET_PROPERTY:
-            overlay = applySetProp(op, overlay);
-            break;
-          case UNSET_PROPERTY:
-            overlay = applyUnset(op, overlay);
-            break;
-          case SET_USER_PROPERTY:
-            overlay = applySetUserProp(op, overlay);
-            break;
-          case UNSET_USER_PROPERTY:
-            overlay = applyUnsetUserProp(op, overlay);
-            break;
-          default: {
-            List<String> pcs = StrUtils.splitSmart(op.name.toLowerCase(Locale.ROOT), '-');
-            if (pcs.size() != 2) {
-              op.addError(formatString("Unknown operation ''{0}'' ", op.name));
-            } else {
-              String prefix = pcs.get(0);
-              String name = pcs.get(1);
-              if (cmdPrefixes.contains(prefix) && namedPlugins.containsKey(name)) {
-                SolrConfig.SolrPluginInfo info = namedPlugins.get(name);
-                if ("delete".equals(prefix)) {
-                  overlay = deleteNamedComponent(op, overlay, info.getCleanTag());
-                } else {
-                  overlay = updateNamedPlugin(info, op, overlay, prefix.equals("create") || prefix.equals("add"));
-                }
-              } else {
-                op.unknownOperation();
-              }
-            }
-          }
-        }
-      }
-      List errs = CommandOperation.captureErrors(ops);
-      if (!errs.isEmpty()) {
-        throw new ApiBag.ExceptionWithErrObject(SolrException.ErrorCode.BAD_REQUEST,"error processing commands", errs);
-      }
-
-      SolrResourceLoader loader = req.getCore().getResourceLoader();
-      if (loader instanceof ZkSolrResourceLoader) {
-        int latestVersion = ZkController.persistConfigResourceToZooKeeper((ZkSolrResourceLoader) loader, overlay.getZnodeVersion(),
-            ConfigOverlay.RESOURCE_NAME, overlay.toByteArray(), true);
-        log.info("Executed config commands successfully and persisted to ZK {}", ops);
-        waitForAllReplicasState(req.getCore().getCoreDescriptor().getCloudDescriptor().getCollectionName(),
-            req.getCore().getCoreContainer().getZkController(),
-            ConfigOverlay.NAME,
-            latestVersion, 30);
-      } else {
-        SolrResourceLoader.persistConfLocally(loader, ConfigOverlay.RESOURCE_NAME, overlay.toByteArray());
-        req.getCore().getCoreContainer().reload(req.getCore().getName());
-        log.info("Executed config commands successfully and persited to File System {}", ops);
-      }
-
-    }
-
-    private ConfigOverlay deleteNamedComponent(CommandOperation op, ConfigOverlay overlay, String typ) {
-      String name = op.getStr(CommandOperation.ROOT_OBJ);
-      if (op.hasError()) return overlay;
-      if (overlay.getNamedPlugins(typ).containsKey(name)) {
-        return overlay.deleteNamedPlugin(name, typ);
-      } else {
-        op.addError(formatString("NO such {0} ''{1}'' ", typ, name));
-        return overlay;
-      }
-    }
-
-    private ConfigOverlay updateNamedPlugin(SolrConfig.SolrPluginInfo info, CommandOperation op, ConfigOverlay overlay, boolean isCreate) {
-      String name = op.getStr(NAME);
-      String clz = info.options.contains(REQUIRE_CLASS) ? op.getStr(CLASS_NAME) : op.getStr(CLASS_NAME, null);
-      op.getMap(DEFAULTS, null);
-      op.getMap(PluginInfo.INVARIANTS, null);
-      op.getMap(PluginInfo.APPENDS, null);
-      if (op.hasError()) return overlay;
-      if (!verifyClass(op, clz, info.clazz)) return overlay;
-      if (pluginExists(info, overlay, name)) {
-        if (isCreate) {
-          op.addError(formatString("''{0}'' already exists. Do an ''{1}'' if you want to change it", name, "update-" + info.getTagCleanLower()));
-          return overlay;
-        } else {
-          return overlay.addNamedPlugin(op.getDataMap(), info.getCleanTag());
-        }
-      } else {
-        if (isCreate) {
-          return overlay.addNamedPlugin(op.getDataMap(), info.getCleanTag());
-        } else {
-          op.addError(formatString(" ''{0}'' does not exist . Do an ''{1}'' , if you want to create it ", name, "create-" + info.getTagCleanLower()));
-          return overlay;
-        }
-      }
-    }
-
-    private boolean pluginExists(SolrConfig.SolrPluginInfo info, ConfigOverlay overlay, String name) {
-      List<PluginInfo> l = req.getCore().getSolrConfig().getPluginInfos(info.clazz.getName());
-      for (PluginInfo pluginInfo : l) if (name.equals(pluginInfo.name)) return true;
-      return overlay.getNamedPlugins(info.getCleanTag()).containsKey(name);
-    }
-
-    private boolean verifyClass(CommandOperation op, String clz, Class expected) {
-      if (clz == null) return true;
-      if (!"true".equals(String.valueOf(op.getStr("runtimeLib", null)))) {
-        //this is not dynamically loaded so we can verify the class right away
-        try {
-          req.getCore().createInitInstance(new PluginInfo(SolrRequestHandler.TYPE, op.getDataMap()), expected, clz, "");
-        } catch (Exception e) {
-          op.addError(e.getMessage());
-          return false;
-        }
-
-      }
-      return true;
-    }
-
-    private ConfigOverlay applySetUserProp(CommandOperation op, ConfigOverlay overlay) {
-      Map<String, Object> m = op.getDataMap();
-      if (op.hasError()) return overlay;
-      for (Map.Entry<String, Object> e : m.entrySet()) {
-        String name = e.getKey();
-        Object val = e.getValue();
-        overlay = overlay.setUserProperty(name, val);
-      }
-      return overlay;
-    }
-
-    private ConfigOverlay applyUnsetUserProp(CommandOperation op, ConfigOverlay overlay) {
-      List<String> name = op.getStrs(CommandOperation.ROOT_OBJ);
-      if (op.hasError()) return overlay;
-      for (String o : name) {
-        if (!overlay.getUserProps().containsKey(o)) {
-          op.addError(formatString("No such property ''{0}''", name));
-        } else {
-          overlay = overlay.unsetUserProperty(o);
-        }
-      }
-      return overlay;
-    }
-
-
-    private ConfigOverlay applyUnset(CommandOperation op, ConfigOverlay overlay) {
-      List<String> name = op.getStrs(CommandOperation.ROOT_OBJ);
-      if (op.hasError()) return overlay;
-
-      for (String o : name) {
-        if (!ConfigOverlay.isEditableProp(o, false, null)) {
-          op.addError(formatString(NOT_EDITABLE, o));
-        } else {
-          overlay = overlay.unsetProperty(o);
-        }
-      }
-      return overlay;
-    }
-
-    private ConfigOverlay applySetProp(CommandOperation op, ConfigOverlay overlay) {
-      Map<String, Object> m = op.getDataMap();
-      if (op.hasError()) return overlay;
-      for (Map.Entry<String, Object> e : m.entrySet()) {
-        String name = e.getKey();
-        Object val = e.getValue();
-        Class typ = ConfigOverlay.checkEditable(name, false, null);
-        if (typ == null) {
-          op.addError(formatString(NOT_EDITABLE, name));
-          continue;
-        }
-
-        if (val != null) {
-          if (typ == String.class) val = val.toString();
-          String typeErr = "Property {0} must be of {1} type ";
-          if (typ == Boolean.class) {
-            // Boolean.parseBoolean never throws; validate explicitly instead of relying on a dead catch
-            if ("true".equalsIgnoreCase(val.toString()) || "false".equalsIgnoreCase(val.toString())) {
-              val = Boolean.parseBoolean(val.toString());
-            } else {
-              op.addError(formatString(typeErr, name, typ.getSimpleName()));
-              continue;
-            }
-          } else if (typ == Integer.class) {
-            try {
-              val = Integer.parseInt(val.toString());
-            } catch (Exception exp) {
-              op.addError(formatString(typeErr, name, typ.getSimpleName()));
-              continue;
-            }
-
-          } else if (typ == Float.class) {
-            try {
-              val = Float.parseFloat(val.toString());
-            } catch (Exception exp) {
-              op.addError(formatString(typeErr, name, typ.getSimpleName()));
-              continue;
-            }
-
-          }
-        }
-
-
-        overlay = overlay.setProperty(name, val);
-      }
-      return overlay;
-    }
-
-  }
-
-  public static String validateName(String s) {
-    for (int i = 0; i < s.length(); i++) {
-      char c = s.charAt(i);
-      if ((c >= 'A' && c <= 'Z') ||
-          (c >= 'a' && c <= 'z') ||
-          (c >= '0' && c <= '9') ||
-          c == '_' ||
-          c == '-' ||
-          c == '.'
-          ) continue;
-      else {
-        return formatString("''{0}'' name should only have chars [a-zA-Z_-.0-9] ", s);
-      }
-    }
-    return null;
-  }
-
-  @Override
-  public SolrRequestHandler getSubHandler(String path) {
-    if (subPaths.contains(path)) return this;
-    if (path.startsWith("/params/")) return this;
-    return null;
-  }
-
-
-  private static Set<String> subPaths = new HashSet<>(Arrays.asList("/overlay", "/params", "/updateHandler",
-      "/query", "/jmx", "/requestDispatcher", "/znodeVersion"));
-
-  static {
-    for (SolrConfig.SolrPluginInfo solrPluginInfo : SolrConfig.plugins)
-      subPaths.add("/" + solrPluginInfo.getCleanTag());
-
-  }
-
-  //////////////////////// SolrInfoMBeans methods //////////////////////
-
-
-  @Override
-  public String getDescription() {
-    return "Edit solrconfig.xml";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-
-
-  public static final String SET_PROPERTY = "set-property";
-  public static final String UNSET_PROPERTY = "unset-property";
-  public static final String SET_USER_PROPERTY = "set-user-property";
-  public static final String UNSET_USER_PROPERTY = "unset-user-property";
-  public static final String SET = "set";
-  public static final String UPDATE = "update";
-  public static final String CREATE = "create";
-  private static Set<String> cmdPrefixes = ImmutableSet.of(CREATE, UPDATE, "delete", "add");
-
-  /**
-   * Block up to a specified maximum time until we see agreement on the config
-   * version in ZooKeeper across all replicas for a collection.
-   */
-  private static void waitForAllReplicasState(String collection,
-                                              ZkController zkController,
-                                              String prop,
-                                              int expectedVersion,
-                                              int maxWaitSecs) {
-    final RTimer timer = new RTimer();
-    // get a list of active replica cores to query for the config zk version (skipping this core of course)
-    List<PerReplicaCallable> concurrentTasks = new ArrayList<>();
-
-    for (String coreUrl : getActiveReplicaCoreUrls(zkController, collection)) {
-      PerReplicaCallable e = new PerReplicaCallable(coreUrl, prop, expectedVersion, maxWaitSecs);
-      concurrentTasks.add(e);
-    }
-    if (concurrentTasks.isEmpty()) return; // nothing to wait for ...
-
-    log.info(formatString("Waiting up to {0} secs for {1} replicas to set the property {2} to be of version {3} for collection {4}",
-        maxWaitSecs, concurrentTasks.size(), prop, expectedVersion, collection));
-
-    // use an executor service to invoke schema zk version requests in parallel with a max wait time
-    int poolSize = Math.min(concurrentTasks.size(), 10);
-    ExecutorService parallelExecutor =
-        ExecutorUtil.newMDCAwareFixedThreadPool(poolSize, new DefaultSolrThreadFactory("solrHandlerExecutor"));
-    try {
-      List<Future<Boolean>> results =
-          parallelExecutor.invokeAll(concurrentTasks, maxWaitSecs, TimeUnit.SECONDS);
-
-      // determine whether all replicas have the update
-      List<String> failedList = null; // lazily init'd
-      for (int f = 0; f < results.size(); f++) {
-        Boolean success = false;
-        Future<Boolean> next = results.get(f);
-        if (next.isDone() && !next.isCancelled()) {
-          // looks to have finished, but need to check if it succeeded
-          try {
-            success = next.get();
-          } catch (ExecutionException e) {
-            // shouldn't happen since we checked isCancelled
-          }
-        }
-
-        if (!success) {
-          String coreUrl = concurrentTasks.get(f).coreUrl;
-          log.warn("Core " + coreUrl + "could not get the expected version " + expectedVersion);
-          if (failedList == null) failedList = new ArrayList<>();
-          failedList.add(coreUrl);
-        }
-      }
-
-      // if any tasks haven't completed within the specified timeout, it's an error
-      if (failedList != null)
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            formatString("{0} out of {1} the property {2} to be of version {3} within {4} seconds! Failed cores: {5}",
-                failedList.size(), concurrentTasks.size() + 1, prop, expectedVersion, maxWaitSecs, failedList));
-
-    } catch (InterruptedException ie) {
-      log.warn(formatString(
-          "Core was interrupted while trying to set the property {0} to version {1} to propagate to {2} replicas for collection {3}",
-          prop, expectedVersion, concurrentTasks.size(), collection));
-      Thread.currentThread().interrupt();
-    } finally {
-      ExecutorUtil.shutdownAndAwaitTermination(parallelExecutor);
-    }
-
-    log.info("Took {}ms to set the property {} to be of version {} for collection {}",
-        timer.getTime(), prop, expectedVersion, collection);
-  }
-
-  public static List<String> getActiveReplicaCoreUrls(ZkController zkController,
-                                                      String collection) {
-    List<String> activeReplicaCoreUrls = new ArrayList<>();
-    ClusterState clusterState = zkController.getZkStateReader().getClusterState();
-    Set<String> liveNodes = clusterState.getLiveNodes();
-    final DocCollection docCollection = clusterState.getCollectionOrNull(collection);
-    if (docCollection != null && docCollection.getActiveSlices() != null && docCollection.getActiveSlices().size() > 0) {
-      final Collection<Slice> activeSlices = docCollection.getActiveSlices();
-      for (Slice next : activeSlices) {
-        Map<String, Replica> replicasMap = next.getReplicasMap();
-        if (replicasMap != null) {
-          for (Map.Entry<String, Replica> entry : replicasMap.entrySet()) {
-            Replica replica = entry.getValue();
-            if (replica.getState() == Replica.State.ACTIVE && liveNodes.contains(replica.getNodeName())) {
-              activeReplicaCoreUrls.add(replica.getCoreUrl());
-            }
-          }
-        }
-      }
-    }
-    return activeReplicaCoreUrls;
-  }
-
-  @Override
-  public Name getPermissionName(AuthorizationContext ctx) {
-    switch (ctx.getHttpMethod()) {
-      case "GET":
-        return Name.CONFIG_READ_PERM;
-      case "POST":
-        return Name.CONFIG_EDIT_PERM;
-      default:
-        return null;
-    }
-  }
-
-  private static class PerReplicaCallable extends SolrRequest implements Callable<Boolean> {
-    String coreUrl;
-    String prop;
-    int expectedZkVersion;
-    Number remoteVersion = null;
-    int maxWait;
-
-    PerReplicaCallable(String coreUrl, String prop, int expectedZkVersion, int maxWait) {
-      super(METHOD.GET, "/config/" + ZNODEVER);
-      this.coreUrl = coreUrl;
-      this.expectedZkVersion = expectedZkVersion;
-      this.prop = prop;
-      this.maxWait = maxWait;
-    }
-
-    @Override
-    public SolrParams getParams() {
-      return new ModifiableSolrParams()
-          .set(prop, expectedZkVersion)
-          .set(CommonParams.WT, CommonParams.JAVABIN);
-    }
-
-    @Override
-    public Boolean call() throws Exception {
-      final RTimer timer = new RTimer();
-      int attempts = 0;
-      try (HttpSolrClient solr = new HttpSolrClient.Builder(coreUrl).build()) {
-        // eventually, this loop will get killed by the ExecutorService's timeout
-        while (true) {
-          try {
-            long timeElapsed = (long) timer.getTime() / 1000;
-            if (timeElapsed >= maxWait) {
-              return false;
-            }
-            log.info("Time elapsed : {} secs, maxWait {}", timeElapsed, maxWait);
-            Thread.sleep(100);
-            NamedList<Object> resp = solr.httpUriRequest(this).future.get();
-            if (resp != null) {
-              Map m = (Map) resp.get(ZNODEVER);
-              if (m != null) {
-                remoteVersion = (Number) m.get(prop);
-                if (remoteVersion != null && remoteVersion.intValue() >= expectedZkVersion) break;
-              }
-            }
-
-            attempts++;
-            log.info(formatString("Could not get expectedVersion {0} from {1} for prop {2}   after {3} attempts", expectedZkVersion, coreUrl, prop, attempts));
-          } catch (Exception e) {
-            if (e instanceof InterruptedException) {
-              break; // stop looping
-            } else {
-              log.warn("Failed to get /schema/zkversion from " + coreUrl + " due to: " + e);
-            }
-          }
-        }
-      }
-      return true;
-    }
-
-
-    @Override
-    protected SolrResponse createResponse(SolrClient client) {
-      return null;
-    }
-  }
-
-  @Override
-  public Collection<Api> getApis() {
-    return ApiBag.wrapRequestHandlers(this,
-        "core.config",
-        "core.config.Commands",
-        "core.config.Params",
-        "core.config.Params.Commands");
-  }
-
-  @Override
-  public Boolean registerV2() {
-    return Boolean.TRUE;
-  }
-}

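The POST side of the handler above consumes JSON command payloads (set-property, unset-property, update-requesthandler, and so on) and persists the resulting overlay. As a rough illustration of how a client exercises it, here is a self-contained sketch using only JDK classes; the core name "techproducts" and the autoCommit property are examples, not values taken from the diff:

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class ConfigEditSketch {
      public static void main(String[] args) throws Exception {
        // Command payload of the shape handlePOST expects; property and value are illustrative.
        String command = "{ \"set-property\": { \"updateHandler.autoCommit.maxTime\": 15000 } }";
        URL url = new URL("http://localhost:8983/solr/techproducts/config"); // hypothetical core
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Content-Type", "application/json");
        conn.setDoOutput(true);
        try (OutputStream out = conn.getOutputStream()) {
          out.write(command.getBytes(StandardCharsets.UTF_8));
        }
        // 200 means the overlay was persisted (to ZooKeeper in cloud mode, otherwise to disk).
        System.out.println("HTTP " + conn.getResponseCode());
      }
    }
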
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/SolrDefaultStreamFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/SolrDefaultStreamFactory.java b/solr/core/src/java/org/apache/solr/handler/SolrDefaultStreamFactory.java
deleted file mode 100644
index c072f0b..0000000
--- a/solr/core/src/java/org/apache/solr/handler/SolrDefaultStreamFactory.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import org.apache.solr.client.solrj.io.Lang;
-import org.apache.solr.client.solrj.io.stream.expr.DefaultStreamFactory;
-import org.apache.solr.core.SolrResourceLoader;
-
-/**
- * A default collection of mappings, used to convert strings into stream expressions.
- * Same as {@link DefaultStreamFactory} plus functions that rely directly on either
- * Lucene or Solr capabilities that are not part of {@link Lang}.
- *
- * @since 7.5
- */
-public class SolrDefaultStreamFactory extends DefaultStreamFactory {
-
-  private SolrResourceLoader solrResourceLoader;
-
-  public SolrDefaultStreamFactory() {
-    super();
-    this.withFunctionName("analyze",  AnalyzeEvaluator.class);
-    this.withFunctionName("classify", ClassifyStream.class);
-    this.withFunctionName("haversineMeters", HaversineMetersEvaluator.class);
-  }
-
-  public SolrDefaultStreamFactory withSolrResourceLoader(SolrResourceLoader solrResourceLoader) {
-    this.solrResourceLoader = solrResourceLoader;
-    return this;
-  }
-
-  public void setSolrResourceLoader(SolrResourceLoader solrResourceLoader) {
-    this.solrResourceLoader = solrResourceLoader;
-  }
-
-  public SolrResourceLoader getSolrResourceLoader() {
-    return solrResourceLoader;
-  }
-
-}

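The factory above is extended the same way its own constructor registers functions: withFunctionName maps a stream-expression name to an implementing class. A minimal sketch, where "myEval" and MyEvaluator are hypothetical and loader is an existing SolrResourceLoader:

    // Sketch only: MyEvaluator is a stand-in, not a class from the diff.
    SolrDefaultStreamFactory factory = new SolrDefaultStreamFactory()
        .withSolrResourceLoader(loader);
    factory.withFunctionName("myEval", MyEvaluator.class); // same registration pattern as the constructor above
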
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/StandardRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/StandardRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/StandardRequestHandler.java
deleted file mode 100644
index e87aa68..0000000
--- a/solr/core/src/java/org/apache/solr/handler/StandardRequestHandler.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import org.apache.solr.handler.component.*;
-
-@Deprecated
-public class StandardRequestHandler extends SearchHandler 
-{
-  //////////////////////// SolrInfoMBeans methods //////////////////////
-
-  @Override
-  public String getDescription() {
-    return "The standard Solr request handler";
-  }
-}
-
-
-
-
-
-
-


[14/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
deleted file mode 100644
index 66dc39e..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
+++ /dev/null
@@ -1,427 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import java.io.File;
-import java.lang.invoke.MethodHandles;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.ExecutorService;
-
-import com.google.common.collect.ImmutableMap;
-import org.apache.commons.lang.StringUtils;
-import org.apache.solr.api.Api;
-import org.apache.solr.cloud.CloudDescriptor;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CommonAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.CoreDescriptor;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.logging.MDCLoggingContext;
-import org.apache.solr.metrics.SolrMetricManager;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.security.AuthorizationContext;
-import org.apache.solr.security.PermissionNameProvider;
-import org.apache.solr.util.DefaultSolrThreadFactory;
-import org.apache.solr.util.stats.MetricUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
-
-import static org.apache.solr.common.params.CoreAdminParams.ACTION;
-import static org.apache.solr.common.params.CoreAdminParams.CoreAdminAction.STATUS;
-import static org.apache.solr.security.PermissionNameProvider.Name.CORE_EDIT_PERM;
-import static org.apache.solr.security.PermissionNameProvider.Name.CORE_READ_PERM;
-
-/**
- * Handles admin requests for cores (create, status, reload, swap, and so on).
- *
- * @since solr 1.3
- */
-public class CoreAdminHandler extends RequestHandlerBase implements PermissionNameProvider {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  protected final CoreContainer coreContainer;
-  protected final Map<String, Map<String, TaskObject>> requestStatusMap;
-  private final CoreAdminHandlerApi coreAdminHandlerApi;
-
-  protected ExecutorService parallelExecutor = ExecutorUtil.newMDCAwareFixedThreadPool(50,
-      new DefaultSolrThreadFactory("parallelCoreAdminExecutor"));
-
-  protected static final int MAX_TRACKED_REQUESTS = 100;
-  public static final String RUNNING = "running";
-  public static final String COMPLETED = "completed";
-  public static final String FAILED = "failed";
-  public static final String RESPONSE = "Response";
-  public static final String RESPONSE_STATUS = "STATUS";
-  public static final String RESPONSE_MESSAGE = "msg";
-
-  public CoreAdminHandler() {
-    super();
-    // Unlike most request handlers, CoreContainer initialization 
-    // should happen in the constructor...  
-    this.coreContainer = null;
-    HashMap<String, Map<String, TaskObject>> map = new HashMap<>(3, 1.0f);
-    map.put(RUNNING, Collections.synchronizedMap(new LinkedHashMap<String, TaskObject>()));
-    map.put(COMPLETED, Collections.synchronizedMap(new LinkedHashMap<String, TaskObject>()));
-    map.put(FAILED, Collections.synchronizedMap(new LinkedHashMap<String, TaskObject>()));
-    requestStatusMap = Collections.unmodifiableMap(map);
-    coreAdminHandlerApi = new CoreAdminHandlerApi(this);
-  }
-
-
-  /**
-   * Overloaded constructor to inject a CoreContainer into the handler.
-   *
-   * @param coreContainer the CoreContainer of the installed Solr webapp.
-   */
-  public CoreAdminHandler(final CoreContainer coreContainer) {
-    this.coreContainer = coreContainer;
-    HashMap<String, Map<String, TaskObject>> map = new HashMap<>(3, 1.0f);
-    map.put(RUNNING, Collections.synchronizedMap(new LinkedHashMap<String, TaskObject>()));
-    map.put(COMPLETED, Collections.synchronizedMap(new LinkedHashMap<String, TaskObject>()));
-    map.put(FAILED, Collections.synchronizedMap(new LinkedHashMap<String, TaskObject>()));
-    requestStatusMap = Collections.unmodifiableMap(map);
-    coreAdminHandlerApi = new CoreAdminHandlerApi(this);
-  }
-
-
-  @Override
-  final public void init(NamedList args) {
-    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "CoreAdminHandler should not be configured in solrconfig.xml\n" +
-                    "it is a special Handler configured directly by the RequestDispatcher");
-  }
-
-  @Override
-  public void initializeMetrics(SolrMetricManager manager, String registryName, String tag, String scope) {
-    super.initializeMetrics(manager, registryName, tag, scope);
-    parallelExecutor = MetricUtils.instrumentedExecutorService(parallelExecutor, this, manager.registry(registryName),
-        SolrMetricManager.mkName("parallelCoreAdminExecutor", getCategory().name(),scope, "threadPool"));
-  }
-  @Override
-  public Boolean registerV2() {
-    return Boolean.TRUE;
-  }
-
-  /**
-   * The instance of CoreContainer this handler handles. This should be the CoreContainer instance that created this
-   * handler.
-   *
-   * @return a CoreContainer instance
-   */
-  public CoreContainer getCoreContainer() {
-    return this.coreContainer;
-  }
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    // Make sure the core container is available
-    try {
-      CoreContainer cores = getCoreContainer();
-      if (cores == null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-                "Core container instance missing");
-      }
-      //boolean doPersist = false;
-      final String taskId = req.getParams().get(CommonAdminParams.ASYNC);
-      final TaskObject taskObject = new TaskObject(taskId);
-
-      if (taskId != null) {
-        // Put the tasks into the maps for tracking
-        if (getRequestStatusMap(RUNNING).containsKey(taskId) || getRequestStatusMap(COMPLETED).containsKey(taskId) || getRequestStatusMap(FAILED).containsKey(taskId)) {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-              "Duplicate request with the same requestid found.");
-        }
-
-        addTask(RUNNING, taskObject);
-      }
-
-      // Pick the action
-      CoreAdminOperation op = opMap.get(req.getParams().get(ACTION, STATUS.toString()).toLowerCase(Locale.ROOT));
-      if (op == null) {
-        handleCustomAction(req, rsp);
-        return;
-      }
-
-      final CallInfo callInfo = new CallInfo(this, req, rsp, op);
-      String coreName = req.getParams().get(CoreAdminParams.CORE);
-      if (coreName == null) {
-        coreName = req.getParams().get(CoreAdminParams.NAME);
-      }
-      MDCLoggingContext.setCoreName(coreName);
-      if (taskId == null) {
-        callInfo.call();
-      } else {
-        try {
-          MDC.put("CoreAdminHandler.asyncId", taskId);
-          MDC.put("CoreAdminHandler.action", op.action.toString());
-          parallelExecutor.execute(() -> {
-            boolean exceptionCaught = false;
-            try {
-              callInfo.call();
-              taskObject.setRspObject(callInfo.rsp);
-            } catch (Exception e) {
-              exceptionCaught = true;
-              taskObject.setRspObjectFromException(e);
-            } finally {
-              removeTask("running", taskObject.taskId);
-              if (exceptionCaught) {
-                addTask("failed", taskObject, true);
-              } else
-                addTask("completed", taskObject, true);
-            }
-          });
-        } finally {
-          MDC.remove("CoreAdminHandler.asyncId");
-          MDC.remove("CoreAdminHandler.action");
-        }
-      }
-    } finally {
-      rsp.setHttpCaching(false);
-
-    }
-  }
-
-  /**
-   * Handle a custom action.
-   * <p>
-   * This method may be overridden by subclasses to handle custom actions; by default it
-   * throws a SolrException for unsupported operations.
-   */
-  protected void handleCustomAction(SolrQueryRequest req, SolrQueryResponse rsp) {
-    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unsupported operation: " +
-            req.getParams().get(ACTION));
-  }
-
-  public static ImmutableMap<String, String> paramToProp = ImmutableMap.<String, String>builder()
-      .put(CoreAdminParams.CONFIG, CoreDescriptor.CORE_CONFIG)
-      .put(CoreAdminParams.SCHEMA, CoreDescriptor.CORE_SCHEMA)
-      .put(CoreAdminParams.DATA_DIR, CoreDescriptor.CORE_DATADIR)
-      .put(CoreAdminParams.ULOG_DIR, CoreDescriptor.CORE_ULOGDIR)
-      .put(CoreAdminParams.CONFIGSET, CoreDescriptor.CORE_CONFIGSET)
-      .put(CoreAdminParams.LOAD_ON_STARTUP, CoreDescriptor.CORE_LOADONSTARTUP)
-      .put(CoreAdminParams.TRANSIENT, CoreDescriptor.CORE_TRANSIENT)
-      .put(CoreAdminParams.SHARD, CoreDescriptor.CORE_SHARD)
-      .put(CoreAdminParams.COLLECTION, CoreDescriptor.CORE_COLLECTION)
-      .put(CoreAdminParams.ROLES, CoreDescriptor.CORE_ROLES)
-      .put(CoreAdminParams.CORE_NODE_NAME, CoreDescriptor.CORE_NODE_NAME)
-      .put(ZkStateReader.NUM_SHARDS_PROP, CloudDescriptor.NUM_SHARDS)
-      .put(CoreAdminParams.REPLICA_TYPE, CloudDescriptor.REPLICA_TYPE)
-      .build();
-
-  protected static Map<String, String> buildCoreParams(SolrParams params) {
-
-    Map<String, String> coreParams = new HashMap<>();
-
-    // standard core create parameters
-    for (String param : paramToProp.keySet()) {
-      String value = params.get(param, null);
-      if (StringUtils.isNotEmpty(value)) {
-        coreParams.put(paramToProp.get(param), value);
-      }
-    }
-
-    // extra properties
-    Iterator<String> paramsIt = params.getParameterNamesIterator();
-    while (paramsIt.hasNext()) {
-      String param = paramsIt.next();
-      if (param.startsWith(CoreAdminParams.PROPERTY_PREFIX)) {
-        String propName = param.substring(CoreAdminParams.PROPERTY_PREFIX.length());
-        String propValue = params.get(param);
-        coreParams.put(propName, propValue);
-      }
-      if (param.startsWith(ZkController.COLLECTION_PARAM_PREFIX)) {
-        coreParams.put(param, params.get(param));
-      }
-    }
-
-    return coreParams;
-  }
-
-  protected static String normalizePath(String path) {
-    if (path == null)
-      return null;
-    path = path.replace('/', File.separatorChar);
-    path = path.replace('\\', File.separatorChar);
-    return path;
-  }
-
-  public static ModifiableSolrParams params(String... params) {
-    ModifiableSolrParams msp = new ModifiableSolrParams();
-    for (int i=0; i<params.length; i+=2) {
-      msp.add(params[i], params[i+1]);
-    }
-    return msp;
-  }
-
-  //////////////////////// SolrInfoMBeans methods //////////////////////
-
-  @Override
-  public String getDescription() {
-    return "Manage Multiple Solr Cores";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-
-  @Override
-  public Name getPermissionName(AuthorizationContext ctx) {
-    String action = ctx.getParams().get(CoreAdminParams.ACTION);
-    if (action == null) return CORE_READ_PERM;
-    CoreAdminParams.CoreAdminAction coreAction = CoreAdminParams.CoreAdminAction.get(action);
-    if (coreAction == null) return CORE_READ_PERM;
-    return coreAction.isRead ?
-        CORE_READ_PERM :
-        CORE_EDIT_PERM;
-  }
-
-  /**
-   * Helper class to manage the tasks to be tracked.
-   * This holds the taskId and the response (if available).
-   */
-  static class TaskObject {
-    String taskId;
-    String rspInfo;
-
-    public TaskObject(String taskId) {
-      this.taskId = taskId;
-    }
-
-    public String getRspObject() {
-      return rspInfo;
-    }
-
-    public void setRspObject(SolrQueryResponse rspObject) {
-      this.rspInfo = rspObject.getToLogAsString("TaskId: " + this.taskId);
-    }
-
-    public void setRspObjectFromException(Exception e) {
-      this.rspInfo = e.getMessage();
-    }
-  }
-
-  /**
-   * Helper method to add a task to a tracking map, evicting the oldest entry once
-   * MAX_TRACKED_REQUESTS entries are being tracked.
-   */
-  void addTask(String type, TaskObject o, boolean limit) {
-    synchronized (getRequestStatusMap(type)) {
-      if(limit && getRequestStatusMap(type).size() == MAX_TRACKED_REQUESTS) {
-        String key = getRequestStatusMap(type).entrySet().iterator().next().getKey();
-        getRequestStatusMap(type).remove(key);
-      }
-      addTask(type, o);
-    }
-  }
-
-
-  private void addTask(String type, TaskObject o) {
-    synchronized (getRequestStatusMap(type)) {
-      getRequestStatusMap(type).put(o.taskId, o);
-    }
-  }
-
-  /**
-   * Helper method to remove a task from a tracking map.
-   */
-  private void removeTask(String map, String taskId) {
-    synchronized (getRequestStatusMap(map)) {
-      getRequestStatusMap(map).remove(taskId);
-    }
-  }
-
-  /**
-   * Helper method to get a request status map given the name.
-   */
-  Map<String, TaskObject> getRequestStatusMap(String key) {
-    return requestStatusMap.get(key);
-  }
-
-  /**
-   * Method to ensure shutting down of the ThreadPool Executor.
-   */
-  public void shutdown() {
-    if (parallelExecutor != null && !parallelExecutor.isShutdown())
-      ExecutorUtil.shutdownAndAwaitTermination(parallelExecutor);
-  }
-
-  private static final Map<String, CoreAdminOperation> opMap = new HashMap<>();
-
-
-  static class CallInfo {
-    final CoreAdminHandler handler;
-    final SolrQueryRequest req;
-    final SolrQueryResponse rsp;
-    final CoreAdminOperation op;
-
-    CallInfo(CoreAdminHandler handler, SolrQueryRequest req, SolrQueryResponse rsp, CoreAdminOperation op) {
-      this.handler = handler;
-      this.req = req;
-      this.rsp = rsp;
-      this.op = op;
-    }
-
-    void call() throws Exception {
-      op.execute(this);
-    }
-
-  }
-
-  @Override
-  public Collection<Api> getApis() {
-    return coreAdminHandlerApi.getApis();
-  }
-
-  static {
-    for (CoreAdminOperation op : CoreAdminOperation.values())
-      opMap.put(op.action.toString().toLowerCase(Locale.ROOT), op);
-  }
-  /**
-   * used by the INVOKE action of core admin handler
-   */
-  public interface Invocable {
-    Map<String, Object> invoke(SolrQueryRequest req);
-  }
-  
-  interface CoreAdminOp {
-    /**
-     * @param it request/response object
-     *
-     * If the request is invalid, throw a SolrException with SolrException.ErrorCode.BAD_REQUEST (400).
-     * If execution of the command fails, throw a SolrException with SolrException.ErrorCode.SERVER_ERROR (500).
-     *
-     * Any non-SolrExceptions are wrapped at a higher level as a SolrException with SolrException.ErrorCode.SERVER_ERROR.
-     */
-    void execute(CallInfo it) throws Exception;
-  }
-}
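
A minimal, self-contained sketch of the bounded task-tracking pattern removed above: insertion-ordered maps keyed by task id, with the oldest entry evicted once a cap is reached. The class name and cap below are illustrative stand-ins, not Solr APIs.

    import java.util.Collections;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class BoundedTaskTracker {
      // Illustrative stand-in for MAX_TRACKED_REQUESTS.
      private static final int MAX_TRACKED = 100;

      // Insertion-ordered and synchronized, like the COMPLETED/FAILED maps above.
      private final Map<String, String> completed =
          Collections.synchronizedMap(new LinkedHashMap<>());

      public void addCompleted(String taskId, String response) {
        synchronized (completed) {
          if (completed.size() == MAX_TRACKED) {
            // Evict the oldest tracked task so the map stays bounded.
            String oldest = completed.keySet().iterator().next();
            completed.remove(oldest);
          }
          completed.put(taskId, response);
        }
      }

      public String get(String taskId) {
        return completed.get(taskId);
      }
    }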

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandlerApi.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandlerApi.java b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandlerApi.java
deleted file mode 100644
index cb2623d..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandlerApi.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.admin;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumMap;
-import java.util.Map;
-
-import org.apache.solr.client.solrj.request.CollectionApiMapping.CommandMeta;
-import org.apache.solr.client.solrj.request.CollectionApiMapping.V2EndPoint;
-import org.apache.solr.client.solrj.request.CoreApiMapping;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-
-public class CoreAdminHandlerApi extends BaseHandlerApiSupport {
-  private final CoreAdminHandler handler;
-  static Collection<ApiCommand> apiCommands = createMapping();
-
-  private static Collection<ApiCommand> createMapping() {
-    Map<CoreApiMapping.Meta, ApiCommand> result = new EnumMap<>(CoreApiMapping.Meta.class);
-
-    for (CoreApiMapping.Meta meta : CoreApiMapping.Meta.values()) {
-
-      for (CoreAdminOperation op : CoreAdminOperation.values()) {
-        if (op.action == meta.action) {
-          result.put(meta, new ApiCommand() {
-            @Override
-            public CommandMeta meta() {
-              return meta;
-            }
-
-            @Override
-            public void invoke(SolrQueryRequest req, SolrQueryResponse rsp, BaseHandlerApiSupport apiHandler) throws Exception {
-              op.execute(new CoreAdminHandler.CallInfo(((CoreAdminHandlerApi) apiHandler).handler,
-                  req,
-                  rsp,
-                  op));
-            }
-          });
-        }
-      }
-    }
-
-    for (CoreApiMapping.Meta meta : CoreApiMapping.Meta.values()) {
-      if (result.get(meta) == null) {
-        throw new RuntimeException("No implementation for " + meta.name());
-      }
-    }
-
-    return result.values();
-  }
-
-  public CoreAdminHandlerApi(CoreAdminHandler handler) {
-    this.handler = handler;
-  }
-
-
-  @Override
-  protected Collection<ApiCommand> getCommands() {
-    return apiCommands;
-  }
-
-  @Override
-  protected Collection<V2EndPoint> getEndPoints() {
-    return Arrays.asList(CoreApiMapping.EndPoint.values());
-  }
-
-
-}
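
The EnumMap construction above is a fail-fast pattern: build a command for every enum constant, then verify nothing was left unmapped so a gap surfaces at class-initialization time rather than on the first request. A condensed sketch; Meta and Command are illustrative stand-ins for CoreApiMapping.Meta and ApiCommand.

    import java.util.EnumMap;
    import java.util.Map;

    public class EnumMappingSketch {
      enum Meta { CREATE, RELOAD, SWAP }

      interface Command { void invoke(); }

      static Map<Meta, Command> createMapping() {
        Map<Meta, Command> result = new EnumMap<>(Meta.class);
        for (Meta meta : Meta.values()) {
          result.put(meta, () -> System.out.println("invoking " + meta));
        }
        // Verify completeness so a missing mapping fails fast.
        for (Meta meta : Meta.values()) {
          if (result.get(meta) == null) {
            throw new RuntimeException("No implementation for " + meta.name());
          }
        }
        return result;
      }

      public static void main(String[] args) {
        createMapping().get(Meta.RELOAD).invoke();
      }
    }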

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java
deleted file mode 100644
index 1ccc7d4..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java
+++ /dev/null
@@ -1,368 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.nio.file.Path;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Optional;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.solr.cloud.CloudDescriptor;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.CoreDescriptor;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.SolrInfoBean;
-import org.apache.solr.core.snapshots.SolrSnapshotManager;
-import org.apache.solr.core.snapshots.SolrSnapshotMetaDataManager;
-import org.apache.solr.core.snapshots.SolrSnapshotMetaDataManager.SnapshotMetaData;
-import org.apache.solr.handler.admin.CoreAdminHandler.CoreAdminOp;
-import org.apache.solr.metrics.SolrMetricManager;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.update.UpdateLog;
-import org.apache.solr.util.NumberUtils;
-import org.apache.solr.util.PropertiesUtil;
-import org.apache.solr.util.RefCounted;
-import org.apache.solr.util.TestInjection;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.common.params.CoreAdminParams.COLLECTION;
-import static org.apache.solr.common.params.CoreAdminParams.CoreAdminAction.*;
-import static org.apache.solr.common.params.CoreAdminParams.REPLICA;
-import static org.apache.solr.common.params.CoreAdminParams.SHARD;
-import static org.apache.solr.handler.admin.CoreAdminHandler.COMPLETED;
-import static org.apache.solr.handler.admin.CoreAdminHandler.CallInfo;
-import static org.apache.solr.handler.admin.CoreAdminHandler.FAILED;
-import static org.apache.solr.handler.admin.CoreAdminHandler.RESPONSE;
-import static org.apache.solr.handler.admin.CoreAdminHandler.RESPONSE_MESSAGE;
-import static org.apache.solr.handler.admin.CoreAdminHandler.RESPONSE_STATUS;
-import static org.apache.solr.handler.admin.CoreAdminHandler.RUNNING;
-import static org.apache.solr.handler.admin.CoreAdminHandler.buildCoreParams;
-import static org.apache.solr.handler.admin.CoreAdminHandler.normalizePath;
-
-enum CoreAdminOperation implements CoreAdminOp {
-
-  CREATE_OP(CREATE, it -> {
-    assert TestInjection.injectRandomDelayInCoreCreation();
-
-    SolrParams params = it.req.getParams();
-    log().info("core create command {}", params);
-    String coreName = params.required().get(CoreAdminParams.NAME);
-    Map<String, String> coreParams = buildCoreParams(params);
-    CoreContainer coreContainer = it.handler.coreContainer;
-    Path instancePath = coreContainer.getCoreRootDirectory().resolve(coreName);
-
-    // TODO: Should we nuke setting odd instance paths?  They break core discovery, generally
-    String instanceDir = it.req.getParams().get(CoreAdminParams.INSTANCE_DIR);
-    if (instanceDir == null)
-      instanceDir = it.req.getParams().get("property.instanceDir");
-    if (instanceDir != null) {
-      instanceDir = PropertiesUtil.substituteProperty(instanceDir, coreContainer.getContainerProperties());
-      instancePath = coreContainer.getCoreRootDirectory().resolve(instanceDir).normalize();
-    }
-
-    boolean newCollection = params.getBool(CoreAdminParams.NEW_COLLECTION, false);
-
-    coreContainer.create(coreName, instancePath, coreParams, newCollection);
-
-    it.rsp.add("core", coreName);
-  }),
-  UNLOAD_OP(UNLOAD, it -> {
-    SolrParams params = it.req.getParams();
-    String cname = params.required().get(CoreAdminParams.CORE);
-
-    boolean deleteIndexDir = params.getBool(CoreAdminParams.DELETE_INDEX, false);
-    boolean deleteDataDir = params.getBool(CoreAdminParams.DELETE_DATA_DIR, false);
-    boolean deleteInstanceDir = params.getBool(CoreAdminParams.DELETE_INSTANCE_DIR, false);
-    boolean deleteMetricsHistory = params.getBool(CoreAdminParams.DELETE_METRICS_HISTORY, false);
-    CoreDescriptor cdescr = it.handler.coreContainer.getCoreDescriptor(cname);
-    it.handler.coreContainer.unload(cname, deleteIndexDir, deleteDataDir, deleteInstanceDir);
-    if (deleteMetricsHistory) {
-      MetricsHistoryHandler historyHandler = it.handler.coreContainer.getMetricsHistoryHandler();
-      if (historyHandler != null) {
-        CloudDescriptor cd = cdescr != null ? cdescr.getCloudDescriptor() : null;
-        String registry;
-        if (cd == null) {
-          registry = SolrMetricManager.getRegistryName(SolrInfoBean.Group.core, cname);
-        } else {
-          String replicaName = Utils.parseMetricsReplicaName(cd.getCollectionName(), cname);
-          registry = SolrMetricManager.getRegistryName(SolrInfoBean.Group.core,
-              cd.getCollectionName(),
-              cd.getShardId(),
-              replicaName);
-        }
-        historyHandler.checkSystemCollection();
-        historyHandler.removeHistory(registry);
-      }
-    }
-
-    assert TestInjection.injectNonExistentCoreExceptionAfterUnload(cname);
-  }),
-  RELOAD_OP(RELOAD, it -> {
-    SolrParams params = it.req.getParams();
-    String cname = params.required().get(CoreAdminParams.CORE);
-
-    it.handler.coreContainer.reload(cname);
-  }),
-  STATUS_OP(STATUS, new StatusOp()),
-  SWAP_OP(SWAP, it -> {
-    final SolrParams params = it.req.getParams();
-    final String cname = params.required().get(CoreAdminParams.CORE);
-    String other = params.required().get(CoreAdminParams.OTHER);
-    it.handler.coreContainer.swap(cname, other);
-  }),
-
-  RENAME_OP(RENAME, it -> {
-    SolrParams params = it.req.getParams();
-    String name = params.required().get(CoreAdminParams.OTHER);
-    String cname = params.required().get(CoreAdminParams.CORE);
-
-    if (cname.equals(name)) return;
-
-    it.handler.coreContainer.rename(cname, name);
-  }),
-
-  MERGEINDEXES_OP(MERGEINDEXES, new MergeIndexesOp()),
-
-  SPLIT_OP(SPLIT, new SplitOp()),
-
-  PREPRECOVERY_OP(PREPRECOVERY, new PrepRecoveryOp()),
-
-  REQUESTRECOVERY_OP(REQUESTRECOVERY, it -> {
-    final SolrParams params = it.req.getParams();
-    final String cname = params.required().get(CoreAdminParams.CORE);
-    log().info("It has been requested that we recover: core=" + cname);
-    
-    try (SolrCore core = it.handler.coreContainer.getCore(cname)) {
-      if (core != null) {
-        // This can take a while, but doRecovery is already async so don't worry about it here
-        core.getUpdateHandler().getSolrCoreState().doRecovery(it.handler.coreContainer, core.getCoreDescriptor());
-      } else {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unable to locate core " + cname);
-      }
-    }
-  }),
-  REQUESTSYNCSHARD_OP(REQUESTSYNCSHARD, new RequestSyncShardOp()),
-
-  REQUESTBUFFERUPDATES_OP(REQUESTBUFFERUPDATES, it -> {
-    SolrParams params = it.req.getParams();
-    String cname = params.required().get(CoreAdminParams.NAME);
-    log().info("Starting to buffer updates on core:" + cname);
-
-    try (SolrCore core = it.handler.coreContainer.getCore(cname)) {
-      if (core == null)
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Core [" + cname + "] does not exist");
-      UpdateLog updateLog = core.getUpdateHandler().getUpdateLog();
-      if (updateLog.getState() != UpdateLog.State.ACTIVE) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Core " + cname + " not in active state");
-      }
-      updateLog.bufferUpdates();
-      it.rsp.add("core", cname);
-      it.rsp.add("status", "BUFFERING");
-    } catch (Throwable e) {
-      if (e instanceof SolrException)
-        throw (SolrException) e;
-      else
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Could not start buffering updates", e);
-    } finally {
-      if (it.req != null) it.req.close();
-    }
-  }),
-  REQUESTAPPLYUPDATES_OP(REQUESTAPPLYUPDATES, new RequestApplyUpdatesOp()),
-
-  REQUESTSTATUS_OP(REQUESTSTATUS, it -> {
-    SolrParams params = it.req.getParams();
-    String requestId = params.required().get(CoreAdminParams.REQUESTID);
-    log().info("Checking request status for : " + requestId);
-
-    if (it.handler.getRequestStatusMap(RUNNING).containsKey(requestId)) {
-      it.rsp.add(RESPONSE_STATUS, RUNNING);
-    } else if (it.handler.getRequestStatusMap(COMPLETED).containsKey(requestId)) {
-      it.rsp.add(RESPONSE_STATUS, COMPLETED);
-      it.rsp.add(RESPONSE, it.handler.getRequestStatusMap(COMPLETED).get(requestId).getRspObject());
-    } else if (it.handler.getRequestStatusMap(FAILED).containsKey(requestId)) {
-      it.rsp.add(RESPONSE_STATUS, FAILED);
-      it.rsp.add(RESPONSE, it.handler.getRequestStatusMap(FAILED).get(requestId).getRspObject());
-    } else {
-      it.rsp.add(RESPONSE_STATUS, "notfound");
-      it.rsp.add(RESPONSE_MESSAGE, "No task found in running, completed or failed tasks");
-    }
-  }),
-
-  OVERSEEROP_OP(OVERSEEROP, it -> {
-    ZkController zkController = it.handler.coreContainer.getZkController();
-    if (zkController != null) {
-      String op = it.req.getParams().get("op");
-      String electionNode = it.req.getParams().get("electionNode");
-      if (electionNode != null) {
-        zkController.rejoinOverseerElection(electionNode, "rejoinAtHead".equals(op));
-      } else {
-        log().info("electionNode is required param");
-      }
-    }
-  }),
-
-  REJOINLEADERELECTION_OP(REJOINLEADERELECTION, it -> {
-    ZkController zkController = it.handler.coreContainer.getZkController();
-
-    if (zkController != null) {
-      zkController.rejoinShardLeaderElection(it.req.getParams());
-    } else {
-      log().warn("zkController is null in CoreAdminHandler.handleRequestInternal:REJOINLEADERELECTION. No action taken.");
-    }
-  }),
-  INVOKE_OP(INVOKE, new InvokeOp()),
-  BACKUPCORE_OP(BACKUPCORE, new BackupCoreOp()),
-  RESTORECORE_OP(RESTORECORE, new RestoreCoreOp()),
-  CREATESNAPSHOT_OP(CREATESNAPSHOT, new CreateSnapshotOp()),
-  DELETESNAPSHOT_OP(DELETESNAPSHOT, new DeleteSnapshotOp()),
-  LISTSNAPSHOTS_OP(LISTSNAPSHOTS, it -> {
-    final SolrParams params = it.req.getParams();
-    String cname = params.required().get(CoreAdminParams.CORE);
-
-    CoreContainer cc = it.handler.getCoreContainer();
-
-    try ( SolrCore core = cc.getCore(cname) ) {
-      if (core == null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unable to locate core " + cname);
-      }
-
-      SolrSnapshotMetaDataManager mgr = core.getSnapshotMetaDataManager();
-      NamedList result = new NamedList();
-      for (String name : mgr.listSnapshots()) {
-        Optional<SnapshotMetaData> metadata = mgr.getSnapshotMetaData(name);
-        if ( metadata.isPresent() ) {
-          NamedList<String> props = new NamedList<>();
-          props.add(SolrSnapshotManager.GENERATION_NUM, String.valueOf(metadata.get().getGenerationNumber()));
-          props.add(SolrSnapshotManager.INDEX_DIR_PATH, metadata.get().getIndexDirPath());
-          result.add(name, props);
-        }
-      }
-      it.rsp.add(SolrSnapshotManager.SNAPSHOTS_INFO, result);
-    }
-  });
-
-  final CoreAdminParams.CoreAdminAction action;
-  final CoreAdminOp fun;
-
-  CoreAdminOperation(CoreAdminParams.CoreAdminAction action, CoreAdminOp fun) {
-    this.action = action;
-    this.fun = fun;
-  }
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  static Logger log() {
-    return log;
-  }
-
-  /**
-   * Returns the core status for a particular core.
-   * @param cores the enclosing core container
-   * @param cname the name of the core to report on
-   * @param isIndexInfoNeeded whether to include potentially expensive index information; not returned if the core is not loaded
-   * @return a named list of key/value pairs describing the core
-   * @throws IOException LukeRequestHandler can throw an I/O exception
-   */
-  static NamedList<Object> getCoreStatus(CoreContainer cores, String cname, boolean isIndexInfoNeeded) throws IOException {
-    NamedList<Object> info = new SimpleOrderedMap<>();
-
-    if (cores.isCoreLoading(cname)) {
-      info.add(NAME, cname);
-      info.add("isLoaded", "false");
-      info.add("isLoading", "true");
-    } else {
-      if (!cores.isLoaded(cname)) { // Lazily-loaded core, fill in what we can.
-        // It would be a real mistake to load the cores just to get the status
-        CoreDescriptor desc = cores.getUnloadedCoreDescriptor(cname);
-        if (desc != null) {
-          info.add(NAME, desc.getName());
-          info.add("instanceDir", desc.getInstanceDir());
-          // None of the following are guaranteed to be present in a not-yet-loaded core.
-          String tmp = desc.getDataDir();
-          if (StringUtils.isNotBlank(tmp)) info.add("dataDir", tmp);
-          tmp = desc.getConfigName();
-          if (StringUtils.isNotBlank(tmp)) info.add("config", tmp);
-          tmp = desc.getSchemaName();
-          if (StringUtils.isNotBlank(tmp)) info.add("schema", tmp);
-          info.add("isLoaded", "false");
-        }
-      } else {
-        try (SolrCore core = cores.getCore(cname)) {
-          if (core != null) {
-            info.add(NAME, core.getName());
-            info.add("instanceDir", core.getResourceLoader().getInstancePath().toString());
-            info.add("dataDir", normalizePath(core.getDataDir()));
-            info.add("config", core.getConfigResource());
-            info.add("schema", core.getSchemaResource());
-            info.add("startTime", core.getStartTimeStamp());
-            info.add("uptime", core.getUptimeMs());
-            if (cores.isZooKeeperAware()) {
-              info.add("lastPublished", core.getCoreDescriptor().getCloudDescriptor().getLastPublished().toString().toLowerCase(Locale.ROOT));
-              info.add("configVersion", core.getSolrConfig().getZnodeVersion());
-              SimpleOrderedMap cloudInfo = new SimpleOrderedMap<>();
-              cloudInfo.add(COLLECTION, core.getCoreDescriptor().getCloudDescriptor().getCollectionName());
-              cloudInfo.add(SHARD, core.getCoreDescriptor().getCloudDescriptor().getShardId());
-              cloudInfo.add(REPLICA, core.getCoreDescriptor().getCloudDescriptor().getCoreNodeName());
-              info.add("cloud", cloudInfo);
-            }
-            if (isIndexInfoNeeded) {
-              RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
-              try {
-                SimpleOrderedMap<Object> indexInfo = LukeRequestHandler.getIndexInfo(searcher.get().getIndexReader());
-                long size = core.getIndexSize();
-                indexInfo.add("sizeInBytes", size);
-                indexInfo.add("size", NumberUtils.readableSize(size));
-                info.add("index", indexInfo);
-              } finally {
-                searcher.decref();
-              }
-            }
-          }
-        }
-      }
-    }
-    return info;
-  }
-
-  @Override
-  public void execute(CallInfo it) throws Exception {
-    try {
-      fun.execute(it);
-    } catch (SolrException | InterruptedException e) {
-      // No need to re-wrap; throw as-is.
-      throw e;
-    } catch (Exception e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error handling '" + action.name() + "' action", e);
-    }
-  }
-}
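
CoreAdminOperation above binds each admin action to a lambda implementing the shared CoreAdminOp interface, with execute() adding uniform error wrapping around every constant. A minimal sketch of the same shape; all names here are illustrative.

    import java.util.Locale;

    public class EnumOpSketch {
      interface Op {
        void execute(String input) throws Exception;
      }

      enum Operation implements Op {
        UPPER(s -> System.out.println(s.toUpperCase(Locale.ROOT))),
        LOWER(s -> System.out.println(s.toLowerCase(Locale.ROOT)));

        private final Op fun;

        Operation(Op fun) {
          this.fun = fun;
        }

        @Override
        public void execute(String input) throws Exception {
          try {
            fun.execute(input);
          } catch (Exception e) {
            // Uniform wrapping, mirroring the SERVER_ERROR wrapping above.
            throw new RuntimeException("Error handling '" + name() + "' action", e);
          }
        }
      }

      public static void main(String[] args) throws Exception {
        Operation.UPPER.execute("hello");
      }
    }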

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/CreateSnapshotOp.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CreateSnapshotOp.java b/solr/core/src/java/org/apache/solr/handler/admin/CreateSnapshotOp.java
deleted file mode 100644
index cdf2c38..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/CreateSnapshotOp.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.admin;
-
-import org.apache.lucene.index.IndexCommit;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.snapshots.SolrSnapshotManager;
-import org.apache.solr.core.snapshots.SolrSnapshotMetaDataManager;
-
-class CreateSnapshotOp implements CoreAdminHandler.CoreAdminOp {
-  @Override
-  public void execute(CoreAdminHandler.CallInfo it) throws Exception {
-    final SolrParams params = it.req.getParams();
-    String commitName = params.required().get(CoreAdminParams.COMMIT_NAME);
-    String cname = params.required().get(CoreAdminParams.CORE);
-
-    CoreContainer cc = it.handler.getCoreContainer();
-
-    try (SolrCore core = cc.getCore(cname)) {
-      if (core == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unable to locate core " + cname);
-      }
-
-      String indexDirPath = core.getIndexDir();
-      IndexCommit ic = core.getDeletionPolicy().getLatestCommit();
-      if (ic == null) {
-        ic = core.withSearcher(searcher -> searcher.getIndexReader().getIndexCommit());
-      }
-      SolrSnapshotMetaDataManager mgr = core.getSnapshotMetaDataManager();
-      mgr.snapshot(commitName, indexDirPath, ic.getGeneration());
-
-      it.rsp.add(CoreAdminParams.CORE, core.getName());
-      it.rsp.add(CoreAdminParams.COMMIT_NAME, commitName);
-      it.rsp.add(SolrSnapshotManager.INDEX_DIR_PATH, indexDirPath);
-      it.rsp.add(SolrSnapshotManager.GENERATION_NUM, ic.getGeneration());
-      it.rsp.add(SolrSnapshotManager.FILE_LIST, ic.getFileNames());
-    }
-  }
-}
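
Note that a snapshot only records the commit generation and index directory path; no index files are copied. A hedged sketch of client-side usage of the CREATESNAPSHOT action handled above; host, port, core, and snapshot name are illustrative.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class CreateSnapshotClient {
      public static void main(String[] args) throws Exception {
        // core and commitName match the required params read by CreateSnapshotOp.
        URL url = new URL("http://localhost:8983/solr/admin/cores"
            + "?action=CREATESNAPSHOT&core=techproducts&commitName=weekly");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
          // The response echoes core, commitName, index dir path and generation.
          in.lines().forEach(System.out::println);
        }
      }
    }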

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/DeleteSnapshotOp.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/DeleteSnapshotOp.java b/solr/core/src/java/org/apache/solr/handler/admin/DeleteSnapshotOp.java
deleted file mode 100644
index ed1ec05..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/DeleteSnapshotOp.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.admin;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.SolrCore;
-
-
-class DeleteSnapshotOp implements CoreAdminHandler.CoreAdminOp {
-
-  @Override
-  public void execute(CoreAdminHandler.CallInfo it) throws Exception {
-    final SolrParams params = it.req.getParams();
-    String commitName = params.required().get(CoreAdminParams.COMMIT_NAME);
-    String cname = params.required().get(CoreAdminParams.CORE);
-
-    CoreContainer cc = it.handler.getCoreContainer();
-    SolrCore core = cc.getCore(cname);
-    if (core == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unable to locate core " + cname);
-    }
-
-    try {
-      core.deleteNamedSnapshot(commitName);
-      // Ideally we shouldn't need this. This is added since the RPC logic in
-      // OverseerCollectionMessageHandler can not provide the coreName as part of the result.
-      it.rsp.add(CoreAdminParams.CORE, core.getName());
-      it.rsp.add(CoreAdminParams.COMMIT_NAME, commitName);
-    } finally {
-      core.close();
-    }
-  }
-}
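
DeleteSnapshotOp releases the core in a finally block because CoreContainer.getCore() hands out a reference-counted SolrCore that must be closed on every code path. A self-contained sketch of that discipline using try-with-resources; RefCountedResource is an illustrative stand-in.

    public class RefCountSketch {
      static class RefCountedResource implements AutoCloseable {
        private int refs = 1;

        synchronized RefCountedResource acquire() { refs++; return this; }

        @Override
        public synchronized void close() { refs--; }

        synchronized int refs() { return refs; }
      }

      public static void main(String[] args) {
        RefCountedResource core = new RefCountedResource();
        try (RefCountedResource borrowed = core.acquire()) {
          // Work with the borrowed reference; close() runs even if this throws.
          System.out.println("refs while borrowed = " + borrowed.refs());
        }
        System.out.println("refs after release = " + core.refs());
      }
    }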

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/HealthCheckHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/HealthCheckHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/HealthCheckHandler.java
deleted file mode 100644
index 7b07a1e..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/HealthCheckHandler.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.admin;
-
-import java.lang.invoke.MethodHandles;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.FAILURE;
-import static org.apache.solr.common.params.CommonParams.OK;
-import static org.apache.solr.common.params.CommonParams.STATUS;
-
-/**
- * Health check handler for reporting the health of a specific node.
- *
- * The node is reported healthy only if it is:
- * 1. Connected to ZooKeeper, and
- * 2. Listed in 'live_nodes'.
- */
-public class HealthCheckHandler extends RequestHandlerBase {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  CoreContainer coreContainer;
-
-  public HealthCheckHandler(final CoreContainer coreContainer) {
-    super();
-    this.coreContainer = coreContainer;
-  }
-
-  @Override
-  final public void init(NamedList args) {
-
-  }
-
-  public CoreContainer getCoreContainer() {
-    return this.coreContainer;
-  }
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-
-    log.debug("Invoked HealthCheckHandler on [{}]", coreContainer.getZkController().getNodeName());
-    CoreContainer cores = getCoreContainer();
-
-    if(cores == null) {
-      rsp.setException(new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Core container not initialized"));
-      return;
-    }
-    if(!cores.isZooKeeperAware()) {
-      //TODO: Support standalone instances
-      rsp.setException(new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Health check is only available when running in SolrCloud mode"));
-      return;
-    }
-    ZkStateReader zkStateReader = cores.getZkController().getZkStateReader();
-    ClusterState clusterState = zkStateReader.getClusterState();
-    // Check for isConnected and isClosed
-    if(zkStateReader.getZkClient().isClosed() || !zkStateReader.getZkClient().isConnected()) {
-      rsp.add(STATUS, FAILURE);
-      rsp.setException(new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Host Unavailable: Not connected to zk"));
-      return;
-    }
-
-    // Set status to true if this node is in live_nodes
-    if (clusterState.getLiveNodes().contains(cores.getZkController().getNodeName())) {
-      rsp.add(STATUS, OK);
-    } else {
-      rsp.add(STATUS, FAILURE);
-      rsp.setException(new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Host Unavailable: Not in live nodes as per zk"));
-    }
-
-    rsp.setHttpCaching(false);
-  }
-
-  @Override
-  public String getDescription() {
-    return "Health check handler for SolrCloud node";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-}
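
A condensed sketch of the health decision above: the node is healthy only if its ZooKeeper client is open and connected and its own node name appears in live_nodes. ZkView is a hypothetical stand-in for the ZkStateReader/ClusterState pair.

    public class HealthCheckSketch {
      interface ZkView {
        boolean isConnected();
        boolean isClosed();
        java.util.Set<String> liveNodes();
        String nodeName();
      }

      static String status(ZkView zk) {
        if (zk.isClosed() || !zk.isConnected()) {
          return "FAILURE: not connected to zk";
        }
        return zk.liveNodes().contains(zk.nodeName())
            ? "OK"
            : "FAILURE: not in live nodes as per zk";
      }

      public static void main(String[] args) {
        ZkView zk = new ZkView() {
          public boolean isConnected() { return true; }
          public boolean isClosed() { return false; }
          public java.util.Set<String> liveNodes() {
            return java.util.Collections.singleton("node1:8983_solr");
          }
          public String nodeName() { return "node1:8983_solr"; }
        };
        System.out.println(status(zk));
      }
    }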

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/InfoHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/InfoHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/InfoHandler.java
deleted file mode 100644
index a2bfb5b..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/InfoHandler.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import java.util.Collection;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.apache.solr.api.ApiBag.ReqHandlerToApi;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.api.Api;
-
-import static java.util.Collections.singletonList;
-import static org.apache.solr.common.util.Utils.getSpec;
-import static org.apache.solr.common.params.CommonParams.PATH;
-
-public class InfoHandler extends RequestHandlerBase  {
-
-  protected final CoreContainer coreContainer;
-
-  /**
-   * Overloaded ctor to inject CoreContainer into the handler.
-   *
-   * @param coreContainer Core Container of the solr webapp installed.
-   */
-  public InfoHandler(final CoreContainer coreContainer) {
-    this.coreContainer = coreContainer;
-    handlers.put("threads", new ThreadDumpHandler());
-    handlers.put("properties", new PropertiesRequestHandler());
-    handlers.put("logging", new LoggingHandler(coreContainer));
-    handlers.put("system", new SystemInfoHandler(coreContainer));
-  }
-
-
-  @Override
-  final public void init(NamedList args) {
-
-  }
-
-  /**
-   * The CoreContainer instance this handler manages. This should be the
-   * CoreContainer instance that created this handler.
-   *
-   * @return a CoreContainer instance
-   */
-  public CoreContainer getCoreContainer() {
-    return this.coreContainer;
-  }
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    // Make sure the core container is available
-    CoreContainer cores = getCoreContainer();
-    if (cores == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "Core container instance missing");
-    }
-
-    String path = (String) req.getContext().get(PATH);
-    handle(req, rsp, path);
-  }
-
-  private void handle(SolrQueryRequest req, SolrQueryResponse rsp, String path) {
-    int i = path.lastIndexOf('/');
-    String name = path.substring(i + 1);
-    RequestHandlerBase handler = handlers.get(name.toLowerCase(Locale.ROOT));
-    if (handler == null) {
-      throw new SolrException(SolrException.ErrorCode.NOT_FOUND, "No handler by name " + name + "; available names are " + handlers.keySet());
-    }
-    handler.handleRequest(req, rsp);
-    rsp.setHttpCaching(false);
-  }
-
-
-  //////////////////////// SolrInfoMBeans methods //////////////////////
-
-  @Override
-  public String getDescription() {
-    return "System Information";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-
-  protected PropertiesRequestHandler getPropertiesHandler() {
-    return (PropertiesRequestHandler) handlers.get("properties");
-
-  }
-
-  protected ThreadDumpHandler getThreadDumpHandler() {
-    return (ThreadDumpHandler) handlers.get("threads");
-  }
-
-  protected LoggingHandler getLoggingHandler() {
-    return (LoggingHandler) handlers.get("logging");
-  }
-
-  protected SystemInfoHandler getSystemInfoHandler() {
-    return (SystemInfoHandler) handlers.get("system");
-  }
-
-  protected void setPropertiesHandler(PropertiesRequestHandler propertiesHandler) {
-    handlers.put("properties", propertiesHandler);
-  }
-
-  protected void setThreadDumpHandler(ThreadDumpHandler threadDumpHandler) {
-    handlers.put("threads", threadDumpHandler);
-  }
-
-  protected void setLoggingHandler(LoggingHandler loggingHandler) {
-    handlers.put("logging", loggingHandler);
-  }
-
-  protected void setSystemInfoHandler(SystemInfoHandler systemInfoHandler) {
-    handlers.put("system", systemInfoHandler);
-  }
-
-  @Override
-  public SolrRequestHandler getSubHandler(String subPath) {
-    return this;
-  }
-
-  private Map<String, RequestHandlerBase> handlers = new ConcurrentHashMap<>();
-
-  @Override
-  public Collection<Api> getApis() {
-    return singletonList(new ReqHandlerToApi(this, getSpec("node.Info")));
-  }
-
-  @Override
-  public Boolean registerV2() {
-    return Boolean.TRUE;
-  }
-}
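
InfoHandler's dispatch above takes the last path segment, lowercases it, and looks up a registered sub-handler; getSubHandler returns the handler itself so nested paths route back through the same map. A minimal sketch of that dispatch; the handler type is illustrative.

    import java.util.Locale;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Consumer;

    public class PathDispatchSketch {
      private final Map<String, Consumer<String>> handlers = new ConcurrentHashMap<>();

      public PathDispatchSketch() {
        handlers.put("threads", p -> System.out.println("thread dump for " + p));
        handlers.put("system", p -> System.out.println("system info for " + p));
      }

      void handle(String path) {
        // The last path segment selects the sub-handler, case-insensitively.
        String name = path.substring(path.lastIndexOf('/') + 1);
        Consumer<String> handler = handlers.get(name.toLowerCase(Locale.ROOT));
        if (handler == null) {
          throw new IllegalArgumentException(
              "No handler by name " + name + "; available names are " + handlers.keySet());
        }
        handler.accept(path);
      }

      public static void main(String[] args) {
        new PathDispatchSketch().handle("/admin/info/system");
      }
    }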

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/InvokeOp.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/InvokeOp.java b/solr/core/src/java/org/apache/solr/handler/admin/InvokeOp.java
deleted file mode 100644
index 04002fa..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/InvokeOp.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.admin;
-
-import java.lang.invoke.MethodHandles;
-import java.util.Map;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.solr.request.SolrQueryRequest;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-class InvokeOp implements CoreAdminHandler.CoreAdminOp {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  static Map<String, Object> invokeAClass(SolrQueryRequest req, String c) {
-    SolrResourceLoader loader = null;
-    if (req.getCore() != null) loader = req.getCore().getResourceLoader();
-    else if (req.getContext().get(CoreContainer.class.getName()) != null) {
-      CoreContainer cc = (CoreContainer) req.getContext().get(CoreContainer.class.getName());
-      loader = cc.getResourceLoader();
-    }
-
-    CoreAdminHandler.Invocable invokable = loader.newInstance(c, CoreAdminHandler.Invocable.class);
-    Map<String, Object> result = invokable.invoke(req);
-    log.info("Invocable_invoked {}", result);
-    return result;
-  }
-
-  @Override
-  public void execute(CoreAdminHandler.CallInfo it) throws Exception {
-    String[] klas = it.req.getParams().getParams("class");
-    if (klas == null || klas.length == 0) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "class is a required param");
-    }
-    for (String c : klas) {
-      Map<String, Object> result = invokeAClass(it.req, c);
-      it.rsp.add(c, result);
-    }
-  }
-}
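
InvokeOp loads each value of the 'class' parameter through the resource loader and calls its invoke method, adding the returned map to the response. A hedged sketch of what a user-supplied Invocable might look like; the package, class name, and payload are illustrative.

    import java.util.Collections;
    import java.util.Map;

    import org.apache.solr.handler.admin.CoreAdminHandler;
    import org.apache.solr.request.SolrQueryRequest;

    // Invoked via e.g. /solr/admin/cores?action=INVOKE&class=com.example.PingInvocable
    public class PingInvocable implements CoreAdminHandler.Invocable {
      @Override
      public Map<String, Object> invoke(SolrQueryRequest req) {
        // The returned map is added to the response, keyed by the class name.
        return Collections.singletonMap("ping", "pong");
      }
    }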

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/LoggingHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/LoggingHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/LoggingHandler.java
deleted file mode 100644
index ef52636..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/LoggingHandler.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.logging.LogWatcher;
-import org.apache.solr.logging.LoggerInfo;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.util.plugin.SolrCoreAware;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-/**
- * A request handler that shows which loggers are registered and allows you to set their levels.
- *
- * @since 4.0
- */
-public class LoggingHandler extends RequestHandlerBase implements SolrCoreAware {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private LogWatcher watcher;
-  
-  public LoggingHandler(CoreContainer cc) {
-    this.watcher = cc.getLogging();
-  }
-  
-  public LoggingHandler() {
-    
-  }
-  
-  @Override
-  public void inform(SolrCore core) {
-    if (watcher == null) {
-      watcher = core.getCoreContainer().getLogging();
-    }
-  }
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    // Don't do anything if the framework is unknown
-    if (watcher==null) {
-      rsp.add("error", "Logging Not Initialized");
-      return;
-    }
-    rsp.add("watcher", watcher.getName());
-    
-    SolrParams params = req.getParams();
-    if(params.get("threshold")!=null) {
-      watcher.setThreshold(params.get("threshold"));
-    }
-    
-    // Write something at each level
-    if(params.get("test")!=null) {
-      log.trace("trace message");
-      log.debug( "debug message");
-      log.info("info (with exception)", new RuntimeException("test") );
-      log.warn("warn (with exception)", new RuntimeException("test") );
-      log.error("error (with exception)", new RuntimeException("test") );
-    }
-    
-    String[] set = params.getParams("set");
-    if (set != null) {
-      for (String pair : set) {
-        String[] split = pair.split(":");
-        if (split.length != 2) {
-          throw new SolrException(
-              SolrException.ErrorCode.SERVER_ERROR,
-              "Invalid format, expected level:value, got " + pair);
-        }
-        String category = split[0];
-        String level = split[1];
-
-        watcher.setLogLevel(category, level);
-      }
-    }
-    
-    String since = req.getParams().get("since");
-    if(since != null) {
-      long time = -1;
-      try {
-        time = Long.parseLong(since);
-      }
-      catch(Exception ex) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "invalid timestamp: "+since);
-      }
-      AtomicBoolean found = new AtomicBoolean(false);
-      SolrDocumentList docs = watcher.getHistory(time, found);
-      if(docs==null) {
-        rsp.add("error", "History not enabled");
-        return;
-      }
-      else {
-        SimpleOrderedMap<Object> info = new SimpleOrderedMap<>();
-        if(time>0) {
-          info.add("since", time);
-          info.add("found", found.get());
-        }
-        else {
-          info.add("levels", watcher.getAllLevels()); // show for the first request
-        }
-        info.add("last", watcher.getLastEvent());
-        info.add("buffer", watcher.getHistorySize());
-        info.add("threshold", watcher.getThreshold());
-        
-        rsp.add("info", info);
-        rsp.add("history", docs);
-      }
-    }
-    else {
-      rsp.add("levels", watcher.getAllLevels());
-  
-      List<LoggerInfo> loggers = new ArrayList<>(watcher.getAllLoggers());
-      Collections.sort(loggers);
-  
-      List<SimpleOrderedMap<?>> info = new ArrayList<>();
-      for (LoggerInfo wrap : loggers) {
-        info.add(wrap.getInfo());
-      }
-      rsp.add("loggers", info);
-    }
-    rsp.setHttpCaching(false);
-  }
-
-  // ////////////////////// SolrInfoMBeans methods //////////////////////
-
-  @Override
-  public String getDescription() {
-    return "Logging Handler";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-
-}
\ No newline at end of file
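
A hedged usage sketch for the handler above: each value of the 'set' parameter is parsed as category:level by the split(":") loop, and 'since' accepts a millisecond timestamp for history. Host, port, and category below are illustrative; the path assumes the stock InfoHandler wiring shown earlier, which registers this handler under "logging".

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class SetLogLevelClient {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:8983/solr/admin/info/logging"
            + "?set=org.apache.solr.core:DEBUG&wt=json");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        System.out.println("HTTP " + conn.getResponseCode());
      }
    }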


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/SecurityConfHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/SecurityConfHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/SecurityConfHandler.java
deleted file mode 100644
index 510ef48..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/SecurityConfHandler.java
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-
-import com.google.common.collect.ImmutableList;
-import org.apache.solr.api.ApiBag;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.handler.RequestHandlerUtils;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.security.AuthenticationPlugin;
-import org.apache.solr.security.AuthorizationContext;
-import org.apache.solr.security.AuthorizationPlugin;
-import org.apache.solr.security.ConfigEditablePlugin;
-import org.apache.solr.security.PermissionNameProvider;
-import org.apache.solr.common.util.CommandOperation;
-import org.apache.solr.api.Api;
-import org.apache.solr.api.ApiBag.ReqHandlerToApi;
-import org.apache.solr.common.SpecProvider;
-import org.apache.solr.common.util.JsonSchemaValidator;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.SolrException.ErrorCode.SERVER_ERROR;
-
-public abstract class SecurityConfHandler extends RequestHandlerBase implements PermissionNameProvider {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  protected CoreContainer cores;
-
-  public SecurityConfHandler(CoreContainer coreContainer) {
-    this.cores = coreContainer;
-  }
-
-  @Override
-  public PermissionNameProvider.Name getPermissionName(AuthorizationContext ctx) {
-    switch (ctx.getHttpMethod()) {
-      case "GET":
-        return PermissionNameProvider.Name.SECURITY_READ_PERM;
-      case "POST":
-        return PermissionNameProvider.Name.SECURITY_EDIT_PERM;
-      default:
-        return null;
-    }
-  }
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    RequestHandlerUtils.setWt(req, CommonParams.JSON);
-    String httpMethod = (String) req.getContext().get("httpMethod");
-    String path = (String) req.getContext().get("path");
-    String key = path.substring(path.lastIndexOf('/')+1);
-    if ("GET".equals(httpMethod)) {
-      getConf(rsp, key);
-    } else if ("POST".equals(httpMethod)) {
-      Object plugin = getPlugin(key);
-      doEdit(req, rsp, path, key, plugin);
-    }
-  }
-
-  private void doEdit(SolrQueryRequest req, SolrQueryResponse rsp, String path, final String key, final Object plugin)
-      throws IOException {
-    ConfigEditablePlugin configEditablePlugin = null;
-
-    if (plugin == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No " + key + " plugin configured");
-    }
-    if (plugin instanceof ConfigEditablePlugin) {
-      configEditablePlugin = (ConfigEditablePlugin) plugin;
-    } else {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, key + " plugin is not editable");
-    }
-
-    if (req.getContentStreams() == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No contentStream");
-    }
-    List<CommandOperation> ops = CommandOperation.readCommands(req.getContentStreams(), rsp.getValues());
-    if (ops == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No commands");
-    }
-    for (int count = 1; count <= 3 ; count++ ) {
-      SecurityConfig securityConfig = getSecurityConfig(true);
-      Map<String, Object> data = securityConfig.getData();
-      Map<String, Object> latestConf = (Map<String, Object>) data.get(key);
-      if (latestConf == null) {
-        throw new SolrException(SERVER_ERROR, "No configuration present for " + key);
-      }
-      List<CommandOperation> commandsCopy = CommandOperation.clone(ops);
-      Map<String, Object> out = configEditablePlugin.edit(Utils.getDeepCopy(latestConf, 4) , commandsCopy);
-      if (out == null) {
-        List<Map> errs = CommandOperation.captureErrors(commandsCopy);
-        if (!errs.isEmpty()) {
-          rsp.add(CommandOperation.ERR_MSGS, errs);
-          return;
-        }
-        log.debug("No edits made");
-        return;
-      } else {
-        if(!Objects.equals(latestConf.get("class") , out.get("class"))){
-          throw new SolrException(SERVER_ERROR, "class cannot be modified");
-        }
-        Map meta = getMapValue(out, "");
-        meta.put("v", securityConfig.getVersion()+1);//encode the expected zkversion
-        data.put(key, out);
-        
-        if(persistConf(securityConfig)) {
-          securityConfEdited();
-          return;
-        }
-      }
-      log.debug("Security edit operation failed {} time(s)" + count);
-    }
-    throw new SolrException(SERVER_ERROR, "Failed to persist security config after 3 attempts. Giving up");
-  }
-
-  /**
-   * Hook for subclasses to react after a config has been edited. Defaults to a no-op.
-   */
-  protected void securityConfEdited() {}
-
-  Object getPlugin(String key) {
-    Object plugin = null;
-    if ("authentication".equals(key)) plugin = cores.getAuthenticationPlugin();
-    if ("authorization".equals(key)) plugin = cores.getAuthorizationPlugin();
-    return plugin;
-  }
-
-  protected abstract void getConf(SolrQueryResponse rsp, String key);
-
-  public static Map<String, Object> getMapValue(Map<String, Object> lookupMap, String key) {
-    Map<String, Object> m = (Map<String, Object>) lookupMap.get(key);
-    if (m == null) lookupMap.put(key, m = new LinkedHashMap<>());
-    return m;
-  }
-
-  public static List getListValue(Map<String, Object> lookupMap, String key) {
-    List l = (List) lookupMap.get(key);
-    if (l == null) lookupMap.put(key, l= new ArrayList());
-    return l;
-  }
-
-  @Override
-  public String getDescription() {
-    return "Edit or read security configuration";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-
-  /**
-   * Gets security.json from source
-   */
-  public abstract SecurityConfig getSecurityConfig(boolean getFresh);
-
-  /**
-   * Persist security.json to the source, optionally with a version
-   */
-  protected abstract boolean persistConf(SecurityConfig securityConfig) throws IOException;
-
-  /**
-   * Object to hold security.json as nested <code>Map&lt;String,Object&gt;</code> and optionally its version.
-   * The version property is optional and defaults to -1 if not initialized.
-   * The data object defaults to EMPTY_MAP if not set
-   */
-  public static class SecurityConfig {
-    private Map<String, Object> data = Collections.EMPTY_MAP;
-    private int version = -1;
-
-    public SecurityConfig() {}
-
-    /**
-     * Sets the data as a Map
-     * @param data a Map
-     * @return this SecurityConfig object (builder pattern)
-     */
-    public SecurityConfig setData(Map<String, Object> data) {
-      this.data = data;
-      return this;
-    }
-
-    /**
-     * Sets the data as an Object, but the object needs to be of type Map
-     * @param data an Object of type Map&lt;String,Object&gt;
-     * @return this SecurityConfig object (builder pattern)
-     */
-    public SecurityConfig setData(Object data) {
-      if (data instanceof Map) {
-        this.data = (Map<String, Object>) data;
-        return this;
-      } else {
-        throw new SolrException(SERVER_ERROR, "Illegal format when parsing security.json, not object");
-      }
-    }
-
-    /**
-     * Sets version
-     * @param version integer for version. Depends on underlying storage
-     * @return this SecurityConfig object (builder pattern)
-     */
-    public SecurityConfig setVersion(int version) {
-      this.version = version;
-      return this;
-    }
-
-    public Map<String, Object> getData() {
-      return data;
-    }
-
-    public int getVersion() {
-      return version;
-    }
-
-    /**
-     * Set data from input stream
-     * @param securityJsonInputStream an input stream for security.json
-     * @return this (builder pattern)
-     */
-    public SecurityConfig setData(InputStream securityJsonInputStream) {
-      return setData(Utils.fromJSON(securityJsonInputStream));
-    }
-
-    public String toString() {
-      return "SecurityConfig: version=" + version + ", data=" + Utils.toJSONString(data);
-    } 
-  }
-
-  private Collection<Api> apis;
-  private AuthenticationPlugin authcPlugin;
-  private AuthorizationPlugin authzPlugin;
-
-  @Override
-  public Collection<Api> getApis() {
-    if (apis == null) {
-      synchronized (this) {
-        if (apis == null) {
-          Collection<Api> apis = new ArrayList<>();
-          final SpecProvider authcCommands = Utils.getSpec("cluster.security.authentication.Commands");
-          final SpecProvider authzCommands = Utils.getSpec("cluster.security.authorization.Commands");
-          apis.add(new ReqHandlerToApi(this, Utils.getSpec("cluster.security.authentication")));
-          apis.add(new ReqHandlerToApi(this, Utils.getSpec("cluster.security.authorization")));
-          SpecProvider authcSpecProvider = () -> {
-            AuthenticationPlugin authcPlugin = cores.getAuthenticationPlugin();
-            return authcPlugin != null && authcPlugin instanceof SpecProvider ?
-                ((SpecProvider) authcPlugin).getSpec() :
-                authcCommands.getSpec();
-          };
-
-          apis.add(new ReqHandlerToApi(this, authcSpecProvider) {
-            @Override
-            public synchronized Map<String, JsonSchemaValidator> getCommandSchema() {
-              // The Authentication plugin may have been swapped out since the last call;
-              // if so, invalidate the cached commandSchema.
-              if(SecurityConfHandler.this.authcPlugin != cores.getAuthenticationPlugin()) commandSchema = null;
-              SecurityConfHandler.this.authcPlugin = cores.getAuthenticationPlugin();
-              return super.getCommandSchema();
-            }
-          });
-
-          SpecProvider authzSpecProvider = () -> {
-            AuthorizationPlugin authzPlugin = cores.getAuthorizationPlugin();
-            return authzPlugin != null && authzPlugin instanceof SpecProvider ?
-                ((SpecProvider) authzPlugin).getSpec() :
-                authzCommands.getSpec();
-          };
-          apis.add(new ApiBag.ReqHandlerToApi(this, authzSpecProvider) {
-            @Override
-            public synchronized Map<String, JsonSchemaValidator> getCommandSchema() {
-              // The Authorization plugin may have been swapped out since the last call;
-              // if so, invalidate the cached commandSchema.
-              if(SecurityConfHandler.this.authzPlugin != cores.getAuthorizationPlugin()) commandSchema = null;
-              SecurityConfHandler.this.authzPlugin = cores.getAuthorizationPlugin();
-              return super.getCommandSchema();
-            }
-          });
-
-          this.apis = ImmutableList.copyOf(apis);
-        }
-      }
-    }
-    return this.apis;
-  }
-
-  @Override
-  public Boolean registerV2() {
-    return Boolean.TRUE;
-  }
-}
-

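A note on the doEdit() loop in the deleted SecurityConfHandler above: it is a standard
optimistic-concurrency pattern. Read the latest config and its version, apply the command
operations to a copy, then attempt a versioned write, retrying up to three times if another
writer got there first. Below is a minimal self-contained sketch of that pattern; the class
and method names are toy stand-ins, not Solr API.

    import java.util.concurrent.atomic.AtomicLong;

    public class VersionedStore {
      private final AtomicLong version = new AtomicLong(0);
      private volatile String data = "{}";

      // analogous to persistConf(): succeeds only if nobody wrote in between
      synchronized boolean writeIfVersionMatches(long expected, String newData) {
        if (version.get() != expected) return false; // stale read, caller retries
        data = newData;
        version.incrementAndGet();
        return true;
      }

      void edit(String newData) {
        for (int attempt = 1; attempt <= 3; attempt++) {
          long v = version.get();                    // fresh read, like getSecurityConfig(true)
          if (writeIfVersionMatches(v, newData)) return;
        }
        throw new IllegalStateException("Failed to persist after 3 attempts. Giving up");
      }

      public static void main(String[] args) {
        VersionedStore store = new VersionedStore();
        store.edit("{\"authentication\":{}}");
        System.out.println("write succeeded");
      }
    }

The versioned write is what makes concurrent editors safe: a stale version simply fails
and the loop re-reads instead of clobbering the other writer's change.
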
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/SecurityConfHandlerLocal.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/SecurityConfHandlerLocal.java b/solr/core/src/java/org/apache/solr/handler/admin/SecurityConfHandlerLocal.java
deleted file mode 100644
index 69ae3ed..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/SecurityConfHandlerLocal.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.admin;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.lang.invoke.MethodHandles;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.Collections;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.common.util.CommandOperation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Security Configuration Handler which works on standalone local files
- */
-public class SecurityConfHandlerLocal extends SecurityConfHandler {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  protected Path securityJsonPath;
-  
-  public SecurityConfHandlerLocal(CoreContainer coreContainer) {
-    super(coreContainer);
-    securityJsonPath = Paths.get(coreContainer.getSolrHome()).resolve("security.json");
-  }
-
-  /**
-   * Fetches security props from SOLR_HOME
-   * @param getFresh ignored; the local file is always read fresh from disk
-   * @return SecurityConfig whose data property either contains security.json, or an empty map if not found
-   */
-  @Override
-  public SecurityConfig getSecurityConfig(boolean getFresh) {
-    if (Files.exists(securityJsonPath)) {
-      try (InputStream securityJsonIs = Files.newInputStream(securityJsonPath)) {
-        return new SecurityConfig().setData(securityJsonIs);
-      } catch (Exception e) { 
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Failed opening existing security.json file: " + securityJsonPath, e);
-      }
-    }
-    return new SecurityConfig();
-  }
-
-  @Override
-  protected void getConf(SolrQueryResponse rsp, String key) {
-    SecurityConfig props = getSecurityConfig(false);
-    Object o = props.getData().get(key);
-    if (o == null) {
-      rsp.add(CommandOperation.ERR_MSGS, Collections.singletonList("No " + key + " configured"));
-    } else {
-      rsp.add(key+".enabled", getPlugin(key)!=null);
-      rsp.add(key, o);
-    }
-  }
-  
-  @Override
-  protected boolean persistConf(SecurityConfig securityConfig) throws IOException {
-    if (securityConfig == null || securityConfig.getData().isEmpty()) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, 
-          "Failed persisting security.json to SOLR_HOME. Object was empty.");
-    }
-    try(OutputStream securityJsonOs = Files.newOutputStream(securityJsonPath)) {
-      securityJsonOs.write(Utils.toJSON(securityConfig.getData()));
-      log.debug("Persisted security.json to {}", securityJsonPath);
-      return true;
-    } catch (Exception e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, 
-          "Failed persisting security.json to " + securityJsonPath, e);
-    }
-  }
-
-  @Override
-  public String getDescription() {
-    return "Edit or read security configuration locally in SOLR_HOME";
-  }
-
-  @Override
-  protected void securityConfEdited() {
-    // Need to call explicitly since we will not get notified of changes to local security.json
-    cores.securityNodeChanged();
-  }
-}

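The local variant above reduces to reading and writing one JSON file under SOLR_HOME, with
no version tracking (hence getFresh being ignored). A rough standalone equivalent of that
read/write cycle in plain java.nio follows; the system property used for the home directory
is illustrative only.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class LocalSecurityJson {
      public static void main(String[] args) throws IOException {
        Path securityJson = Paths.get(System.getProperty("solr.solr.home", "."))
            .resolve("security.json");
        // read-if-exists, mirroring getSecurityConfig(false)
        String current = Files.exists(securityJson)
            ? new String(Files.readAllBytes(securityJson), StandardCharsets.UTF_8)
            : "{}";
        System.out.println("current: " + current);
        // write-back, mirroring persistConf(...)
        Files.write(securityJson, current.getBytes(StandardCharsets.UTF_8));
      }
    }
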
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/SecurityConfHandlerZk.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/SecurityConfHandlerZk.java b/solr/core/src/java/org/apache/solr/handler/admin/SecurityConfHandlerZk.java
deleted file mode 100644
index 9e77fe3..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/SecurityConfHandlerZk.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.admin;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.Collections;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.common.util.CommandOperation;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.SolrException.ErrorCode.SERVER_ERROR;
-import static org.apache.solr.common.cloud.ZkStateReader.SOLR_SECURITY_CONF_PATH;
-
-/**
- * Security Configuration Handler which works with Zookeeper
- */
-public class SecurityConfHandlerZk extends SecurityConfHandler {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  public SecurityConfHandlerZk(CoreContainer coreContainer) {
-    super(coreContainer);
-  }
-
-  /**
-   * Fetches security props from Zookeeper and adds version
-   * @param getFresh refresh from ZK
-   * @return SecurityConfig whose data property either contains security.json, or an empty map if not found
-   */
-  @Override
-  public SecurityConfig getSecurityConfig(boolean getFresh) {
-    ZkStateReader.ConfigData configDataFromZk = cores.getZkController().getZkStateReader().getSecurityProps(getFresh);
-    return configDataFromZk == null ? 
-        new SecurityConfig() :
-        new SecurityConfig().setData(configDataFromZk.data).setVersion(configDataFromZk.version);
-  }
-
-  @Override
-  protected void getConf(SolrQueryResponse rsp, String key) {
-    ZkStateReader.ConfigData map = cores.getZkController().getZkStateReader().getSecurityProps(false);
-    Object o = map == null ? null : map.data.get(key);
-    if (o == null) {
-      rsp.add(CommandOperation.ERR_MSGS, Collections.singletonList("No " + key + " configured"));
-    } else {
-      rsp.add(key+".enabled", getPlugin(key)!=null);
-      rsp.add(key, o);
-    }
-  }
-  
-  @Override
-  protected boolean persistConf(SecurityConfig securityConfig) throws IOException {
-    try {
-      cores.getZkController().getZkClient().setData(SOLR_SECURITY_CONF_PATH, 
-          Utils.toJSON(securityConfig.getData()), 
-          securityConfig.getVersion(), true);
-      log.debug("Persisted security.json to {}", SOLR_SECURITY_CONF_PATH);
-      return true;
-    } catch (KeeperException.BadVersionException bdve){
-      log.warn("Failed persisting security.json to {}", SOLR_SECURITY_CONF_PATH, bdve);
-      return false;
-    } catch (Exception e) {
-      throw new SolrException(SERVER_ERROR, "Unable to persist security.json", e);
-    }
-  }
-  
-  @Override
-  public String getDescription() {
-    return "Edit or read security configuration from Zookeeper";
-  }
-  
-}

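In the ZooKeeper variant, the optimistic concurrency comes from setData()'s expected-version
argument: the write succeeds only if the znode is still at that version, and
BadVersionException makes persistConf() return false so the retry loop in SecurityConfHandler
re-reads. A hedged sketch of the same idea against the raw ZooKeeper client (path and error
handling simplified; Solr actually goes through SolrZkClient):

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkVersionedWrite {
      // requires an already-connected ZooKeeper handle;
      // returns false on version conflict so the caller can re-read and retry
      static boolean persist(ZooKeeper zk, String path, byte[] json, int expectedVersion)
          throws KeeperException, InterruptedException {
        try {
          // fails unless the znode is still at expectedVersion
          zk.setData(path, json, expectedVersion);
          return true;
        } catch (KeeperException.BadVersionException e) {
          return false;
        }
      }
    }
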
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/SegmentsInfoRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/SegmentsInfoRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/SegmentsInfoRequestHandler.java
deleted file mode 100644
index 740280b..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/SegmentsInfoRequestHandler.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.List;
-
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.MergePolicy;
-import org.apache.lucene.index.MergePolicy.MergeSpecification;
-import org.apache.lucene.index.MergePolicy.OneMerge;
-import org.apache.lucene.index.MergeTrigger;
-import org.apache.lucene.index.SegmentCommitInfo;
-import org.apache.lucene.index.SegmentInfos;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.util.RefCounted;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-/**
- * This handler exposes information about the segments of the last commit generation
- */
-public class SegmentsInfoRequestHandler extends RequestHandlerBase {
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp)
-      throws Exception {
-    rsp.add("segments", getSegmentsInfo(req, rsp));
-    rsp.setHttpCaching(false);
-  }
-
-  private SimpleOrderedMap<Object> getSegmentsInfo(SolrQueryRequest req, SolrQueryResponse rsp)
-      throws Exception {
-    SolrIndexSearcher searcher = req.getSearcher();
-
-    SegmentInfos infos =
-        SegmentInfos.readLatestCommit(searcher.getIndexReader().directory());
-
-    List<String> mergeCandidates = getMergeCandidatesNames(req, infos);
-
-    SimpleOrderedMap<Object> segmentInfos = new SimpleOrderedMap<>();
-    SimpleOrderedMap<Object> segmentInfo = null;
-    List<SegmentCommitInfo> sortable = new ArrayList<>();
-    sortable.addAll(infos.asList());
-    // Order by the number of live docs. The display is logarithmic so it is a little jumbled visually
-    sortable.sort((s1, s2) -> {
-      return (s2.info.maxDoc() - s2.getDelCount()) - (s1.info.maxDoc() - s1.getDelCount());
-    });
-    for (SegmentCommitInfo segmentCommitInfo : sortable) {
-      segmentInfo = getSegmentInfo(segmentCommitInfo);
-      if (mergeCandidates.contains(segmentCommitInfo.info.name)) {
-        segmentInfo.add("mergeCandidate", true);
-      }
-      segmentInfos.add((String) segmentInfo.get(NAME), segmentInfo);
-    }
-
-    return segmentInfos;
-  }
-
-  private SimpleOrderedMap<Object> getSegmentInfo(
-      SegmentCommitInfo segmentCommitInfo) throws IOException {
-    SimpleOrderedMap<Object> segmentInfoMap = new SimpleOrderedMap<>();
-
-    segmentInfoMap.add(NAME, segmentCommitInfo.info.name);
-    segmentInfoMap.add("delCount", segmentCommitInfo.getDelCount());
-    segmentInfoMap.add("sizeInBytes", segmentCommitInfo.sizeInBytes());
-    segmentInfoMap.add("size", segmentCommitInfo.info.maxDoc());
-    Long timestamp = Long.parseLong(segmentCommitInfo.info.getDiagnostics()
-        .get("timestamp"));
-    segmentInfoMap.add("age", new Date(timestamp));
-    segmentInfoMap.add("source",
-        segmentCommitInfo.info.getDiagnostics().get("source"));
-    segmentInfoMap.add("version", segmentCommitInfo.info.getVersion().toString());
-
-    return segmentInfoMap;
-  }
-
-  private List<String> getMergeCandidatesNames(SolrQueryRequest req, SegmentInfos infos) throws IOException {
-    List<String> result = new ArrayList<String>();
-    RefCounted<IndexWriter> refCounted = req.getCore().getSolrCoreState().getIndexWriter(req.getCore());
-    try {
-      IndexWriter indexWriter = refCounted.get();
-      //get chosen merge policy
-      MergePolicy mp = indexWriter.getConfig().getMergePolicy();
-      //Find merges
-      MergeSpecification findMerges = mp.findMerges(MergeTrigger.EXPLICIT, infos, indexWriter);
-      if (findMerges != null && findMerges.merges != null && findMerges.merges.size() > 0) {
-        for (OneMerge merge : findMerges.merges) {
-          //TODO: add merge grouping
-          for (SegmentCommitInfo mergeSegmentInfo : merge.segments) {
-            result.add(mergeSegmentInfo.info.name);
-          }
-        }
-      }
-
-      return result;
-    } finally {
-      refCounted.decref();
-    }
-  }
-
-  @Override
-  public String getDescription() {
-    return "Lucene segments info.";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-}

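The sort in getSegmentsInfo() above orders segments by live document count, i.e. maxDoc
minus deletions, largest first. A self-contained illustration of that ordering with a plain
comparator follows; the Seg type is a toy stand-in for Lucene's SegmentCommitInfo, and
Comparator.comparingInt(...).reversed() also sidesteps the subtraction-based comparator in
the original.

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    public class SegmentOrdering {
      static final class Seg {
        final String name; final int maxDoc, delCount;
        Seg(String name, int maxDoc, int delCount) {
          this.name = name; this.maxDoc = maxDoc; this.delCount = delCount;
        }
        int liveDocs() { return maxDoc - delCount; }
      }

      public static void main(String[] args) {
        List<Seg> segs = new ArrayList<>();
        segs.add(new Seg("_0", 1000, 10));
        segs.add(new Seg("_1", 500, 0));
        segs.add(new Seg("_2", 2000, 1800));
        // descending by live docs, the same ordering as the handler's lambda
        segs.sort(Comparator.comparingInt((Seg s) -> s.liveDocs()).reversed());
        segs.forEach(s -> System.out.println(s.name + " live=" + s.liveDocs()));
      }
    }
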
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java
deleted file mode 100644
index 02577f1..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java
+++ /dev/null
@@ -1,371 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import org.apache.solr.cloud.ZkSolrResourceLoader;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.ContentStreamBase;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.RawResponseWriter;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.lang.invoke.MethodHandles;
-import java.net.URISyntaxException;
-import java.util.Date;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Locale;
-import java.util.Set;
-
-/**
- * This handler uses the RawResponseWriter to give clients access to
- * files inside ${solr.home}/conf
- * <p>
- * If you want to selectively restrict access to some configuration files, you can list
- * those files in the {@link #HIDDEN} invariants.  For example, to hide 
- * synonyms.txt and anotherfile.txt, you would register:
- * <br>
- * <pre>
- * &lt;requestHandler name="/admin/file" class="org.apache.solr.handler.admin.ShowFileRequestHandler" &gt;
- *   &lt;lst name="defaults"&gt;
- *    &lt;str name="echoParams"&gt;explicit&lt;/str&gt;
- *   &lt;/lst&gt;
- *   &lt;lst name="invariants"&gt;
- *    &lt;str name="hidden"&gt;synonyms.txt&lt;/str&gt; 
- *    &lt;str name="hidden"&gt;anotherfile.txt&lt;/str&gt;
- *    &lt;str name="hidden"&gt;*&lt;/str&gt;
- *   &lt;/lst&gt;
- * &lt;/requestHandler&gt;
- * </pre>
- *
- * At present, only explicit file names (including path) and the glob '*' are supported; variants like '*.xml'
- * are NOT supported.
- *
- * <p>
- * The ShowFileRequestHandler uses the {@link RawResponseWriter} (wt=raw) to return
- * file contents.  If you need to use a different writer, you will need to change 
- * the registered invariant param for wt.
- * <p>
- * If you want to override the contentType header returned for a given file, you can
- * set it directly using: {@link #USE_CONTENT_TYPE}.  For example, to get a plain text
- * version of schema.xml, try:
- * <pre>
- *   http://localhost:8983/solr/admin/file?file=schema.xml&amp;contentType=text/plain
- * </pre>
- *
- *
- * @since solr 1.3
- */
-public class ShowFileRequestHandler extends RequestHandlerBase
-{
-  public static final String HIDDEN = "hidden";
-  public static final String USE_CONTENT_TYPE = "contentType";
-
-  protected Set<String> hiddenFiles;
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-
-  public ShowFileRequestHandler()
-  {
-    super();
-  }
-
-  @Override
-  public void init(NamedList args) {
-    super.init( args );
-    hiddenFiles = initHidden(invariants);
-  }
-
-  public static Set<String> initHidden(SolrParams invariants) {
-
-    Set<String> hiddenRet = new HashSet<>();
-    // Build a list of hidden files
-    if (invariants != null) {
-      String[] hidden = invariants.getParams(HIDDEN);
-      if (hidden != null) {
-        for (String s : hidden) {
-          hiddenRet.add(s.toUpperCase(Locale.ROOT));
-        }
-      }
-    }
-    return hiddenRet;
-  }
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp)
-      throws InterruptedException, KeeperException, IOException {
-
-    CoreContainer coreContainer = req.getCore().getCoreContainer();
-    if (coreContainer.isZooKeeperAware()) {
-      showFromZooKeeper(req, rsp, coreContainer);
-    } else {
-      showFromFileSystem(req, rsp);
-    }
-  }
-
-  // Get a list of files from ZooKeeper at the path given in the file= parameter.
-  private void showFromZooKeeper(SolrQueryRequest req, SolrQueryResponse rsp,
-      CoreContainer coreContainer) throws KeeperException,
-      InterruptedException, UnsupportedEncodingException {
-
-    SolrZkClient zkClient = coreContainer.getZkController().getZkClient();
-
-    String adminFile = getAdminFileFromZooKeeper(req, rsp, zkClient, hiddenFiles);
-
-    if (adminFile == null) {
-      return;
-    }
-
-    // Show a directory listing
-    List<String> children = zkClient.getChildren(adminFile, null, true);
-    if (children.size() > 0) {
-      
-      NamedList<SimpleOrderedMap<Object>> files = new SimpleOrderedMap<>();
-      for (String f : children) {
-        if (isHiddenFile(req, rsp, f, false, hiddenFiles)) {
-          continue;
-        }
-
-        SimpleOrderedMap<Object> fileInfo = new SimpleOrderedMap<>();
-        files.add(f, fileInfo);
-        List<String> fchildren = zkClient.getChildren(adminFile + "/" + f, null, true);
-        if (fchildren.size() > 0) {
-          fileInfo.add("directory", true);
-        } else {
-          // TODO? content type
-          // NOTE: 'f' is the child node name here, so this records the length of
-          // the name string, not the znode's data size.
-          fileInfo.add("size", f.length());
-        }
-        // TODO: ?
-        // fileInfo.add( "modified", new Date( f.lastModified() ) );
-      }
-      rsp.add("files", files);
-    } else {
-      // Include the file contents
-      // The file logic depends on RawResponseWriter, so force its use.
-      ModifiableSolrParams params = new ModifiableSolrParams(req.getParams());
-      params.set(CommonParams.WT, "raw");
-      req.setParams(params);
-      ContentStreamBase content = new ContentStreamBase.ByteArrayStream(zkClient.getData(adminFile, null, null, true), adminFile);
-      content.setContentType(req.getParams().get(USE_CONTENT_TYPE));
-      
-      rsp.add(RawResponseWriter.CONTENT, content);
-    }
-    rsp.setHttpCaching(false);
-  }
-
-  // Return the file indicated (or the directory listing) from the local file system.
-  private void showFromFileSystem(SolrQueryRequest req, SolrQueryResponse rsp) {
-    File adminFile = getAdminFileFromFileSystem(req, rsp, hiddenFiles);
-
-    if (adminFile == null) { // exception already recorded
-      return;
-    }
-
-    // Make sure the file exists, is readable and is not a hidden file
-    if( !adminFile.exists() ) {
-      log.error("Can not find: "+adminFile.getName() + " ["+adminFile.getAbsolutePath()+"]");
-      rsp.setException(new SolrException
-                       ( ErrorCode.NOT_FOUND, "Can not find: "+adminFile.getName() 
-                         + " ["+adminFile.getAbsolutePath()+"]" ));
-      return;
-    }
-    if( !adminFile.canRead() || adminFile.isHidden() ) {
-      log.error("Can not show: "+adminFile.getName() + " ["+adminFile.getAbsolutePath()+"]");
-      rsp.setException(new SolrException
-                       ( ErrorCode.NOT_FOUND, "Can not show: "+adminFile.getName() 
-                         + " ["+adminFile.getAbsolutePath()+"]" ));
-      return;
-    }
-    
-    // Show a directory listing
-    if( adminFile.isDirectory() ) {
-      // it's really a directory, just go for it.
-      int basePath = adminFile.getAbsolutePath().length() + 1;
-      NamedList<SimpleOrderedMap<Object>> files = new SimpleOrderedMap<>();
-      for( File f : adminFile.listFiles() ) {
-        String path = f.getAbsolutePath().substring( basePath );
-        path = path.replace( '\\', '/' ); // normalize slashes
-
-        if (isHiddenFile(req, rsp, f.getName().replace('\\', '/'), false, hiddenFiles)) {
-          continue;
-        }
-
-        SimpleOrderedMap<Object> fileInfo = new SimpleOrderedMap<>();
-        files.add( path, fileInfo );
-        if( f.isDirectory() ) {
-          fileInfo.add( "directory", true ); 
-        }
-        else {
-          // TODO? content type
-          fileInfo.add( "size", f.length() );
-        }
-        fileInfo.add( "modified", new Date( f.lastModified() ) );
-      }
-      rsp.add("files", files);
-    }
-    else {
-      // Include the file contents
-      //The file logic depends on RawResponseWriter, so force its use.
-      ModifiableSolrParams params = new ModifiableSolrParams( req.getParams() );
-      params.set( CommonParams.WT, "raw" );
-      req.setParams(params);
-
-      ContentStreamBase content = new ContentStreamBase.FileStream( adminFile );
-      content.setContentType(req.getParams().get(USE_CONTENT_TYPE));
-
-      rsp.add(RawResponseWriter.CONTENT, content);
-    }
-    rsp.setHttpCaching(false);
-  }
-
-  //////////////////////// Static methods //////////////////////////////
-
-  public static boolean isHiddenFile(SolrQueryRequest req, SolrQueryResponse rsp, String fnameIn, boolean reportError,
-                                     Set<String> hiddenFiles) {
-    String fname = fnameIn.toUpperCase(Locale.ROOT);
-    if (hiddenFiles.contains(fname) || hiddenFiles.contains("*")) {
-      if (reportError) {
-        log.error("Cannot access " + fname);
-        rsp.setException(new SolrException(SolrException.ErrorCode.FORBIDDEN, "Can not access: " + fnameIn));
-      }
-      return true;
-    }
-
-    // This is slightly off; a valid path is something like ./schema.xml. I don't think
-    // it's worth the effort, though, to fix it to handle all possibilities.
-    if (fname.indexOf("..") >= 0 || fname.startsWith(".")) {
-      if (reportError) {
-        log.error("Invalid path: " + fname);
-        rsp.setException(new SolrException(SolrException.ErrorCode.FORBIDDEN, "Invalid path: " + fnameIn));
-      }
-      return true;
-    }
-    return false;
-  }
-
-  // Refactored to be usable from multiple methods. Gets the path of the requested file from ZK.
-  // Returns null if the file is not found.
-  //
-  // Assumes that the file is in a parameter called "file".
-
-  public static String getAdminFileFromZooKeeper(SolrQueryRequest req, SolrQueryResponse rsp, SolrZkClient zkClient,
-                                                 Set<String> hiddenFiles)
-      throws KeeperException, InterruptedException {
-    String adminFile = null;
-    SolrCore core = req.getCore();
-
-    final ZkSolrResourceLoader loader = (ZkSolrResourceLoader) core
-        .getResourceLoader();
-    String confPath = loader.getConfigSetZkPath();
-
-    String fname = req.getParams().get("file", null);
-    if (fname == null) {
-      adminFile = confPath;
-    } else {
-      fname = fname.replace('\\', '/'); // normalize slashes
-      if (isHiddenFile(req, rsp, fname, true, hiddenFiles)) {
-        return null;
-      }
-      if (fname.startsWith("/")) { // Only files relative to conf are valid
-        fname = fname.substring(1);
-      }
-      adminFile = confPath + "/" + fname;
-    }
-
-    // Make sure the file exists, is readable and is not a hidden file
-    if (!zkClient.exists(adminFile, true)) {
-      log.error("Can not find: " + adminFile);
-      rsp.setException(new SolrException(SolrException.ErrorCode.NOT_FOUND, "Can not find: "
-          + adminFile));
-      return null;
-    }
-
-    return adminFile;
-  }
-
-
-  // Find the file indicated by the "file=XXX" parameter or the root of the conf directory on the local
-  // file system. Respects all the "interesting" stuff around what the resource loader does to find files.
-  public static File getAdminFileFromFileSystem(SolrQueryRequest req, SolrQueryResponse rsp,
-                                                Set<String> hiddenFiles) {
-    File adminFile = null;
-    final SolrResourceLoader loader = req.getCore().getResourceLoader();
-    File configdir = new File( loader.getConfigDir() );
-    if (!configdir.exists()) {
-      // TODO: maybe we should just open it this way to start with?
-      try {
-        configdir = new File( loader.getClassLoader().getResource(loader.getConfigDir()).toURI() );
-      } catch (URISyntaxException e) {
-        log.error("Can not access configuration directory!");
-        rsp.setException(new SolrException( SolrException.ErrorCode.FORBIDDEN, "Can not access configuration directory!", e));
-        return null;
-      }
-    }
-    String fname = req.getParams().get("file", null);
-    if( fname == null ) {
-      adminFile = configdir;
-    }
-    else {
-      fname = fname.replace( '\\', '/' ); // normalize slashes
-      if( hiddenFiles.contains( fname.toUpperCase(Locale.ROOT) ) ) {
-        log.error("Can not access: "+ fname);
-        rsp.setException(new SolrException( SolrException.ErrorCode.FORBIDDEN, "Can not access: "+fname ));
-        return null;
-      }
-      if( fname.indexOf( ".." ) >= 0 ) {
-        log.error("Invalid path: "+ fname);
-        rsp.setException(new SolrException( SolrException.ErrorCode.FORBIDDEN, "Invalid path: "+fname ));
-        return null;
-      }
-      adminFile = new File( configdir, fname );
-    }
-    return adminFile;
-  }
-
-  public final Set<String> getHiddenFiles() {
-    return hiddenFiles;
-  }
-
-  //////////////////////// SolrInfoMBeans methods //////////////////////
-
-  @Override
-  public String getDescription() {
-    return "Admin Config File -- view or update config files directly";
-  }
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-}

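Access control in the deleted handler boils down to the two checks in isHiddenFile(): an
upper-cased exact-name (or '*') lookup against the hidden set, and rejection of '..' and
leading-dot paths. A compact standalone version of that logic:

    import java.util.HashSet;
    import java.util.Locale;
    import java.util.Set;

    public class HiddenCheck {
      static boolean isHidden(String fname, Set<String> hidden) {
        String upper = fname.toUpperCase(Locale.ROOT);
        if (hidden.contains(upper) || hidden.contains("*")) return true; // explicitly hidden
        return upper.indexOf("..") >= 0 || upper.startsWith(".");        // path escape attempt
      }

      public static void main(String[] args) {
        Set<String> hidden = new HashSet<>();
        hidden.add("SYNONYMS.TXT"); // stored upper-cased, as in initHidden()
        System.out.println(isHidden("synonyms.txt", hidden)); // true
        System.out.println(isHidden("../solr.xml", hidden));  // true
        System.out.println(isHidden("schema.xml", hidden));   // false
      }
    }
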
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java
deleted file mode 100644
index f9a3c05..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java
+++ /dev/null
@@ -1,296 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.client.solrj.impl.XMLResponseParser;
-import org.apache.solr.core.SolrInfoBean;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.util.ContentStream;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.response.BinaryResponseWriter;
-import org.apache.solr.response.SolrQueryResponse;
-
-import java.io.StringReader;
-import java.text.NumberFormat;
-import java.util.Locale;
-import java.util.Set;
-import java.util.Map;
-import java.util.HashSet;
-
-/**
- * A request handler that provides info about all 
- * registered SolrInfoMBeans.
- */
-@SuppressWarnings("unchecked")
-public class SolrInfoMBeanHandler extends RequestHandlerBase {
-
-  /**
-   * Take an array of any type and generate a Set containing the toString() of each element.
-   * The Set is guaranteed to never be null (but may be empty)
-   */
-  private Set<String> arrayToSet(Object[] arr) {
-    HashSet<String> r = new HashSet<>();
-    if (null == arr) return r;
-    for (Object o : arr) {
-      if (null != o) r.add(o.toString());
-    }
-    return r;
-  }
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    NamedList<NamedList<NamedList<Object>>> cats = getMBeanInfo(req);
-    if(req.getParams().getBool("diff", false)) {
-      ContentStream body = null;
-      try {
-        body = req.getContentStreams().iterator().next();
-      }
-      catch(Exception ex) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "missing content-stream for diff");
-      }
-      String content = IOUtils.toString(body.getReader());
-      
-      NamedList<NamedList<NamedList<Object>>> ref = fromXML(content);
-      
-      
-      // Normalize the output 
-      SolrQueryResponse wrap = new SolrQueryResponse();
-      wrap.add("solr-mbeans", cats);
-      cats = (NamedList<NamedList<NamedList<Object>>>)
-          BinaryResponseWriter.getParsedResponse(req, wrap).get("solr-mbeans");
-      
-      // Get rid of irrelevant things
-      ref = normalize(ref);
-      cats = normalize(cats);
-      
-      // Only the changes
-      boolean showAll = req.getParams().getBool("all", false);
-      rsp.add("solr-mbeans", getDiff(ref,cats, showAll));
-    }
-    else {
-      rsp.add("solr-mbeans", cats);
-    }
-    rsp.setHttpCaching(false); // never cache, no matter what init config looks like
-  }
-  
-  static NamedList<NamedList<NamedList<Object>>> fromXML(String content) {
-    int idx = content.indexOf("<response>");
-    if(idx<0) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Body does not appear to be an XML response");
-    }
-  
-    try {
-      XMLResponseParser parser = new XMLResponseParser();
-      return (NamedList<NamedList<NamedList<Object>>>)
-          parser.processResponse(new StringReader(content)).get("solr-mbeans");
-    }
-    catch(Exception ex) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Unable to read original XML", ex);
-    }
-  }
-  
-  protected NamedList<NamedList<NamedList<Object>>> getMBeanInfo(SolrQueryRequest req) {
-
-    NamedList<NamedList<NamedList<Object>>> cats = new NamedList<>();
-    
-    String[] requestedCats = req.getParams().getParams("cat");
-    if (null == requestedCats || 0 == requestedCats.length) {
-      for (SolrInfoBean.Category cat : SolrInfoBean.Category.values()) {
-        cats.add(cat.name(), new SimpleOrderedMap<NamedList<Object>>());
-      }
-    } else {
-      for (String catName : requestedCats) {
-        cats.add(catName,new SimpleOrderedMap<NamedList<Object>>());
-      }
-    }
-         
-    Set<String> requestedKeys = arrayToSet(req.getParams().getParams("key"));
-    
-    Map<String, SolrInfoBean> reg = req.getCore().getInfoRegistry();
-    for (Map.Entry<String, SolrInfoBean> entry : reg.entrySet()) {
-      addMBean(req, cats, requestedKeys, entry.getKey(),entry.getValue());
-    }
-
-    for (SolrInfoBean infoMBean : req.getCore().getCoreContainer().getResourceLoader().getInfoMBeans()) {
-      addMBean(req,cats,requestedKeys,infoMBean.getName(),infoMBean);
-    }
-    return cats;
-  }
-
-  private void addMBean(SolrQueryRequest req, NamedList<NamedList<NamedList<Object>>> cats, Set<String> requestedKeys, String key, SolrInfoBean m) {
-    if ( ! ( requestedKeys.isEmpty() || requestedKeys.contains(key) ) ) return;
-    NamedList<NamedList<Object>> catInfo = cats.get(m.getCategory().name());
-    if ( null == catInfo ) return;
-    NamedList<Object> mBeanInfo = new SimpleOrderedMap<>();
-    mBeanInfo.add("class", m.getName());
-    mBeanInfo.add("description", m.getDescription());
-
-    if (req.getParams().getFieldBool(key, "stats", false))
-      mBeanInfo.add("stats", m.getMetricsSnapshot());
-
-    catInfo.add(key, mBeanInfo);
-  }
-
-  protected NamedList<NamedList<NamedList<Object>>> getDiff(
-      NamedList<NamedList<NamedList<Object>>> ref, 
-      NamedList<NamedList<NamedList<Object>>> now,
-      boolean includeAll ) {
-    
-    NamedList<NamedList<NamedList<Object>>> changed = new NamedList<>();
-    
-    // Cycle through each category
-    for(int i=0;i<ref.size();i++) {
-      String category = ref.getName(i);
-      NamedList<NamedList<Object>> ref_cat = ref.get(category);
-      NamedList<NamedList<Object>> now_cat = now.get(category);
-      if(now_cat != null) {
-        String ref_txt = ref_cat+"";
-        String now_txt = now_cat+"";
-        if(!ref_txt.equals(now_txt)) {
-          // Something in the category changed
-          // Now iterate the real beans
-          
-          NamedList<NamedList<Object>> cat = new SimpleOrderedMap<>();
-          for(int j=0;j<ref_cat.size();j++) {
-            String name = ref_cat.getName(j);
-            NamedList<Object> ref_bean = ref_cat.get(name);
-            NamedList<Object> now_bean = now_cat.get(name);
-
-            ref_txt = ref_bean+"";
-            now_txt = now_bean+"";
-            if(!ref_txt.equals(now_txt)) {
-//              System.out.println( "----" );
-//              System.out.println( category +" : " + name );
-//              System.out.println( "REF: " + ref_txt );
-//              System.out.println( "NOW: " + now_txt );
-              
-              // Calculate the differences
-              NamedList diff = diffNamedList(ref_bean,now_bean);
-              diff.add( "_changed_", true ); // flag the changed thing
-              cat.add(name, diff);
-            }
-            else if(includeAll) {
-              cat.add(name, ref_bean);
-            }
-          }
-          if(cat.size()>0) {
-            changed.add(category, cat);
-          }
-        }
-        else if(includeAll) {
-          changed.add(category, ref_cat);
-        }
-      }
-    }
-    return changed;
-  }
-  
-  public NamedList diffNamedList(NamedList ref, NamedList now) {
-    NamedList out = new SimpleOrderedMap();
-    for(int i=0; i<ref.size(); i++) {
-      String name = ref.getName(i);
-      Object r = ref.getVal(i);
-      Object n = now.get(name);
-      if (n == null) {
-        if (r != null) {
-          out.add("REMOVE " + name, r);
-          now.remove(name);
-        }
-      }
-      else if (r != null) {
-        out.add(name, diffObject(r, n));
-        now.remove(name);
-      }
-    }
-    
-    for(int i=0; i<now.size(); i++) {
-      String name = now.getName(i);
-      Object v = now.getVal(i);
-      if(v!=null) {
-        out.add("ADD "+name, v);
-      }
-    }
-    return out;
-  }
-  
-  public Object diffObject(Object ref, Object now) {
-    if (now instanceof Map) {
-      now = new NamedList((Map)now);
-    }
-    if(ref instanceof NamedList) {
-      return diffNamedList((NamedList)ref, (NamedList)now);
-    }
-    if(ref.equals(now)) {
-      return ref;
-    }
-    StringBuilder str = new StringBuilder();
-    str.append("Was: ")
-     .append(ref).append(", Now: ").append(now);
-    
-    if(ref instanceof Number) {
-      NumberFormat nf = NumberFormat.getIntegerInstance(Locale.ROOT);
-      if((ref instanceof Double) || (ref instanceof Float)) {
-        nf = NumberFormat.getInstance(Locale.ROOT);
-      }
-      double dref = ((Number)ref).doubleValue();
-      double dnow = ((Number)now).doubleValue();
-      double diff = Double.NaN;
-      if(Double.isNaN(dref)) {
-        diff = dnow;
-      }
-      else if(Double.isNaN(dnow)) {
-        diff = dref;
-      }
-      else {
-        diff = dnow-dref;
-      }
-      str.append( ", Delta: ").append(nf.format(diff));
-    }
-    return str.toString();
-  }
-  
-  
-  /**
-   * The 'avgRequestsPerSecond' field will make everything look like it changed
-   */
-  public NamedList normalize(NamedList input) {
-    input.remove("avgRequestsPerSecond");
-    for(int i=0; i<input.size(); i++) {
-      Object v = input.getVal(i);
-      if(v instanceof NamedList) {
-        input.setVal(i, normalize((NamedList)v));
-      }
-    }
-    return input;
-  }
-  
-  
-  @Override
-  public String getDescription() {
-    return "Get Info (and statistics) for registered SolrInfoMBeans";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-}

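The diff support above compares two MBean snapshots entry by entry, tagging entries as
removed, added, or changed. The core idea, reduced to plain Maps, looks like the following;
this is a hypothetical helper for illustration, not the NamedList implementation.

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class SnapshotDiff {
      static Map<String, Object> diff(Map<String, Object> ref, Map<String, Object> now) {
        Map<String, Object> out = new LinkedHashMap<>();
        for (Map.Entry<String, Object> e : ref.entrySet()) {
          Object n = now.get(e.getKey());
          if (n == null) out.put("REMOVE " + e.getKey(), e.getValue());
          else if (!n.equals(e.getValue()))
            out.put(e.getKey(), "Was: " + e.getValue() + ", Now: " + n);
        }
        for (Map.Entry<String, Object> e : now.entrySet()) {
          if (!ref.containsKey(e.getKey())) out.put("ADD " + e.getKey(), e.getValue());
        }
        return out;
      }

      public static void main(String[] args) {
        Map<String, Object> ref = new LinkedHashMap<>(), now = new LinkedHashMap<>();
        ref.put("requests", 10); ref.put("hits", 3);
        now.put("requests", 12); now.put("errors", 1); now.put("hits", 3);
        System.out.println(diff(ref, now)); // {requests=Was: 10, Now: 12, ADD errors=1}
      }
    }
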
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/SplitOp.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/SplitOp.java b/solr/core/src/java/org/apache/solr/handler/admin/SplitOp.java
deleted file mode 100644
index 31382c3..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/SplitOp.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.admin;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.solr.cloud.CloudDescriptor;
-import org.apache.solr.cloud.ZkShardTerms;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.params.CommonAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.request.LocalSolrQueryRequest;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.update.SolrIndexSplitter;
-import org.apache.solr.update.SplitIndexCommand;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.DocCollection.DOC_ROUTER;
-import static org.apache.solr.common.params.CommonParams.PATH;
-
-
-class SplitOp implements CoreAdminHandler.CoreAdminOp {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  @Override
-  public void execute(CoreAdminHandler.CallInfo it) throws Exception {
-    SolrParams params = it.req.getParams();
-    List<DocRouter.Range> ranges = null;
-
-    String[] pathsArr = params.getParams(PATH);
-    String rangesStr = params.get(CoreAdminParams.RANGES);    // ranges=a-b,c-d,e-f
-    if (rangesStr != null) {
-      String[] rangesArr = rangesStr.split(",");
-      if (rangesArr.length == 0) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There must be at least one range specified to split an index");
-      } else {
-        ranges = new ArrayList<>(rangesArr.length);
-        for (String r : rangesArr) {
-          try {
-            ranges.add(DocRouter.DEFAULT.fromString(r));
-          } catch (Exception e) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Exception parsing hexadecimal hash range: " + r, e);
-          }
-        }
-      }
-    }
-    String splitKey = params.get("split.key");
-    String[] newCoreNames = params.getParams("targetCore");
-    String cname = params.get(CoreAdminParams.CORE, "");
-
-    if ((pathsArr == null || pathsArr.length == 0) && (newCoreNames == null || newCoreNames.length == 0)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Either path or targetCore param must be specified");
-    }
-
-    log.info("Invoked split action for core: " + cname);
-    String methodStr = params.get(CommonAdminParams.SPLIT_METHOD, SolrIndexSplitter.SplitMethod.REWRITE.toLower());
-    SolrIndexSplitter.SplitMethod splitMethod = SolrIndexSplitter.SplitMethod.get(methodStr);
-    if (splitMethod == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unsupported value of '" + CommonAdminParams.SPLIT_METHOD + "': " + methodStr);
-    }
-    SolrCore parentCore = it.handler.coreContainer.getCore(cname);
-    List<SolrCore> newCores = null;
-    SolrQueryRequest req = null;
-
-    try {
-      // TODO: allow use of rangesStr in the future
-      List<String> paths = null;
-      int partitions = pathsArr != null ? pathsArr.length : newCoreNames.length;
-
-      DocRouter router = null;
-      String routeFieldName = null;
-      if (it.handler.coreContainer.isZooKeeperAware()) {
-        ClusterState clusterState = it.handler.coreContainer.getZkController().getClusterState();
-        String collectionName = parentCore.getCoreDescriptor().getCloudDescriptor().getCollectionName();
-        DocCollection collection = clusterState.getCollection(collectionName);
-        String sliceName = parentCore.getCoreDescriptor().getCloudDescriptor().getShardId();
-        Slice slice = collection.getSlice(sliceName);
-        router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
-        if (ranges == null) {
-          DocRouter.Range currentRange = slice.getRange();
-          ranges = currentRange != null ? router.partitionRange(partitions, currentRange) : null;
-        }
-        Object routerObj = collection.get(DOC_ROUTER); // for back-compat with Solr 4.4
-        if (routerObj instanceof Map) {
-          Map routerProps = (Map) routerObj;
-          routeFieldName = (String) routerProps.get("field");
-        }
-      }
-
-      if (pathsArr == null) {
-        newCores = new ArrayList<>(partitions);
-        for (String newCoreName : newCoreNames) {
-          SolrCore newcore = it.handler.coreContainer.getCore(newCoreName);
-          if (newcore != null) {
-            newCores.add(newcore);
-            if (it.handler.coreContainer.isZooKeeperAware()) {
-              // this core must be the only replica in its shard otherwise
-              // we cannot guarantee consistency between replicas because when we add data to this replica
-              CloudDescriptor cd = newcore.getCoreDescriptor().getCloudDescriptor();
-              ClusterState clusterState = it.handler.coreContainer.getZkController().getClusterState();
-              if (clusterState.getCollection(cd.getCollectionName()).getSlice(cd.getShardId()).getReplicas().size() != 1) {
-                throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-                    "Core with core name " + newCoreName + " must be the only replica in shard " + cd.getShardId());
-              }
-            }
-          } else {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Core with core name " + newCoreName + " expected but doesn't exist.");
-          }
-        }
-      } else {
-        paths = Arrays.asList(pathsArr);
-      }
-
-      req = new LocalSolrQueryRequest(parentCore, params);
-
-      SplitIndexCommand cmd = new SplitIndexCommand(req, it.rsp, paths, newCores, ranges, router, routeFieldName, splitKey, splitMethod);
-      parentCore.getUpdateHandler().split(cmd);
-
-      if (it.handler.coreContainer.isZooKeeperAware()) {
-        for (SolrCore newcore : newCores) {
-          // the core's index changed from empty to containing data, so its term must not be zero
-          CloudDescriptor cd = newcore.getCoreDescriptor().getCloudDescriptor();
-          ZkShardTerms zkShardTerms = it.handler.coreContainer.getZkController().getShardTerms(cd.getCollectionName(), cd.getShardId());
-          zkShardTerms.ensureHighestTermsAreNotZero();
-        }
-      }
-
-      // After the split has completed, someone (here?) should start the process of replaying the buffered updates.
-    } catch (Exception e) {
-      log.error("ERROR executing split:", e);
-      throw e;
-    } finally {
-      if (req != null) req.close();
-      if (parentCore != null) parentCore.close();
-      if (newCores != null) {
-        for (SolrCore newCore : newCores) {
-          newCore.close();
-        }
-      }
-    }
-  }
-}
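
A note on the range math above: when no explicit ranges are given, SplitOp derives them by dividing the parent slice's hash range into N contiguous sub-ranges via DocRouter.partitionRange. Below is a simplified, self-contained sketch of that even-split idea; HashRange, RangeSplitSketch, and partitionRange here are illustrative stand-ins, not the actual Solr classes, and the real router contains additional logic.

import java.util.ArrayList;
import java.util.List;

public class RangeSplitSketch {

  // Illustrative stand-in for a hash range [min, max], both inclusive.
  static class HashRange {
    final int min, max;
    HashRange(int min, int max) { this.min = min; this.max = max; }
    @Override public String toString() {
      return Integer.toHexString(min) + "-" + Integer.toHexString(max);
    }
  }

  // Split [range.min, range.max] into 'partitions' contiguous, non-overlapping sub-ranges.
  static List<HashRange> partitionRange(int partitions, HashRange range) {
    long min = range.min;
    long span = (long) range.max - range.min + 1;   // long math avoids int overflow
    List<HashRange> out = new ArrayList<>(partitions);
    long start = min;
    for (int i = 1; i <= partitions; i++) {
      long end = min + (span * i) / partitions - 1; // spreads any remainder evenly
      out.add(new HashRange((int) start, (int) end));
      start = end + 1;
    }
    return out;
  }

  public static void main(String[] args) {
    // a two-way split of the full 32-bit range yields the lower and upper halves,
    // which is what a two-way shard split produces
    partitionRange(2, new HashRange(Integer.MIN_VALUE, Integer.MAX_VALUE))
        .forEach(r -> System.out.println(r));
  }
}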

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/StatusOp.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/StatusOp.java b/solr/core/src/java/org/apache/solr/handler/admin/StatusOp.java
deleted file mode 100644
index f2bddbd..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/StatusOp.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.handler.admin;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.core.CoreContainer;
-
-
-class StatusOp implements CoreAdminHandler.CoreAdminOp {
-  @Override
-  public void execute(CoreAdminHandler.CallInfo it) throws Exception {
-    SolrParams params = it.req.getParams();
-
-    String cname = params.get(CoreAdminParams.CORE);
-    String indexInfo = params.get(CoreAdminParams.INDEX_INFO);
-    boolean isIndexInfoNeeded = Boolean.parseBoolean(null == indexInfo ? "true" : indexInfo);
-    NamedList<Object> status = new SimpleOrderedMap<>();
-    Map<String, Exception> failures = new HashMap<>();
-    for (Map.Entry<String, CoreContainer.CoreLoadFailure> failure : it.handler.coreContainer.getCoreInitFailures().entrySet()) {
-      failures.put(failure.getKey(), failure.getValue().exception);
-    }
-    if (cname == null) {
-      for (String name : it.handler.coreContainer.getAllCoreNames()) {
-        status.add(name, CoreAdminOperation.getCoreStatus(it.handler.coreContainer, name, isIndexInfoNeeded));
-      }
-      it.rsp.add("initFailures", failures);
-    } else {
-      failures = failures.containsKey(cname)
-          ? Collections.singletonMap(cname, failures.get(cname))
-          : Collections.<String, Exception>emptyMap();
-      it.rsp.add("initFailures", failures);
-      status.add(cname, CoreAdminOperation.getCoreStatus(it.handler.coreContainer, cname, isIndexInfoNeeded));
-    }
-    it.rsp.add("status", status);
-  }
-}
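
For context, the STATUS action implemented above is normally invoked from a client through the Core Admin API. A minimal SolrJ sketch follows, assuming the SolrJ API of the same era and a node at http://localhost:8983/solr; the URL is illustrative and error handling is omitted.

import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CoreAdminRequest;
import org.apache.solr.client.solrj.response.CoreAdminResponse;

public class CoreStatusExample {
  public static void main(String[] args) throws Exception {
    // assumed base URL of a running node; adjust for your installation
    try (HttpSolrClient client =
             new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      // a null core name requests the status of every core, matching the
      // cname == null branch of StatusOp above
      CoreAdminResponse status = CoreAdminRequest.getStatus(null, client);
      status.getCoreStatus().forEach(core ->
          System.out.println(core.getKey() + " -> " + core.getValue()));
    }
  }
}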

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java
deleted file mode 100644
index d8e10ab..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java
+++ /dev/null
@@ -1,416 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.lang.invoke.MethodHandles;
-import java.lang.management.ManagementFactory;
-import java.lang.management.OperatingSystemMXBean;
-import java.lang.management.RuntimeMXBean;
-import java.net.InetAddress;
-import java.nio.charset.Charset;
-import java.text.DecimalFormat;
-import java.text.DecimalFormatSymbols;
-import java.util.Date;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Locale;
-
-import com.codahale.metrics.Gauge;
-import org.apache.commons.io.IOUtils;
-import org.apache.lucene.LucenePackage;
-import org.apache.lucene.util.Constants;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.schema.IndexSchema;
-import org.apache.solr.util.RTimer;
-import org.apache.solr.util.RedactionUtils;
-import org.apache.solr.util.stats.MetricUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-
-/**
- * This handler returns system info
- * 
- * @since solr 1.2
- */
-public class SystemInfoHandler extends RequestHandlerBase 
-{
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private static final String PARAM_NODE = "node";
-
-  public static String REDACT_STRING = RedactionUtils.getRedactString();
-
-  /**
-   * <p>
-   * Undocumented expert level system property to prevent doing a reverse lookup of our hostname.
-   * This property will be logged as a suggested workaround if any problems are noticed when doing reverse 
-   * lookup.
-   * </p>
-   *
-   * <p>
-   * TODO: should we refactor this (and the associated logic) into a helper method for any other places
-   * where DNS is used?
-   * </p>
-   * @see #initHostname
-   */
-  private static final String PREVENT_REVERSE_DNS_OF_LOCALHOST_SYSPROP = "solr.dns.prevent.reverse.lookup";
-  
-  // on some platforms, resolving canonical hostname can cause the thread
-  // to block for several seconds if nameservices aren't available
-  // so resolve this once per handler instance
-  // (i.e. not static, so a core reload will refresh it)
-  private String hostname = null;
-
-  private CoreContainer cc;
-
-  public SystemInfoHandler() {
-    this(null);
-  }
-
-  public SystemInfoHandler(CoreContainer cc) {
-    super();
-    this.cc = cc;
-    initHostname();
-  }
-  
-  private void initHostname() {
-    if (null != System.getProperty(PREVENT_REVERSE_DNS_OF_LOCALHOST_SYSPROP, null)) {
-      log.info("Resolving canonical hostname for local host prevented due to '{}' sysprop",
-               PREVENT_REVERSE_DNS_OF_LOCALHOST_SYSPROP);
-      hostname = null;
-      return;
-    }
-    
-    RTimer timer = new RTimer();
-    try {
-      InetAddress addr = InetAddress.getLocalHost();
-      hostname = addr.getCanonicalHostName();
-    } catch (Exception e) {
-      log.warn("Unable to resolve canonical hostname for local host, possible DNS misconfiguration. " +
-               "Set the '"+PREVENT_REVERSE_DNS_OF_LOCALHOST_SYSPROP+"' sysprop to true on startup to " +
-               "prevent future lookups if DNS can not be fixed.", e);
-      hostname = null;
-      return;
-    }
-    timer.stop();
-    
-    if (15000D < timer.getTime()) {
-      String readableTime = String.format(Locale.ROOT, "%.3f", (timer.getTime() / 1000));
-      log.warn("Resolving canonical hostname for local host took {} seconds, possible DNS misconfiguration. " +
-               "Set the '{}' sysprop to true on startup to prevent future lookups if DNS can not be fixed.",
-               readableTime, PREVENT_REVERSE_DNS_OF_LOCALHOST_SYSPROP);
-    
-    }
-  }
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception
-  {
-    rsp.setHttpCaching(false);
-    SolrCore core = req.getCore();
-    if (AdminHandlersProxy.maybeProxyToNodes(req, rsp, getCoreContainer(req, core))) {
-      return; // Request was proxied to other node
-    }
-    if (core != null) rsp.add( "core", getCoreInfo( core, req.getSchema() ) );
-    boolean solrCloudMode =  getCoreContainer(req, core).isZooKeeperAware();
-    rsp.add( "mode", solrCloudMode ? "solrcloud" : "std");
-    if (solrCloudMode) {
-      rsp.add("zkHost", getCoreContainer(req, core).getZkController().getZkServerAddress());
-    }
-    if (cc != null)
-      rsp.add( "solr_home", cc.getSolrHome());
-    rsp.add( "lucene", getLuceneInfo() );
-    rsp.add( "jvm", getJvmInfo() );
-    rsp.add( "system", getSystemInfo() );
-    if (solrCloudMode) {
-      rsp.add("node", getCoreContainer(req, core).getZkController().getNodeName());
-    }
-  }
-
-  private CoreContainer getCoreContainer(SolrQueryRequest req, SolrCore core) {
-    CoreContainer coreContainer;
-    if (core != null) {
-       coreContainer = req.getCore().getCoreContainer();
-    } else {
-      coreContainer = cc;
-    }
-    return coreContainer;
-  }
-  
-  /**
-   * Get core info
-   */
-  private SimpleOrderedMap<Object> getCoreInfo( SolrCore core, IndexSchema schema ) {
-    SimpleOrderedMap<Object> info = new SimpleOrderedMap<>();
-    
-    info.add( "schema", schema != null ? schema.getSchemaName():"no schema!" );
-    
-    // Host
-    info.add( "host", hostname );
-
-    // Now
-    info.add( "now", new Date() );
-    
-    // Start Time
-    info.add( "start", core.getStartTimeStamp() );
-
-    // Solr Home
-    SimpleOrderedMap<Object> dirs = new SimpleOrderedMap<>();
-    dirs.add( "cwd" , new File( System.getProperty("user.dir")).getAbsolutePath() );
-    dirs.add("instance", core.getResourceLoader().getInstancePath().toString());
-    try {
-      dirs.add( "data", core.getDirectoryFactory().normalize(core.getDataDir()));
-    } catch (IOException e) {
-      log.warn("Problem getting the normalized data directory path", e);
-      dirs.add( "data", "N/A" );
-    }
-    dirs.add( "dirimpl", core.getDirectoryFactory().getClass().getName());
-    try {
-      dirs.add( "index", core.getDirectoryFactory().normalize(core.getIndexDir()) );
-    } catch (IOException e) {
-      log.warn("Problem getting the normalized index directory path", e);
-      dirs.add( "index", "N/A" );
-    }
-    info.add( "directory", dirs );
-    return info;
-  }
-  
-  /**
-   * Get system info
-   */
-  public static SimpleOrderedMap<Object> getSystemInfo() {
-    SimpleOrderedMap<Object> info = new SimpleOrderedMap<>();
-    
-    OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
-    info.add(NAME, os.getName()); // add at least this one
-    // add remaining ones dynamically using Java Beans API
-    // also those from JVM implementation-specific classes
-    MetricUtils.addMXBeanMetrics(os, MetricUtils.OS_MXBEAN_CLASSES, null, (name, metric) -> {
-      if (info.get(name) == null) {
-        info.add(name, ((Gauge) metric).getValue());
-      }
-    });
-
-    // Try some command line things:
-    try { 
-      if (!Constants.WINDOWS) {
-        info.add( "uname",  execute( "uname -a" ) );
-        info.add( "uptime", execute( "uptime" ) );
-      }
-    } catch( Exception ex ) {
-      log.warn("Unable to execute command line tools to get operating system properties.", ex);
-    } 
-    return info;
-  }
-  
-  /**
-   * Utility function to execute an external command
-   */
-  private static String execute( String cmd )
-  {
-    InputStream in = null;
-    Process process = null;
-    
-    try {
-      process = Runtime.getRuntime().exec(cmd);
-      in = process.getInputStream();
-      // use default charset from locale here, because the command invoked also uses the default locale:
-      return IOUtils.toString(new InputStreamReader(in, Charset.defaultCharset()));
-    } catch( Exception ex ) {
-      // ignore - log.warn("Error executing command", ex);
-      return "(error executing: " + cmd + ")";
-    } catch (Error err) {
-      if (err.getMessage() != null && (err.getMessage().contains("posix_spawn") || err.getMessage().contains("UNIXProcess"))) {
-        log.warn("Error forking command due to JVM locale bug (see https://issues.apache.org/jira/browse/SOLR-6387): " + err.getMessage());
-        return "(error executing: " + cmd + ")";
-      }
-      throw err;
-    } finally {
-      if (process != null) {
-        IOUtils.closeQuietly( process.getOutputStream() );
-        IOUtils.closeQuietly( process.getInputStream() );
-        IOUtils.closeQuietly( process.getErrorStream() );
-      }
-    }
-  }
-  
-  /**
-   * Get JVM Info - including memory info
-   */
-  public static SimpleOrderedMap<Object> getJvmInfo()
-  {
-    SimpleOrderedMap<Object> jvm = new SimpleOrderedMap<>();
-
-    final String javaVersion = System.getProperty("java.specification.version", "unknown"); 
-    final String javaVendor = System.getProperty("java.specification.vendor", "unknown"); 
-    final String javaName = System.getProperty("java.specification.name", "unknown"); 
-    final String jreVersion = System.getProperty("java.version", "unknown");
-    final String jreVendor = System.getProperty("java.vendor", "unknown");
-    final String vmVersion = System.getProperty("java.vm.version", "unknown"); 
-    final String vmVendor = System.getProperty("java.vm.vendor", "unknown"); 
-    final String vmName = System.getProperty("java.vm.name", "unknown"); 
-
-    // Summary Info
-    jvm.add( "version", jreVersion + " " + vmVersion);
-    jvm.add(NAME, jreVendor + " " + vmName);
-    
-    // details
-    SimpleOrderedMap<Object> java = new SimpleOrderedMap<>();
-    java.add( "vendor", javaVendor );
-    java.add(NAME, javaName);
-    java.add( "version", javaVersion );
-    jvm.add( "spec", java );
-    SimpleOrderedMap<Object> jre = new SimpleOrderedMap<>();
-    jre.add( "vendor", jreVendor );
-    jre.add( "version", jreVersion );
-    jvm.add( "jre", jre );
-    SimpleOrderedMap<Object> vm = new SimpleOrderedMap<>();
-    vm.add( "vendor", vmVendor );
-    vm.add(NAME, vmName);
-    vm.add( "version", vmVersion );
-    jvm.add( "vm", vm );
-           
-    
-    Runtime runtime = Runtime.getRuntime();
-    jvm.add( "processors", runtime.availableProcessors() );
-    
-    // not thread safe, but could be thread local
-    DecimalFormat df = new DecimalFormat("#.#", DecimalFormatSymbols.getInstance(Locale.ROOT));
-
-    SimpleOrderedMap<Object> mem = new SimpleOrderedMap<>();
-    SimpleOrderedMap<Object> raw = new SimpleOrderedMap<>();
-    long free = runtime.freeMemory();
-    long max = runtime.maxMemory();
-    long total = runtime.totalMemory();
-    long used = total - free;
-    double percentUsed = ((double)(used)/(double)max)*100;
-    raw.add("free",  free );
-    mem.add("free",  humanReadableUnits(free, df));
-    raw.add("total", total );
-    mem.add("total", humanReadableUnits(total, df));
-    raw.add("max",   max );
-    mem.add("max",   humanReadableUnits(max, df));
-    raw.add("used",  used );
-    mem.add("used",  humanReadableUnits(used, df) + 
-            " (%" + df.format(percentUsed) + ")");
-    raw.add("used%", percentUsed);
-
-    mem.add("raw", raw);
-    jvm.add("memory", mem);
-
-    // JMX properties -- probably should be moved to a different handler
-    SimpleOrderedMap<Object> jmx = new SimpleOrderedMap<>();
-    try{
-      RuntimeMXBean mx = ManagementFactory.getRuntimeMXBean();
-      if (mx.isBootClassPathSupported()) {
-        jmx.add( "bootclasspath", mx.getBootClassPath());
-      }
-      jmx.add( "classpath", mx.getClassPath() );
-
-      // the input arguments passed to the Java virtual machine
-      // which does not include the arguments to the main method.
-      jmx.add( "commandLineArgs", getInputArgumentsRedacted(mx));
-
-      jmx.add( "startTime", new Date(mx.getStartTime()));
-      jmx.add( "upTimeMS",  mx.getUptime() );
-
-    }
-    catch (Exception e) {
-      log.warn("Error getting JMX properties", e);
-    }
-    jvm.add( "jmx", jmx );
-    return jvm;
-  }
-  
-  private static SimpleOrderedMap<Object> getLuceneInfo() {
-    SimpleOrderedMap<Object> info = new SimpleOrderedMap<>();
-
-    Package p = SolrCore.class.getPackage();
-
-    info.add( "solr-spec-version", p.getSpecificationVersion() );
-    info.add( "solr-impl-version", p.getImplementationVersion() );
-  
-    p = LucenePackage.class.getPackage();
-
-    info.add( "lucene-spec-version", p.getSpecificationVersion() );
-    info.add( "lucene-impl-version", p.getImplementationVersion() );
-
-    return info;
-  }
-  
-  //////////////////////// SolrInfoMBeans methods //////////////////////
-
-  @Override
-  public String getDescription() {
-    return "Get System Info";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-
-  private static final long ONE_KB = 1024;
-  private static final long ONE_MB = ONE_KB * ONE_KB;
-  private static final long ONE_GB = ONE_KB * ONE_MB;
-
-  /**
-   * Return good default units based on byte size.
-   */
-  private static String humanReadableUnits(long bytes, DecimalFormat df) {
-    String newSizeAndUnits;
-
-    if (bytes / ONE_GB > 0) {
-      newSizeAndUnits = String.valueOf(df.format((float)bytes / ONE_GB)) + " GB";
-    } else if (bytes / ONE_MB > 0) {
-      newSizeAndUnits = String.valueOf(df.format((float)bytes / ONE_MB)) + " MB";
-    } else if (bytes / ONE_KB > 0) {
-      newSizeAndUnits = String.valueOf(df.format((float)bytes / ONE_KB)) + " KB";
-    } else {
-      newSizeAndUnits = String.valueOf(bytes) + " bytes";
-    }
-
-    return newSizeAndUnits;
-  }
-
-  private static List<String> getInputArgumentsRedacted(RuntimeMXBean mx) {
-    List<String> list = new LinkedList<>();
-    for (String arg : mx.getInputArguments()) {
-      if (arg.startsWith("-D") && arg.contains("=") && RedactionUtils.isSystemPropertySensitive(arg.substring(2, arg.indexOf("=")))) {
-        list.add(String.format(Locale.ROOT, "%s=%s", arg.substring(0, arg.indexOf("=")), REDACT_STRING));
-      } else {
-        list.add(arg);
-      }
-    }
-    return list;
-  }
-  
-}
-
-
-
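
To illustrate the JVM memory reporting in getJvmInfo() above, here is a self-contained sketch that derives the same used/free/max figures and formats them with a humanReadableUnits-style helper. It uses only the plain JDK, no Solr dependencies; the class name is illustrative.

import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.util.Locale;

public class MemoryReport {
  private static final long ONE_KB = 1024, ONE_MB = ONE_KB * ONE_KB, ONE_GB = ONE_KB * ONE_MB;

  // same unit-selection logic as humanReadableUnits() above
  static String humanReadableUnits(long bytes, DecimalFormat df) {
    if (bytes / ONE_GB > 0) return df.format((double) bytes / ONE_GB) + " GB";
    if (bytes / ONE_MB > 0) return df.format((double) bytes / ONE_MB) + " MB";
    if (bytes / ONE_KB > 0) return df.format((double) bytes / ONE_KB) + " KB";
    return bytes + " bytes";
  }

  public static void main(String[] args) {
    Runtime rt = Runtime.getRuntime();
    long free = rt.freeMemory(), max = rt.maxMemory(), total = rt.totalMemory();
    long used = total - free;                      // same derivation as getJvmInfo()
    double percentUsed = ((double) used / max) * 100;
    DecimalFormat df = new DecimalFormat("#.#", DecimalFormatSymbols.getInstance(Locale.ROOT));
    System.out.println("used: " + humanReadableUnits(used, df)
        + " (" + df.format(percentUsed) + "%) of " + humanReadableUnits(max, df));
  }
}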

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/admin/ThreadDumpHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ThreadDumpHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/ThreadDumpHandler.java
deleted file mode 100644
index bb5b3ee..0000000
--- a/solr/core/src/java/org/apache/solr/handler/admin/ThreadDumpHandler.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.ThreadInfo;
-import java.lang.management.ThreadMXBean;
-import java.util.Locale;
-
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-
-import static org.apache.solr.common.params.CommonParams.ID;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-/**
- * This handler returns a thread dump
- *
- * @since solr 1.2
- */
-public class ThreadDumpHandler extends RequestHandlerBase
-{
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException 
-  {    
-    SimpleOrderedMap<Object> system = new SimpleOrderedMap<>();
-    rsp.add( "system", system );
-
-    ThreadMXBean tmbean = ManagementFactory.getThreadMXBean();
-    
-    // Thread Count
-    SimpleOrderedMap<Object> nl = new SimpleOrderedMap<>();
-    nl.add( "current",tmbean.getThreadCount() );
-    nl.add( "peak", tmbean.getPeakThreadCount() );
-    nl.add( "daemon", tmbean.getDaemonThreadCount() );
-    system.add( "threadCount", nl );
-    
-    // Deadlocks
-    ThreadInfo[] tinfos;
-    long[] tids = tmbean.findMonitorDeadlockedThreads();
-    if (tids != null) {
-      tinfos = tmbean.getThreadInfo(tids, Integer.MAX_VALUE);
-      NamedList<SimpleOrderedMap<Object>> lst = new NamedList<>();
-      for (ThreadInfo ti : tinfos) {
-        if (ti != null) {
-          lst.add( "thread", getThreadInfo( ti, tmbean ) );
-        }
-      }
-      system.add( "deadlocks", lst );
-    }
-    
-    // Now show all the threads....
-    tids = tmbean.getAllThreadIds();
-    tinfos = tmbean.getThreadInfo(tids, Integer.MAX_VALUE);
-    NamedList<SimpleOrderedMap<Object>> lst = new NamedList<>();
-    for (ThreadInfo ti : tinfos) {
-      if (ti != null) {
-        lst.add( "thread", getThreadInfo( ti, tmbean ) );
-      }
-    }
-    system.add( "threadDump", lst );
-    rsp.setHttpCaching(false);
-  }
-
-  //--------------------------------------------------------------------------------
-  //--------------------------------------------------------------------------------
-  
-  private static SimpleOrderedMap<Object> getThreadInfo( ThreadInfo ti, ThreadMXBean tmbean ) {
-    SimpleOrderedMap<Object> info = new SimpleOrderedMap<>();
-    long tid = ti.getThreadId();
-
-    info.add( ID, tid );
-    info.add(NAME, ti.getThreadName());
-    info.add( "state", ti.getThreadState().toString() );
-    
-    if (ti.getLockName() != null) {
-      info.add( "lock", ti.getLockName() );
-    }
-    if (ti.isSuspended()) {
-      info.add( "suspended", true );
-    }
-    if (ti.isInNative()) {
-      info.add( "native", true );
-    }
-    
-    if (tmbean.isThreadCpuTimeSupported()) {
-      info.add( "cpuTime", formatNanos(tmbean.getThreadCpuTime(tid)) );
-      info.add( "userTime", formatNanos(tmbean.getThreadUserTime(tid)) );
-    }
-
-    if (ti.getLockOwnerName() != null) {
-      SimpleOrderedMap<Object> owner = new SimpleOrderedMap<>();
-      owner.add(NAME, ti.getLockOwnerName());
-      owner.add( ID, ti.getLockOwnerId() );
-      // attach the owner details; without this the map is built but never reported
-      info.add( "lockOwner", owner );
-    }
-    
-    // Add the stack trace
-    int i=0;
-    String[] trace = new String[ti.getStackTrace().length];
-    for( StackTraceElement ste : ti.getStackTrace()) {
-      trace[i++] = ste.toString();
-    }
-    info.add( "stackTrace", trace );
-    return info;
-  }
-  
-  private static String formatNanos(long ns) {
-    return String.format(Locale.ROOT, "%.4fms", ns / (double) 1000000);
-  }
-
-  //////////////////////// SolrInfoMBeans methods //////////////////////
-
-  @Override
-  public String getDescription() {
-    return "Thread Dump";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-}
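
The handler above is a thin wrapper over the JDK's ThreadMXBean. A minimal standalone sketch of the same probe-deadlocks-then-dump-all flow, using only java.lang.management (the class name is illustrative):

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public class MiniThreadDump {
  public static void main(String[] args) {
    ThreadMXBean tmbean = ManagementFactory.getThreadMXBean();
    // the same deadlock probe the handler runs before dumping all threads
    long[] deadlocked = tmbean.findMonitorDeadlockedThreads();
    System.out.println("monitor-deadlocked threads: "
        + (deadlocked == null ? 0 : deadlocked.length));
    for (ThreadInfo ti : tmbean.getThreadInfo(tmbean.getAllThreadIds(), Integer.MAX_VALUE)) {
      if (ti == null) continue;  // a thread may have exited since getAllThreadIds()
      System.out.printf("%d %s %s%n", ti.getThreadId(), ti.getThreadName(), ti.getThreadState());
    }
  }
}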


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
deleted file mode 100644
index a548031..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
+++ /dev/null
@@ -1,512 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.lang.invoke.MethodHandles;
-import java.net.ConnectException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CompletionService;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Predicate;
-
-import org.apache.http.client.HttpClient;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.client.solrj.impl.HttpSolrClient.Builder;
-import org.apache.solr.client.solrj.impl.LBHttpSolrClient;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.util.ClientUtils;
-import org.apache.solr.cloud.CloudDescriptor;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.core.CoreDescriptor;
-import org.apache.solr.request.SolrQueryRequest;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
-
-public class HttpShardHandler extends ShardHandler {
-  
-  /**
-   * If the request context map has an entry with this key and Boolean.TRUE as value,
-   * {@link #prepDistributed(ResponseBuilder)} will only include {@link org.apache.solr.common.cloud.Replica.Type#NRT} replicas as possible
-   * destinations of the distributed request (or a leader replica of type {@link org.apache.solr.common.cloud.Replica.Type#TLOG}). This is used
-   * by the RealtimeGet handler, since other types of replicas shouldn't respond to RTG requests
-   */
-  public static String ONLY_NRT_REPLICAS = "distribOnlyRealtime";
-
-  private HttpShardHandlerFactory httpShardHandlerFactory;
-  private CompletionService<ShardResponse> completionService;
-  private Set<Future<ShardResponse>> pending;
-  private Map<String,List<String>> shardToURLs;
-  private HttpClient httpClient;
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public HttpShardHandler(HttpShardHandlerFactory httpShardHandlerFactory, HttpClient httpClient) {
-    this.httpClient = httpClient;
-    this.httpShardHandlerFactory = httpShardHandlerFactory;
-    completionService = httpShardHandlerFactory.newCompletionService();
-    pending = new HashSet<>();
-
-    // maps "localhost:8983|localhost:7574" to a shuffled List("http://localhost:8983","http://localhost:7574")
-    // This is primarily to keep track of what order we should use to query the replicas of a shard
-    // so that we use the same replica for all phases of a distributed request.
-    shardToURLs = new HashMap<>();
-
-  }
-
-
-  private static class SimpleSolrResponse extends SolrResponse {
-
-    long elapsedTime;
-
-    NamedList<Object> nl;
-
-    @Override
-    public long getElapsedTime() {
-      return elapsedTime;
-    }
-
-    @Override
-    public NamedList<Object> getResponse() {
-      return nl;
-    }
-
-    @Override
-    public void setResponse(NamedList<Object> rsp) {
-      nl = rsp;
-    }
-
-    @Override
-    public void setElapsedTime(long elapsedTime) {
-      this.elapsedTime = elapsedTime;
-    }
-  }
-
-
-  // Not thread safe... don't use in Callable.
-  // Don't modify the returned URL list.
-  private List<String> getURLs(String shard) {
-    List<String> urls = shardToURLs.get(shard);
-    if (urls == null) {
-      urls = httpShardHandlerFactory.buildURLList(shard);
-      shardToURLs.put(shard, urls);
-    }
-    return urls;
-  }
-
-  @Override
-  public void submit(final ShardRequest sreq, final String shard, final ModifiableSolrParams params) {
-    // do this outside of the callable for thread safety reasons
-    final List<String> urls = getURLs(shard);
-
-    Callable<ShardResponse> task = () -> {
-
-      ShardResponse srsp = new ShardResponse();
-      if (sreq.nodeName != null) {
-        srsp.setNodeName(sreq.nodeName);
-      }
-      srsp.setShardRequest(sreq);
-      srsp.setShard(shard);
-      SimpleSolrResponse ssr = new SimpleSolrResponse();
-      srsp.setSolrResponse(ssr);
-      long startTime = System.nanoTime();
-
-      try {
-        params.remove(CommonParams.WT); // use default (currently javabin)
-        params.remove(CommonParams.VERSION);
-
-        QueryRequest req = makeQueryRequest(sreq, params, shard);
-        req.setMethod(SolrRequest.METHOD.POST);
-
-        // no need to set the response parser as binary is the default
-        // req.setResponseParser(new BinaryResponseParser());
-
-        // if there are no shards available for a slice, urls.size()==0
-        if (urls.size()==0) {
-          // TODO: what's the right error code here? We should use the same thing when
-          // all of the servers for a shard are down.
-          throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "no servers hosting shard: " + shard);
-        }
-
-        if (urls.size() <= 1) {
-          String url = urls.get(0);
-          srsp.setShardAddress(url);
-          try (SolrClient client = new Builder(url).withHttpClient(httpClient).build()) {
-            ssr.nl = client.request(req);
-          }
-        } else {
-          LBHttpSolrClient.Rsp rsp = httpShardHandlerFactory.makeLoadBalancedRequest(req, urls);
-          ssr.nl = rsp.getResponse();
-          srsp.setShardAddress(rsp.getServer());
-        }
-      }
-      catch( ConnectException cex ) {
-        srsp.setException(cex); //????
-      } catch (Exception th) {
-        srsp.setException(th);
-        if (th instanceof SolrException) {
-          srsp.setResponseCode(((SolrException)th).code());
-        } else {
-          srsp.setResponseCode(-1);
-        }
-      }
-
-      ssr.elapsedTime = TimeUnit.MILLISECONDS.convert(System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
-
-      return transfomResponse(sreq, srsp, shard);
-    };
-
-    try {
-      if (shard != null)  {
-        MDC.put("ShardRequest.shards", shard);
-      }
-      if (urls != null && !urls.isEmpty())  {
-        MDC.put("ShardRequest.urlList", urls.toString());
-      }
-      pending.add( completionService.submit(task) );
-    } finally {
-      MDC.remove("ShardRequest.shards");
-      MDC.remove("ShardRequest.urlList");
-    }
-  }
-  
-  /**
-   * Subclasses could modify the request based on the shard
-   */
-  protected QueryRequest makeQueryRequest(final ShardRequest sreq, ModifiableSolrParams params, String shard)
-  {
-    // use generic request to avoid extra processing of queries
-    return new QueryRequest(params);
-  }
-  
-  /**
-   * Subclasses could modify the response based on the shard
-   */
-  protected ShardResponse transfomResponse(final ShardRequest sreq, ShardResponse rsp, String shard)
-  {
-    return rsp;
-  }
-
-  /** returns a ShardResponse of the last response correlated with a ShardRequest.  This won't 
-   * return early if it runs into an error.  
-   **/
-  @Override
-  public ShardResponse takeCompletedIncludingErrors() {
-    return take(false);
-  }
-
-
-  /** returns a ShardResponse of the last response correlated with a ShardRequest,
-   * or immediately returns a ShardResponse if there was an error detected
-   */
-  @Override
-  public ShardResponse takeCompletedOrError() {
-    return take(true);
-  }
-  
-  private ShardResponse take(boolean bailOnError) {
-    
-    while (pending.size() > 0) {
-      try {
-        Future<ShardResponse> future = completionService.take();
-        pending.remove(future);
-        ShardResponse rsp = future.get();
-        if (bailOnError && rsp.getException() != null) return rsp; // if exception, return immediately
-        // add response to the response list... we do this after the take() and
-        // not after the completion of "call" so we know when the last response
-        // for a request was received.  Otherwise we might return the same
-        // request more than once.
-        rsp.getShardRequest().responses.add(rsp);
-        if (rsp.getShardRequest().responses.size() == rsp.getShardRequest().actualShards.length) {
-          return rsp;
-        }
-      } catch (InterruptedException e) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
-      } catch (ExecutionException e) {
-        // should be impossible... the problem with catching the exception
-        // at this level is we don't know what ShardRequest it applied to
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Impossible Exception",e);
-      }
-    }
-    return null;
-  }
-
-
-  @Override
-  public void cancelAll() {
-    for (Future<ShardResponse> future : pending) {
-      future.cancel(false);
-    }
-  }
-
-  @Override
-  public void prepDistributed(ResponseBuilder rb) {
-    final SolrQueryRequest req = rb.req;
-    final SolrParams params = req.getParams();
-    final String shards = params.get(ShardParams.SHARDS);
-
-    // since the cost of grabbing cloud state is still up in the air, we grab it only
-    // if we need it.
-    ClusterState clusterState = null;
-    Map<String,Slice> slices = null;
-    CoreDescriptor coreDescriptor = req.getCore().getCoreDescriptor();
-    CloudDescriptor cloudDescriptor = coreDescriptor.getCloudDescriptor();
-    ZkController zkController = req.getCore().getCoreContainer().getZkController();
-
-    final ReplicaListTransformer replicaListTransformer = httpShardHandlerFactory.getReplicaListTransformer(req);
-
-    if (shards != null) {
-      List<String> lst = StrUtils.splitSmart(shards, ",", true);
-      rb.shards = lst.toArray(new String[lst.size()]);
-      rb.slices = new String[rb.shards.length];
-
-      if (zkController != null) {
-        // figure out which shards are slices
-        for (int i=0; i<rb.shards.length; i++) {
-          if (rb.shards[i].indexOf('/') < 0) {
-            // this is a logical shard
-            rb.slices[i] = rb.shards[i];
-            rb.shards[i] = null;
-          }
-        }
-      }
-    } else if (zkController != null) {
-      // we weren't provided with an explicit list of slices to query via "shards", so use the cluster state
-
-      clusterState =  zkController.getClusterState();
-      String shardKeys =  params.get(ShardParams._ROUTE_);
-
-      // This will be the complete list of slices we need to query for this request.
-      slices = new HashMap<>();
-
-      // we need to find out what collections this request is for.
-
-      // A comma-separated list of specified collections.
-      // Eg: "collection1,collection2,collection3"
-      String collections = params.get("collection");
-      if (collections != null) {
-        // If there were one or more collections specified in the query, split
-        // each parameter and store as a separate member of a List.
-        List<String> collectionList = StrUtils.splitSmart(collections, ",",
-            true);
-        // In turn, retrieve the slices that cover each collection from the
-        // cloud state and add them to the Map 'slices'.
-        for (String collectionName : collectionList) {
-          // The original code produced <collection-name>_<shard-name> when the collections
-          // parameter was specified (see ClientUtils.appendMap)
-          // Is this necessary if only one collection is specified?
-          // i.e. should we change multiCollection to collectionList.size() > 1?
-          addSlices(slices, clusterState, params, collectionName,  shardKeys, true);
-        }
-      } else {
-        // just this collection
-        String collectionName = cloudDescriptor.getCollectionName();
-        addSlices(slices, clusterState, params, collectionName,  shardKeys, false);
-      }
-
-
-      // Store the logical slices in the ResponseBuilder and create a new
-      // String array to hold the physical shards (which will be mapped
-      // later).
-      rb.slices = slices.keySet().toArray(new String[slices.size()]);
-      rb.shards = new String[rb.slices.length];
-    }
-
-    //
-    // Map slices to shards
-    //
-    if (zkController != null) {
-
-      // Are we hosting the shard that this request is for, and are we active? If so, then handle it ourselves
-      // and make it a non-distributed request.
-      String ourSlice = cloudDescriptor.getShardId();
-      String ourCollection = cloudDescriptor.getCollectionName();
-      // Some requests may only be fulfilled by replicas of type Replica.Type.NRT
-      boolean onlyNrtReplicas = Boolean.TRUE == req.getContext().get(ONLY_NRT_REPLICAS);
-      if (rb.slices.length == 1 && rb.slices[0] != null
-          && ( rb.slices[0].equals(ourSlice) || rb.slices[0].equals(ourCollection + "_" + ourSlice) )  // handle the <collection>_<slice> format
-          && cloudDescriptor.getLastPublished() == Replica.State.ACTIVE
-          && (!onlyNrtReplicas || cloudDescriptor.getReplicaType() == Replica.Type.NRT)) {
-        boolean shortCircuit = params.getBool("shortCircuit", true);       // currently just a debugging parameter to check distrib search on a single node
-
-        String targetHandler = params.get(ShardParams.SHARDS_QT);
-        shortCircuit = shortCircuit && targetHandler == null;             // if a different handler is specified, don't short-circuit
-
-        if (shortCircuit) {
-          rb.isDistrib = false;
-          rb.shortCircuitedURL = ZkCoreNodeProps.getCoreUrl(zkController.getBaseUrl(), coreDescriptor.getName());
-          return;
-        }
-        // We shouldn't need to do anything to handle "shard.rows" since it was previously meant to be an optimization?
-      }
-
-
-      for (int i=0; i<rb.shards.length; i++) {
-        if (rb.shards[i] != null) {
-          final List<String> shardUrls = StrUtils.splitSmart(rb.shards[i], "|", true);
-          replicaListTransformer.transform(shardUrls);
-          // And now recreate the | delimited list of equivalent servers
-          rb.shards[i] = createSliceShardsStr(shardUrls);
-        } else {
-          if (clusterState == null) {
-            clusterState =  zkController.getClusterState();
-            slices = clusterState.getCollection(cloudDescriptor.getCollectionName()).getSlicesMap();
-          }
-          String sliceName = rb.slices[i];
-
-          Slice slice = slices.get(sliceName);
-
-          if (slice==null) {
-            // Treat this the same as "all servers down" for a slice, and let things continue
-            // if partial results are acceptable
-            rb.shards[i] = "";
-            continue;
-            // throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "no such shard: " + sliceName);
-          }
-          final Predicate<Replica> isShardLeader = new Predicate<Replica>() {
-            private Replica shardLeader = null;
-
-            @Override
-            public boolean test(Replica replica) {
-              if (shardLeader == null) {
-                try {
-                  shardLeader = zkController.getZkStateReader().getLeaderRetry(cloudDescriptor.getCollectionName(), slice.getName());
-                } catch (InterruptedException e) {
-                  throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Exception finding leader for shard " + slice.getName() + " in collection " 
-                      + cloudDescriptor.getCollectionName(), e);
-                } catch (SolrException e) {
-                  if (log.isDebugEnabled()) {
-                    log.debug("Exception finding leader for shard {} in collection {}. Collection State: {}", 
-                        slice.getName(), cloudDescriptor.getCollectionName(), zkController.getZkStateReader().getClusterState().getCollectionOrNull(cloudDescriptor.getCollectionName()));
-                  }
-                  throw e;
-                }
-              }
-              return replica.getName().equals(shardLeader.getName());
-            }
-          };
-
-          final List<Replica> eligibleSliceReplicas = collectEligibleReplicas(slice, clusterState, onlyNrtReplicas, isShardLeader);
-
-          final List<String> shardUrls = transformReplicasToShardUrls(replicaListTransformer, eligibleSliceReplicas);
-
-          // And now recreate the | delimited list of equivalent servers
-          final String sliceShardsStr = createSliceShardsStr(shardUrls);
-          if (sliceShardsStr.isEmpty()) {
-            boolean tolerant = ShardParams.getShardsTolerantAsBool(rb.req.getParams());
-            if (!tolerant) {
-              // stop the check when there are no replicas available for a shard
-              throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE,
-                  "no servers hosting shard: " + rb.slices[i]);
-            }
-          }
-          rb.shards[i] = sliceShardsStr;
-        }
-      }
-    }
-    String shards_rows = params.get(ShardParams.SHARDS_ROWS);
-    if(shards_rows != null) {
-      rb.shards_rows = Integer.parseInt(shards_rows);
-    }
-    String shards_start = params.get(ShardParams.SHARDS_START);
-    if(shards_start != null) {
-      rb.shards_start = Integer.parseInt(shards_start);
-    }
-  }
-
-  private static List<Replica> collectEligibleReplicas(Slice slice, ClusterState clusterState, boolean onlyNrtReplicas, Predicate<Replica> isShardLeader) {
-    final Collection<Replica> allSliceReplicas = slice.getReplicasMap().values();
-    final List<Replica> eligibleSliceReplicas = new ArrayList<>(allSliceReplicas.size());
-    for (Replica replica : allSliceReplicas) {
-      if (!clusterState.liveNodesContain(replica.getNodeName())
-          || replica.getState() != Replica.State.ACTIVE
-          || (onlyNrtReplicas && replica.getType() == Replica.Type.PULL)) {
-        continue;
-      }
-
-      if (onlyNrtReplicas && replica.getType() == Replica.Type.TLOG) {
-        if (!isShardLeader.test(replica)) {
-          continue;
-        }
-      }
-      eligibleSliceReplicas.add(replica);
-    }
-    return eligibleSliceReplicas;
-  }
-
-  private static List<String> transformReplicasToShardUrls(final ReplicaListTransformer replicaListTransformer, final List<Replica> eligibleSliceReplicas) {
-    replicaListTransformer.transform(eligibleSliceReplicas);
-
-    final List<String> shardUrls = new ArrayList<>(eligibleSliceReplicas.size());
-    for (Replica replica : eligibleSliceReplicas) {
-      String url = ZkCoreNodeProps.getCoreUrl(replica);
-      shardUrls.add(url);
-    }
-    return shardUrls;
-  }
-
-  private static String createSliceShardsStr(final List<String> shardUrls) {
-    final StringBuilder sliceShardsStr = new StringBuilder();
-    boolean first = true;
-    for (String shardUrl : shardUrls) {
-      if (first) {
-        first = false;
-      } else {
-        sliceShardsStr.append('|');
-      }
-      sliceShardsStr.append(shardUrl);
-    }
-    return sliceShardsStr.toString();
-  }
-
-
-  private void addSlices(Map<String,Slice> target, ClusterState state, SolrParams params, String collectionName, String shardKeys, boolean multiCollection) {
-    DocCollection coll = state.getCollection(collectionName);
-    Collection<Slice> slices = coll.getRouter().getSearchSlices(shardKeys, params , coll);
-    ClientUtils.addSlices(target, collectionName, slices, multiCollection);
-  }
-
-  public ShardHandlerFactory getShardHandlerFactory(){
-    return httpShardHandlerFactory;
-  }
-
-
-
-}
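
The submit/take pattern above is a standard fan-out over a CompletionService: submit one task per shard, then drain futures in completion order rather than submission order. A self-contained sketch of that pattern (shard names and responses are placeholders):

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FanOutExample {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    CompletionService<String> completionService = new ExecutorCompletionService<>(pool);
    Set<Future<String>> pending = new HashSet<>();

    // submit one task per "shard", mirroring HttpShardHandler.submit()
    for (String shard : new String[]{"shard1", "shard2", "shard3"}) {
      pending.add(completionService.submit(() -> shard + ": ok"));
    }

    // drain in completion order, like take(boolean) above: whichever shard
    // answers first is handled first, regardless of submission order
    while (!pending.isEmpty()) {
      Future<String> future = completionService.take();
      pending.remove(future);
      System.out.println(future.get());
    }
    pool.shutdown();
  }
}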

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
deleted file mode 100644
index 1bb1fdb..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
+++ /dev/null
@@ -1,484 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.http.client.HttpClient;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpClientUtil;
-import org.apache.solr.client.solrj.impl.LBHttpSolrClient;
-import org.apache.solr.client.solrj.impl.LBHttpSolrClient.Builder;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.URLUtil;
-import org.apache.solr.core.PluginInfo;
-import org.apache.solr.core.SolrInfoBean;
-import org.apache.solr.metrics.SolrMetricManager;
-import org.apache.solr.metrics.SolrMetricProducer;
-import org.apache.solr.update.UpdateShardHandlerConfig;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.util.DefaultSolrThreadFactory;
-import org.apache.solr.util.stats.HttpClientMetricNameStrategy;
-import org.apache.solr.util.stats.InstrumentedHttpRequestExecutor;
-import org.apache.solr.util.stats.InstrumentedPoolingHttpClientConnectionManager;
-import org.apache.solr.util.stats.MetricUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Random;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.CompletionService;
-import java.util.concurrent.ExecutorCompletionService;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.solr.util.stats.InstrumentedHttpRequestExecutor.KNOWN_METRIC_NAME_STRATEGIES;
-
-
-public class HttpShardHandlerFactory extends ShardHandlerFactory implements org.apache.solr.util.plugin.PluginInfoInitialized, SolrMetricProducer {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private static final String DEFAULT_SCHEME = "http";
-  
-  // We want an executor that doesn't take up any resources if
-  // it's not used, so it could be created statically for
-  // the distributed search component if desired.
-  //
-  // Consider CallerRuns policy and a lower max threads to throttle
-  // requests at some point (or should we simply return failure?)
-  private ExecutorService commExecutor = new ExecutorUtil.MDCAwareThreadPoolExecutor(
-      0,
-      Integer.MAX_VALUE,
-      5, TimeUnit.SECONDS, // terminate idle threads after 5 sec
-      new SynchronousQueue<>(),  // directly hand off tasks
-      new DefaultSolrThreadFactory("httpShardExecutor"),
-      // the Runnable added to this executor handles all exceptions so we disable stack trace collection as an optimization
-      // see SOLR-11880 for more details
-      false
-  );
-
-  protected InstrumentedPoolingHttpClientConnectionManager clientConnectionManager;
-  protected CloseableHttpClient defaultClient;
-  protected InstrumentedHttpRequestExecutor httpRequestExecutor;
-  private LBHttpSolrClient loadbalancer;
-  //default values:
-  int soTimeout = HttpClientUtil.DEFAULT_SO_TIMEOUT;
-  int connectionTimeout = HttpClientUtil.DEFAULT_CONNECT_TIMEOUT;
-  int maxConnectionsPerHost = HttpClientUtil.DEFAULT_MAXCONNECTIONSPERHOST;
-  int maxConnections = HttpClientUtil.DEFAULT_MAXCONNECTIONS;
-  int corePoolSize = 0;
-  int maximumPoolSize = Integer.MAX_VALUE;
-  int keepAliveTime = 5;
-  int queueSize = -1;
-  int   permittedLoadBalancerRequestsMinimumAbsolute = 0;
-  float permittedLoadBalancerRequestsMaximumFraction = 1.0f;
-  boolean accessPolicy = false;
-
-  private String scheme = null;
-
-  private HttpClientMetricNameStrategy metricNameStrategy;
-
-  private String metricTag;
-
-  protected final Random r = new Random();
-
-  private final ReplicaListTransformer shufflingReplicaListTransformer = new ShufflingReplicaListTransformer(r);
-
-  // URL scheme to be used in distributed search.
-  static final String INIT_URL_SCHEME = "urlScheme";
-
-  // The core size of the threadpool servicing requests
-  static final String INIT_CORE_POOL_SIZE = "corePoolSize";
-
-  // The maximum size of the threadpool servicing requests
-  static final String INIT_MAX_POOL_SIZE = "maximumPoolSize";
-
-  // The amount of time idle threads persist for in the queue, before being killed
-  static final String MAX_THREAD_IDLE_TIME = "maxThreadIdleTime";
-
-  // If the threadpool uses a backing queue, its maximum size; use -1 for direct handoff
-  static final String INIT_SIZE_OF_QUEUE = "sizeOfQueue";
-
-  // The minimum number of replicas that may be used
-  static final String LOAD_BALANCER_REQUESTS_MIN_ABSOLUTE = "loadBalancerRequestsMinimumAbsolute";
-
-  // The maximum proportion of replicas to be used
-  static final String LOAD_BALANCER_REQUESTS_MAX_FRACTION = "loadBalancerRequestsMaximumFraction";
-
-  // Configure if the threadpool favours fairness over throughput
-  static final String INIT_FAIRNESS_POLICY = "fairnessPolicy";
-
-  /**
-   * Get {@link ShardHandler} that uses the default http client.
-   */
-  @Override
-  public ShardHandler getShardHandler() {
-    return getShardHandler(defaultClient);
-  }
-
-  /**
-   * Get {@link ShardHandler} that uses custom http client.
-   */
-  public ShardHandler getShardHandler(final HttpClient httpClient){
-    return new HttpShardHandler(this, httpClient);
-  }
-
-  @Override
-  public void init(PluginInfo info) {
-    StringBuilder sb = new StringBuilder();
-    NamedList args = info.initArgs;
-    this.soTimeout = getParameter(args, HttpClientUtil.PROP_SO_TIMEOUT, soTimeout,sb);
-    this.scheme = getParameter(args, INIT_URL_SCHEME, null,sb);
-    if(StringUtils.endsWith(this.scheme, "://")) {
-      this.scheme = StringUtils.removeEnd(this.scheme, "://");
-    }
-
-    String strategy = getParameter(args, "metricNameStrategy", UpdateShardHandlerConfig.DEFAULT_METRICNAMESTRATEGY, sb);
-    this.metricNameStrategy = KNOWN_METRIC_NAME_STRATEGIES.get(strategy);
-    if (this.metricNameStrategy == null)  {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Unknown metricNameStrategy: " + strategy + " found. Must be one of: " + KNOWN_METRIC_NAME_STRATEGIES.keySet());
-    }
-
-    this.connectionTimeout = getParameter(args, HttpClientUtil.PROP_CONNECTION_TIMEOUT, connectionTimeout, sb);
-    this.maxConnectionsPerHost = getParameter(args, HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, maxConnectionsPerHost,sb);
-    this.maxConnections = getParameter(args, HttpClientUtil.PROP_MAX_CONNECTIONS, maxConnections,sb);
-    this.corePoolSize = getParameter(args, INIT_CORE_POOL_SIZE, corePoolSize,sb);
-    this.maximumPoolSize = getParameter(args, INIT_MAX_POOL_SIZE, maximumPoolSize,sb);
-    this.keepAliveTime = getParameter(args, MAX_THREAD_IDLE_TIME, keepAliveTime,sb);
-    this.queueSize = getParameter(args, INIT_SIZE_OF_QUEUE, queueSize,sb);
-    this.permittedLoadBalancerRequestsMinimumAbsolute = getParameter(
-        args,
-        LOAD_BALANCER_REQUESTS_MIN_ABSOLUTE,
-        permittedLoadBalancerRequestsMinimumAbsolute,
-        sb);
-    this.permittedLoadBalancerRequestsMaximumFraction = getParameter(
-        args,
-        LOAD_BALANCER_REQUESTS_MAX_FRACTION,
-        permittedLoadBalancerRequestsMaximumFraction,
-        sb);
-    this.accessPolicy = getParameter(args, INIT_FAIRNESS_POLICY, accessPolicy,sb);
-    log.debug("created with {}",sb);
-    
-    // magic sysprop to make tests reproducible: set by SolrTestCaseJ4.
-    String v = System.getProperty("tests.shardhandler.randomSeed");
-    if (v != null) {
-      r.setSeed(Long.parseLong(v));
-    }
-
-    BlockingQueue<Runnable> blockingQueue = (this.queueSize == -1) ?
-        new SynchronousQueue<Runnable>(this.accessPolicy) :
-        new ArrayBlockingQueue<Runnable>(this.queueSize, this.accessPolicy);
-
-    this.commExecutor = new ExecutorUtil.MDCAwareThreadPoolExecutor(
-        this.corePoolSize,
-        this.maximumPoolSize,
-        this.keepAliveTime, TimeUnit.SECONDS,
-        blockingQueue,
-        new DefaultSolrThreadFactory("httpShardExecutor")
-    );
-
-    ModifiableSolrParams clientParams = getClientParams();
-    httpRequestExecutor = new InstrumentedHttpRequestExecutor(this.metricNameStrategy);
-    clientConnectionManager = new InstrumentedPoolingHttpClientConnectionManager(HttpClientUtil.getSchemaRegisteryProvider().getSchemaRegistry());
-    this.defaultClient = HttpClientUtil.createClient(clientParams, clientConnectionManager, false, httpRequestExecutor);
-    this.loadbalancer = createLoadbalancer(defaultClient);
-  }
-
-  protected ModifiableSolrParams getClientParams() {
-    ModifiableSolrParams clientParams = new ModifiableSolrParams();
-    clientParams.set(HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, maxConnectionsPerHost);
-    clientParams.set(HttpClientUtil.PROP_MAX_CONNECTIONS, maxConnections);
-    return clientParams;
-  }
-
-  protected ExecutorService getThreadPoolExecutor(){
-    return this.commExecutor;
-  }
-
-  protected LBHttpSolrClient createLoadbalancer(HttpClient httpClient){
-    LBHttpSolrClient client = new Builder()
-        .withHttpClient(httpClient)
-        .withConnectionTimeout(connectionTimeout)
-        .withSocketTimeout(soTimeout)
-        .build();
-    return client;
-  }
-
-  protected <T> T getParameter(NamedList initArgs, String configKey, T defaultValue, StringBuilder sb) {
-    T toReturn = defaultValue;
-    if (initArgs != null) {
-      T temp = (T) initArgs.get(configKey);
-      toReturn = (temp != null) ? temp : defaultValue;
-    }
-    if (sb != null && toReturn != null) sb.append(configKey).append(" : ").append(toReturn).append(",");
-    return toReturn;
-  }
-
-
-  @Override
-  public void close() {
-    try {
-      ExecutorUtil.shutdownAndAwaitTermination(commExecutor);
-    } finally {
-      try {
-        if (loadbalancer != null) {
-          loadbalancer.close();
-        }
-      } finally { 
-        if (defaultClient != null) {
-          HttpClientUtil.close(defaultClient);
-        }
-        if (clientConnectionManager != null)  {
-          clientConnectionManager.close();
-        }
-      }
-    }
-  }
-
-  /**
-   * Makes a request to one or more of the given urls, using the configured load balancer.
-   *
-   * @param req The solr search request that should be sent through the load balancer
-   * @param urls The list of solr server urls to load balance across
-   * @return The response from the request
-   */
-  public LBHttpSolrClient.Rsp makeLoadBalancedRequest(final QueryRequest req, List<String> urls)
-    throws SolrServerException, IOException {
-    return loadbalancer.request(newLBHttpSolrClientReq(req, urls));
-  }
-
-  protected LBHttpSolrClient.Req newLBHttpSolrClientReq(final QueryRequest req, List<String> urls) {
-    int numServersToTry = (int)Math.floor(urls.size() * this.permittedLoadBalancerRequestsMaximumFraction);
-    if (numServersToTry < this.permittedLoadBalancerRequestsMinimumAbsolute) {
-      numServersToTry = this.permittedLoadBalancerRequestsMinimumAbsolute;
-    }
-    return new LBHttpSolrClient.Req(req, urls, numServersToTry);
-  }
-
-  /**
-   * Creates a list of urls for the given shard.
-   *
-   * @param shard the urls for the shard, separated by '|'
-   * @return A list of valid urls (including protocol) that are replicas for the shard
-   */
-  public List<String> buildURLList(String shard) {
-    List<String> urls = StrUtils.splitSmart(shard, "|", true);
-
-    // convert shard to URL
-    for (int i=0; i<urls.size(); i++) {
-      urls.set(i, buildUrl(urls.get(i)));
-    }
-
-    return urls;
-  }
-
-  /**
-   * A distributed request is made via {@link LBHttpSolrClient} to the first live server in the URL list.
-   * This means it is just as likely to choose the current host as any of the other hosts.
-   * This function makes sure that the cores are sorted according to the given list of preferences.
-   * E.g. if all nodes prefer local cores, then a bad/heavily-loaded node will receive fewer requests from
-   * healthy nodes. This helps prevent a distributed deadlock or timeouts in all the healthy nodes due
-   * to one bad node.
-   */
-  static class NodePreferenceRulesComparator implements Comparator<Object> {
-    private static class PreferenceRule {
-      public final String name;
-      public final String value;
-
-      public PreferenceRule(String name, String value) {
-        this.name = name;
-        this.value = value;
-      }
-    }
-
-    private final SolrQueryRequest request;
-    private List<PreferenceRule> preferenceRules;
-    private String localHostAddress = null;
-
-    public NodePreferenceRulesComparator(final List<String> sortRules, final SolrQueryRequest request) {
-      this.request = request;
-      this.preferenceRules = new ArrayList<PreferenceRule>(sortRules.size());
-      sortRules.forEach(rule -> {
-        String[] parts = rule.split(":", 2);
-        if (parts.length != 2) {
-          throw new IllegalArgumentException("Invalid " + ShardParams.SHARDS_PREFERENCE + " rule: " + rule);
-        }
-        this.preferenceRules.add(new PreferenceRule(parts[0], parts[1])); 
-      });
-    }
-    @Override
-    public int compare(Object left, Object right) {
-      for (PreferenceRule preferenceRule: this.preferenceRules) {
-        final boolean lhs;
-        final boolean rhs;
-        switch (preferenceRule.name) {
-          case ShardParams.SHARDS_PREFERENCE_REPLICA_TYPE:
-            lhs = hasReplicaType(left, preferenceRule.value);
-            rhs = hasReplicaType(right, preferenceRule.value);
-            break;
-          case ShardParams.SHARDS_PREFERENCE_REPLICA_LOCATION:
-            lhs = hasCoreUrlPrefix(left, preferenceRule.value);
-            rhs = hasCoreUrlPrefix(right, preferenceRule.value);
-            break;
-          default:
-            throw new IllegalArgumentException("Invalid " + ShardParams.SHARDS_PREFERENCE + " type: " + preferenceRule.name);
-        }
-        if (lhs != rhs) {
-          return lhs ? -1 : +1;
-        }
-      }
-      return 0;
-    }
-    private boolean hasCoreUrlPrefix(Object o, String prefix) {
-      final String s;
-      if (o instanceof String) {
-        s = (String)o;
-      }
-      else if (o instanceof Replica) {
-        s = ((Replica)o).getCoreUrl();
-      } else {
-        return false;
-      }
-      if (prefix.equals(ShardParams.REPLICA_LOCAL)) {
-        if (null == localHostAddress) {
-          final ZkController zkController = this.request.getCore().getCoreContainer().getZkController();
-          localHostAddress = zkController != null ? zkController.getBaseUrl() : "";
-          if (localHostAddress.isEmpty()) {
-            log.warn("Couldn't determine current host address for sorting of local replicas");
-          }
-        }
-        if (!localHostAddress.isEmpty()) {
-          if (s.startsWith(localHostAddress)) {
-            return true;
-          }
-        }
-      } else {
-        if (s.startsWith(prefix)) {
-          return true;
-        }
-      }
-      return false;
-    }
-    private static boolean hasReplicaType(Object o, String preferred) {
-      if (!(o instanceof Replica)) {
-        return false;
-      }
-      final String s = ((Replica)o).getType().toString();
-      return s.equals(preferred);
-    }
-  }
-
-  protected ReplicaListTransformer getReplicaListTransformer(final SolrQueryRequest req) {
-    final SolrParams params = req.getParams();
-    @SuppressWarnings("deprecation")
-    final boolean preferLocalShards = params.getBool(CommonParams.PREFER_LOCAL_SHARDS, false);
-    final String shardsPreferenceSpec = params.get(ShardParams.SHARDS_PREFERENCE, "");
-
-    if (preferLocalShards || !shardsPreferenceSpec.isEmpty()) {
-      if (preferLocalShards && !shardsPreferenceSpec.isEmpty()) {
-        throw new SolrException(
-          SolrException.ErrorCode.BAD_REQUEST,
-          "preferLocalShards is deprecated and must not be used with shards.preference" 
-        );
-      }
-      List<String> preferenceRules = StrUtils.splitSmart(shardsPreferenceSpec, ',');
-      if (preferLocalShards) {
-        preferenceRules.add(ShardParams.SHARDS_PREFERENCE_REPLICA_LOCATION + ":" + ShardParams.REPLICA_LOCAL);
-      }
-
-      return new ShufflingReplicaListTransformer(r) {
-        @Override
-        public void transform(List<?> choices)
-        {
-          if (choices.size() > 1) {
-            super.transform(choices);
-            if (log.isDebugEnabled()) {
-              log.debug("Applying the following sorting preferences to replicas: {}",
-                  Arrays.toString(preferenceRules.toArray()));
-            }
-            try {
-              choices.sort(new NodePreferenceRulesComparator(preferenceRules, req));
-            } catch (IllegalArgumentException iae) {
-              throw new SolrException(
-                SolrException.ErrorCode.BAD_REQUEST,
-                iae.getMessage()
-              );
-            }
-            if (log.isDebugEnabled()) {
-              log.debug("Applied sorting preferences to replica list: {}",
-                  Arrays.toString(choices.toArray()));
-            }
-          }
-        }
-      };
-    }
-
-    return shufflingReplicaListTransformer;
-  }
-
-  /**
-   * Creates a new completion service for use by a single set of distributed requests.
-   */
-  public CompletionService<ShardResponse> newCompletionService() {
-    return new ExecutorCompletionService<>(commExecutor);
-  }
-  
-  /**
-   * Rebuilds the URL, replacing the URL scheme of the passed URL with the
-   * configured scheme replacement. If no scheme was configured, the passed URL's
-   * scheme is left alone.
-   */
-  private String buildUrl(String url) {
-    if(!URLUtil.hasScheme(url)) {
-      return StringUtils.defaultIfEmpty(scheme, DEFAULT_SCHEME) + "://" + url;
-    } else if(StringUtils.isNotEmpty(scheme)) {
-      return scheme + "://" + URLUtil.removeScheme(url);
-    }
-    
-    return url;
-  }
-
-  @Override
-  public void initializeMetrics(SolrMetricManager manager, String registry, String tag, String scope) {
-    this.metricTag = tag;
-    String expandedScope = SolrMetricManager.mkName(scope, SolrInfoBean.Category.QUERY.name());
-    clientConnectionManager.initializeMetrics(manager, registry, tag, expandedScope);
-    httpRequestExecutor.initializeMetrics(manager, registry, tag, expandedScope);
-    commExecutor = MetricUtils.instrumentedExecutorService(commExecutor, null,
-        manager.registry(registry),
-        SolrMetricManager.mkName("httpShardExecutor", expandedScope, "threadPool"));
-  }
-
-}
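
The numbers behind LOAD_BALANCER_REQUESTS_MIN_ABSOLUTE and LOAD_BALANCER_REQUESTS_MAX_FRACTION meet in newLBHttpSolrClientReq() above: the factory floors the URL count times the fraction, then raises the result to the absolute minimum. A minimal, runnable sketch of that clamping, with invented values rather than anything read from a real configuration:

import java.util.Arrays;
import java.util.List;

public class NumServersToTrySketch {
  public static void main(String[] args) {
    List<String> urls = Arrays.asList(
        "host1:8983/solr/c1", "host2:8983/solr/c1", "host3:8983/solr/c1");
    double maximumFraction = 0.5; // illustrative value
    int minimumAbsolute = 2;      // illustrative value

    // floor(3 * 0.5) == 1, which is below the minimum of 2...
    int numServersToTry = (int) Math.floor(urls.size() * maximumFraction);
    if (numServersToTry < minimumAbsolute) {
      numServersToTry = minimumAbsolute; // ...so it is clamped up to 2
    }
    System.out.println(numServersToTry); // prints 2
  }
}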

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/IterativeMergeStrategy.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/IterativeMergeStrategy.java b/solr/core/src/java/org/apache/solr/handler/component/IterativeMergeStrategy.java
deleted file mode 100644
index 97d4199..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/IterativeMergeStrategy.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.lang.invoke.MethodHandles;
-import java.util.concurrent.Callable;
-import java.util.concurrent.Future;
-import java.util.concurrent.ExecutorService;
-import java.util.List;
-import java.util.ArrayList;
-
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.impl.HttpClientUtil;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient.Builder;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.SolrjNamedThreadFactory;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.http.client.HttpClient;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.DISTRIB;
-
-public abstract class IterativeMergeStrategy implements MergeStrategy  {
-
-  protected ExecutorService executorService;
-  protected static HttpClient httpClient;
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public void merge(ResponseBuilder rb, ShardRequest sreq) {
-    rb._responseDocs = new SolrDocumentList(); // Null pointers will occur otherwise.
-    rb.onePassDistributedQuery = true; // Turn off the second distributed pass.
-    executorService = ExecutorUtil.newMDCAwareCachedThreadPool(new SolrjNamedThreadFactory("IterativeMergeStrategy"));
-    try {
-      process(rb, sreq);
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    } finally {
-      executorService.shutdownNow();
-    }
-  }
-
-  public boolean mergesIds() {
-    return true;
-  }
-
-  public int getCost() {
-    return 0;
-  }
-
-  public boolean handlesMergeFields() {
-    return false;
-  }
-
-  public void handleMergeFields(ResponseBuilder rb, SolrIndexSearcher searcher) {
-
-  }
-
-  public static class CallBack implements Callable<CallBack> {
-    private HttpSolrClient solrClient;
-    private QueryRequest req;
-    private QueryResponse response;
-    private ShardResponse originalShardResponse;
-
-    public CallBack(ShardResponse originalShardResponse, QueryRequest req) {
-
-      this.solrClient = new Builder(originalShardResponse.getShardAddress())
-          .withHttpClient(getHttpClient())
-          .build();
-      this.req = req;
-      this.originalShardResponse = originalShardResponse;
-      req.setMethod(SolrRequest.METHOD.POST);
-      ModifiableSolrParams params = (ModifiableSolrParams)req.getParams();
-      params.add(DISTRIB, "false");
-    }
-
-    public QueryResponse getResponse() {
-      return this.response;
-    }
-
-    public ShardResponse getOriginalShardResponse() {
-      return this.originalShardResponse;
-    }
-
-    public CallBack call() throws Exception {
-      this.response = req.process(solrClient);
-      return this;
-    }
-  }
-
-  public List<Future<CallBack>> callBack(List<ShardResponse> responses, QueryRequest req) {
-    List<Future<CallBack>> futures = new ArrayList<>();
-    for(ShardResponse response : responses) {
-      futures.add(this.executorService.submit(new CallBack(response, req)));
-    }
-    return futures;
-  }
-
-  public Future<CallBack> callBack(ShardResponse response, QueryRequest req) {
-    return this.executorService.submit(new CallBack(response, req));
-  }
-
-  protected abstract void process(ResponseBuilder rb, ShardRequest sreq) throws Exception;
-
-  static synchronized HttpClient getHttpClient() {
-    if (httpClient == null) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set(HttpClientUtil.PROP_MAX_CONNECTIONS, 128);
-      params.set(HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, 32);
-      httpClient = HttpClientUtil.createClient(params);
-    }
-    return httpClient;
-  }
-}
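
To make the deleted contract concrete: a subclass only implements process(), and typically re-queries the shards through callBack(), which wraps each follow-up request in a CallBack future. The sketch below is hypothetical; the count query and the "mergedNumFound" response key are invented for illustration, not taken from any shipped strategy.

import java.util.List;
import java.util.concurrent.Future;

import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.handler.component.IterativeMergeStrategy;
import org.apache.solr.handler.component.ResponseBuilder;
import org.apache.solr.handler.component.ShardRequest;

public class CountingMergeStrategy extends IterativeMergeStrategy {
  @Override
  protected void process(ResponseBuilder rb, ShardRequest sreq) throws Exception {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("q", "*:*");
    params.set("rows", "0");
    QueryRequest followUp = new QueryRequest(params);

    // Fan the follow-up request out to every shard; each future resolves to
    // the CallBack carrying that shard's QueryResponse.
    List<Future<CallBack>> futures = callBack(sreq.responses, followUp);
    long total = 0;
    for (Future<CallBack> future : futures) {
      SolrDocumentList docs = (SolrDocumentList)
          future.get().getResponse().getResponse().get("response");
      total += docs.getNumFound();
    }
    rb.rsp.add("mergedNumFound", total); // invented response key
  }
}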

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/MergeStrategy.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/MergeStrategy.java b/solr/core/src/java/org/apache/solr/handler/component/MergeStrategy.java
deleted file mode 100644
index 503dfb5..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/MergeStrategy.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import org.apache.solr.search.SolrIndexSearcher;
-
-import java.util.Comparator;
-import java.io.IOException;
-
-/**
-* The MergeStrategy interface defines custom merge logic for distributed searches.
-*
-*  <b>Note: This API is experimental and may change in non-backward-compatible ways in the future</b>
-**/
-
-
-public interface MergeStrategy {
-
-  /**
-  *  merge defines the merging behavior for results that are collected from the
-  *  shards during a distributed search.
-  *
-  **/
-
-  public void merge(ResponseBuilder rb, ShardRequest sreq);
-
-  /**
-  * mergesIds must return true if the merge method merges document ids from the shards.
-  * If it merges other output from the shards it must return false.
-  * */
-
-  public boolean mergesIds();
-
-
-  /**
-  * handlesMergeFields must return true if the MergeStrategy
-  * implements a custom handleMergeFields(ResponseBuilder rb, SolrIndexSearcher searcher)
-  * */
-
-  public boolean handlesMergeFields();
-
-
-  /**
-  *  Implement handleMergeFields(ResponseBuilder rb, SolrIndexSearcher searcher) if
-  *  your merge strategy needs more complex data than the sort fields provide.
-  * */
-
-  public void handleMergeFields(ResponseBuilder rb, SolrIndexSearcher searcher) throws IOException;
-
-  /**
-  *  Defines the order that the mergeStrategies are applied. Lower costs are applied first.
-  * */
-  public int getCost();
-
-  final Comparator MERGE_COMP = (o1, o2) -> {
-    MergeStrategy m1 = (MergeStrategy) o1;
-    MergeStrategy m2 = (MergeStrategy) o2;
-    return m1.getCost() - m2.getCost();
-  };
-
-}
\ No newline at end of file
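
Read together, the methods above form a small contract; the hedged, minimal sketch below shows one shape an implementation can take. The no-op bodies and the cost of 100 are illustrative assumptions, not a recommended strategy.

import java.io.IOException;

import org.apache.solr.handler.component.MergeStrategy;
import org.apache.solr.handler.component.ResponseBuilder;
import org.apache.solr.handler.component.ShardRequest;
import org.apache.solr.search.SolrIndexSearcher;

public class NoOpMergeStrategy implements MergeStrategy {

  @Override
  public void merge(ResponseBuilder rb, ShardRequest sreq) {
    // a real strategy would fold sreq.responses into rb here
  }

  @Override
  public boolean mergesIds() {
    return false; // this sketch merges no document ids
  }

  @Override
  public boolean handlesMergeFields() {
    return false; // so handleMergeFields below is never invoked
  }

  @Override
  public void handleMergeFields(ResponseBuilder rb, SolrIndexSearcher searcher) throws IOException {
    // intentionally empty; see handlesMergeFields()
  }

  @Override
  public int getCost() {
    return 100; // MERGE_COMP applies lower-cost strategies first
  }
}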

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java b/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java
deleted file mode 100644
index fd9d37d..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java
+++ /dev/null
@@ -1,428 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.TreeMap;
-
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.MoreLikeThisParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.handler.MoreLikeThisHandler;
-import org.apache.solr.schema.IndexSchema;
-import org.apache.solr.search.DocIterator;
-import org.apache.solr.search.DocList;
-import org.apache.solr.search.DocListAndSet;
-import org.apache.solr.search.ReturnFields;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.search.SolrReturnFields;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.SORT;
-
-/**
- * TODO!
- * 
- * 
- * @since solr 1.3
- */
-public class MoreLikeThisComponent extends SearchComponent {
-  public static final String COMPONENT_NAME = "mlt";
-  public static final String DIST_DOC_ID = "mlt.dist.id";
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  @Override
-  public void prepare(ResponseBuilder rb) throws IOException {
-    if (rb.req.getParams().getBool(MoreLikeThisParams.MLT, false)) {
-      rb.setNeedDocList(true);
-    }
-    
-  }
-  
-  @Override
-  public void process(ResponseBuilder rb) throws IOException {
-
-    SolrParams params = rb.req.getParams();
-    
-    if (params.getBool(MoreLikeThisParams.MLT, false)) {
-      ReturnFields returnFields = new SolrReturnFields( rb.req );
-  
-      int flags = 0;
-      if (returnFields.wantsScore()) {
-        flags |= SolrIndexSearcher.GET_SCORES;
-      }
-  
-      rb.setFieldFlags(flags);
-
-      log.debug("Starting MoreLikeThis.Process.  isShard: "
-          + params.getBool(ShardParams.IS_SHARD));
-      SolrIndexSearcher searcher = rb.req.getSearcher();
-
-      if (params.getBool(ShardParams.IS_SHARD, false)) {
-        if (params.get(MoreLikeThisComponent.DIST_DOC_ID) == null) {
-          if (rb.getResults().docList.size() == 0) {
-            // return empty response
-            rb.rsp.add("moreLikeThis", new NamedList<DocList>());
-            return;
-          }
-          
-          MoreLikeThisHandler.MoreLikeThisHelper mlt = new MoreLikeThisHandler.MoreLikeThisHelper(
-              params, searcher);
-
-          NamedList<BooleanQuery> bQuery = mlt.getMoreLikeTheseQuery(rb
-              .getResults().docList);
-          
-          NamedList<String> temp = new NamedList<>();
-          Iterator<Entry<String,BooleanQuery>> idToQueryIt = bQuery.iterator();
-
-          
-          while (idToQueryIt.hasNext()) {
-            Entry<String,BooleanQuery> idToQuery = idToQueryIt.next();
-            String s = idToQuery.getValue().toString();
-
-            log.debug("MLT Query:" + s);
-            temp.add(idToQuery.getKey(), idToQuery.getValue().toString());
-          }
-
-          rb.rsp.add("moreLikeThis", temp);
-        } else {
-          NamedList<DocList> sim = getMoreLikeThese(rb, rb.req.getSearcher(),
-              rb.getResults().docList, flags);
-          rb.rsp.add("moreLikeThis", sim);
-        }
-      } else {
-        // non distrib case
-        NamedList<DocList> sim = getMoreLikeThese(rb, rb.req.getSearcher(), rb.getResults().docList,
-            flags);
-        rb.rsp.add("moreLikeThis", sim);
-      }
-    }
-  }
-  
-  @Override
-  public void handleResponses(ResponseBuilder rb, ShardRequest sreq) {
-    if ((sreq.purpose & ShardRequest.PURPOSE_GET_TOP_IDS) != 0
-        && rb.req.getParams().getBool(COMPONENT_NAME, false)) {
-      log.debug("ShardRequest.response.size: " + sreq.responses.size());
-      for (ShardResponse r : sreq.responses) {
-        if (r.getException() != null) {
-          // This should only happen in case of using shards.tolerant=true. Omit this ShardResponse
-          continue;
-        }
-        NamedList<?> moreLikeThisResponse = (NamedList<?>) r.getSolrResponse()
-            .getResponse().get("moreLikeThis");
-        log.debug("ShardRequest.response.shard: " + r.getShard());
-        if (moreLikeThisResponse != null) {
-          for (Entry<String,?> entry : moreLikeThisResponse) {
-            log.debug("id: \"" + entry.getKey() + "\" Query: \""
-                + entry.getValue() + "\"");
-            ShardRequest s = buildShardQuery(rb, (String) entry.getValue(),
-                entry.getKey());
-            rb.addRequest(this, s);
-          }
-        }
-      }
-    }
-    
-    if ((sreq.purpose & ShardRequest.PURPOSE_GET_MLT_RESULTS) != 0) {
-      for (ShardResponse r : sreq.responses) {
-        log.debug("MLT Query returned: "
-            + r.getSolrResponse().getResponse().toString());
-      }
-    }
-  }
-  
-  @Override
-  public void finishStage(ResponseBuilder rb) {
-    
-    // Responses are handled in finishStage because solrResponse would otherwise
-    // put the moreLikeThis XML segment ahead of the result/response segment.
-    if (rb.stage == ResponseBuilder.STAGE_GET_FIELDS
-        && rb.req.getParams().getBool(COMPONENT_NAME, false)) {
-      Map<Object,SolrDocumentList> tempResults = new LinkedHashMap<>();
-      
-      int mltcount = rb.req.getParams().getInt(MoreLikeThisParams.DOC_COUNT, MoreLikeThisParams.DEFAULT_DOC_COUNT);
-      String keyName = rb.req.getSchema().getUniqueKeyField().getName();
-      
-      for (ShardRequest sreq : rb.finished) {
-        if ((sreq.purpose & ShardRequest.PURPOSE_GET_MLT_RESULTS) != 0) {
-          for (ShardResponse r : sreq.responses) {
-            log.debug("ShardRequest.response.shard: " + r.getShard());
-            String key = r.getShardRequest().params
-                .get(MoreLikeThisComponent.DIST_DOC_ID);
-            SolrDocumentList shardDocList =  (SolrDocumentList) r.getSolrResponse().getResponse().get("response");
-
-            if (shardDocList == null) {
-              continue;
-            }
- 
-            log.info("MLT: results added for key: " + key + " documents: "
-                + shardDocList.toString());
-//            if (log.isDebugEnabled()) {
-//              for (SolrDocument doc : shardDocList) {
-//                doc.addField("shard", "=" + r.getShard());
-//              }
-//            }
-            SolrDocumentList mergedDocList = tempResults.get(key);
- 
-            if (mergedDocList == null) {
-              mergedDocList = new SolrDocumentList();
-              mergedDocList.addAll(shardDocList);
-              mergedDocList.setNumFound(shardDocList.getNumFound());
-              mergedDocList.setStart(shardDocList.getStart());
-              mergedDocList.setMaxScore(shardDocList.getMaxScore());
-            } else {
-              mergedDocList = mergeSolrDocumentList(mergedDocList,
-                  shardDocList, mltcount, keyName);
-            }
-            log.debug("Adding docs for key: " + key);
-            tempResults.put(key, mergedDocList);
-          }
-        }
-      }
-
-      NamedList<SolrDocumentList> list = buildMoreLikeThisNamed(tempResults,
-          rb.resultIds);
-     
-      rb.rsp.add("moreLikeThis", list);
-      
-    }
-    super.finishStage(rb);
-  }
-
-  @Override
-  public void modifyRequest(ResponseBuilder rb, SearchComponent who, ShardRequest sreq) {
-    SolrParams params = rb.req.getParams();
-    if (!params.getBool(COMPONENT_NAME, false)) return;
-    if ((sreq.purpose & ShardRequest.PURPOSE_GET_MLT_RESULTS) == 0
-        && (sreq.purpose & ShardRequest.PURPOSE_GET_TOP_IDS) == 0) {
-      sreq.params.set(COMPONENT_NAME, "false");
-    }
-  }
-
-  /**
-   * Returns a NamedList ordered by
-   * resultIds.shardDoc.positionInResponse.
-   */
-  NamedList<SolrDocumentList> buildMoreLikeThisNamed(
-      Map<Object,SolrDocumentList> allMlt, Map<Object,ShardDoc> resultIds) {
-    NamedList<SolrDocumentList> result = new NamedList<>();
-    TreeMap<Integer,Object> sortingMap = new TreeMap<>();
-    for (Entry<Object,ShardDoc> next : resultIds.entrySet()) {
-      sortingMap.put(next.getValue().positionInResponse, next.getKey());
-    }
-    for (Object key : sortingMap.values()) {
-      SolrDocumentList sdl = allMlt.get(key);
-      if (sdl == null) {
-        sdl = new SolrDocumentList();
-        sdl.setNumFound(0);
-        sdl.setStart(0);
-      }
-      result.add(key.toString(), sdl);
-    }
-    return result;
-  }
-  
-  public SolrDocumentList mergeSolrDocumentList(SolrDocumentList one,
-      SolrDocumentList two, int maxSize, String idField) {
-
-    List<SolrDocument> l = new ArrayList<>();
-    
-    // De-dup the record sets. Duplicates shouldn't occur if documents were indexed correctly.
-    Map<String,SolrDocument> map = new HashMap<>();
-    for (SolrDocument doc : one) {
-      Object id = doc.getFieldValue(idField);
-      assert id != null : doc.toString();
-      map.put(id.toString(), doc);
-    }
-    for (SolrDocument doc : two) {
-      map.put(doc.getFieldValue(idField).toString(), doc);
-    }
-    
-    l = new ArrayList<>(map.values());
-    
-    // Comparator to sort docs based on score. null scores/docs are set to 0.
-    
-    // hmm...we are ordering by scores that are not really comparable...
-    Comparator<SolrDocument> c = new Comparator<SolrDocument>() {
-      public int compare(SolrDocument o1, SolrDocument o2) {
-        Float f1 = getFloat(o1);
-        Float f2 = getFloat(o2);
-        return f2.compareTo(f1);
-      }
-      
-      private Float getFloat(SolrDocument doc) {
-        Float f = 0f;
-        if (doc != null) {
-          Object o = doc.getFieldValue("score");
-          if (o instanceof Float) { // instanceof already rejects null
-            f = (Float) o;
-          }
-        }
-        return f;
-      }
-    };
-    
-    Collections.sort(l, c);
-    
-    // Truncate list to maxSize
-    if (l.size() > maxSize) {
-      l = l.subList(0, maxSize);
-    }
-    
-    // Create SolrDocumentList Attributes from originals
-    SolrDocumentList result = new SolrDocumentList();
-    result.addAll(l);
-    result.setMaxScore(Math.max(one.getMaxScore(), two.getMaxScore()));
-    result.setNumFound(one.getNumFound() + two.getNumFound());
-    result.setStart(Math.min(one.getStart(), two.getStart()));
-
-    return result;
-  }
-  
-  ShardRequest buildShardQuery(ResponseBuilder rb, String q, String key) {
-    ShardRequest s = new ShardRequest();
-    s.params = new ModifiableSolrParams(rb.req.getParams());
-    s.purpose |= ShardRequest.PURPOSE_GET_MLT_RESULTS;
-    // Maybe unnecessary, but safe.
-    s.purpose |= ShardRequest.PURPOSE_PRIVATE;
-    
-    s.params.remove(ShardParams.SHARDS);
-    // s.params.remove(MoreLikeThisComponent.COMPONENT_NAME);
-    
-    // needed to correlate results
-    s.params.set(MoreLikeThisComponent.DIST_DOC_ID, key);
-    s.params.set(CommonParams.START, 0);
-    int mltcount = s.params.getInt(MoreLikeThisParams.DOC_COUNT, 20); // overrequest
-    s.params.set(CommonParams.ROWS, mltcount);
-    
-    // adding score to rank moreLikeThis
-    s.params.remove(CommonParams.FL);
-    
-    // Should probably add something like this:
-    // String fl = s.params.get(MoreLikeThisParams.RETURN_FL, "*");
-    // if(fl != null){
-    // s.params.set(CommonParams.FL, fl + ",score");
-    // }
-    String id = rb.req.getSchema().getUniqueKeyField().getName();
-    s.params.set(CommonParams.FL, "score," + id);
-    s.params.set(SORT, "score desc");
-    // MLT Query is submitted as normal query to shards.
-    s.params.set(CommonParams.Q, q);
-    
-    return s;
-  }
-  
-  ShardRequest buildMLTQuery(ResponseBuilder rb, String q) {
-    ShardRequest s = new ShardRequest();
-    s.params = new ModifiableSolrParams();
-    
-
-    s.params.set(CommonParams.START, 0);
-
-    String id = rb.req.getSchema().getUniqueKeyField().getName();
-
-    s.params.set(CommonParams.FL, "score," + id);
-    // MLT Query is submitted as normal query to shards.
-    s.params.set(CommonParams.Q, q);
-    
-    s.shards = ShardRequest.ALL_SHARDS;
-    return s;
-  }
-  
-  NamedList<DocList> getMoreLikeThese(ResponseBuilder rb,
-      SolrIndexSearcher searcher, DocList docs, int flags) throws IOException {
-    SolrParams p = rb.req.getParams();
-    IndexSchema schema = searcher.getSchema();
-    MoreLikeThisHandler.MoreLikeThisHelper mltHelper = new MoreLikeThisHandler.MoreLikeThisHelper(
-        p, searcher);
-    NamedList<DocList> mlt = new SimpleOrderedMap<>();
-    DocIterator iterator = docs.iterator();
-    
-    SimpleOrderedMap<Object> dbg = null;
-    if (rb.isDebug()) {
-      dbg = new SimpleOrderedMap<>();
-    }
-    
-    while (iterator.hasNext()) {
-      int id = iterator.nextDoc();
-      int rows = p.getInt(MoreLikeThisParams.DOC_COUNT, 5);
-      DocListAndSet sim = mltHelper.getMoreLikeThis(id, 0, rows, null, null,
-          flags);
-      String name = schema.printableUniqueKey(searcher.doc(id));
-      mlt.add(name, sim.docList);
-      
-      if (dbg != null) {
-        SimpleOrderedMap<Object> docDbg = new SimpleOrderedMap<>();
-        docDbg.add("rawMLTQuery", mltHelper.getRawMLTQuery().toString());
-        docDbg.add("boostedMLTQuery", mltHelper.getBoostedMLTQuery().toString());
-        docDbg.add("realMLTQuery", mltHelper.getRealMLTQuery().toString());
-        SimpleOrderedMap<Object> explains = new SimpleOrderedMap<>();
-        DocIterator mltIte = sim.docList.iterator();
-        while (mltIte.hasNext()) {
-          int mltid = mltIte.nextDoc();
-          String key = schema.printableUniqueKey(searcher.doc(mltid));
-          explains.add(key,
-              searcher.explain(mltHelper.getRealMLTQuery(), mltid));
-        }
-        docDbg.add("explain", explains);
-        dbg.add(name, docDbg);
-      }
-    }
-    
-    // add debug information
-    if (dbg != null) {
-      rb.addDebugInfo("moreLikeThis", dbg);
-    }
-    return mlt;
-  }
-  
-  // ///////////////////////////////////////////
-  // / SolrInfoBean
-  // //////////////////////////////////////////
-  
-  @Override
-  public String getDescription() {
-    return "More Like This";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.QUERY;
-  }
-}
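
As a worked example of mergeSolrDocumentList() above: given two shard lists that both contain id "b" (ids and scores invented for this sketch), the duplicate collapses, documents re-sort by descending score, and the result truncates to maxSize. Both inputs need a non-null maxScore because the merge unboxes it.

import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.handler.component.MoreLikeThisComponent;

public class MltMergeSketch {
  private static SolrDocument doc(String id, float score) {
    SolrDocument d = new SolrDocument();
    d.setField("id", id);
    d.setField("score", score);
    return d;
  }

  public static void main(String[] args) {
    SolrDocumentList one = new SolrDocumentList();
    one.add(doc("a", 0.9f));
    one.add(doc("b", 0.5f));
    one.setNumFound(2);
    one.setMaxScore(0.9f); // required; the merge calls Math.max on the unboxed values

    SolrDocumentList two = new SolrDocumentList();
    two.add(doc("b", 0.7f));
    two.add(doc("c", 0.6f));
    two.setNumFound(2);
    two.setMaxScore(0.7f);

    SolrDocumentList merged =
        new MoreLikeThisComponent().mergeSolrDocumentList(one, two, 2, "id");
    // merged holds [a (0.9), b (0.7)]: "b" from `two` wins the de-dup,
    // "c" falls to maxSize == 2, and numFound sums to 4.
    System.out.println(merged);
  }
}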


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java b/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
deleted file mode 100644
index 40e65b7..0000000
--- a/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.lucene.index.IndexCommit;
-import org.apache.lucene.index.IndexDeletionPolicy;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.store.Directory;
-import org.apache.solr.core.snapshots.SolrSnapshotMetaDataManager;
-import org.apache.solr.update.SolrIndexWriter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A wrapper for an IndexDeletionPolicy instance.
- * <p>
- * Provides features for looking up an IndexCommit given a version. Allows reserving index
- * commit points for certain amounts of time to support features such as index replication
- * or snapshotting directly out of a live index directory.
- * <p>
- * <b>NOTE</b>: The {@link #clone()} method returns <tt>this</tt> in order to make
- * this {@link IndexDeletionPolicy} instance trackable across {@link IndexWriter}
- * instantiations. This is correct because each core has its own
- * {@link IndexDeletionPolicy} and never has more than one open {@link IndexWriter}.
- *
- * @see org.apache.lucene.index.IndexDeletionPolicy
- */
-public final class IndexDeletionPolicyWrapper extends IndexDeletionPolicy {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  private final IndexDeletionPolicy deletionPolicy;
-  private volatile Map<Long, IndexCommit> solrVersionVsCommits = new ConcurrentHashMap<>();
-  private final Map<Long, Long> reserves = new ConcurrentHashMap<>();
-  private volatile IndexCommit latestCommit;
-  private final ConcurrentHashMap<Long, AtomicInteger> savedCommits = new ConcurrentHashMap<>();
-  private final SolrSnapshotMetaDataManager snapshotMgr;
-
-  public IndexDeletionPolicyWrapper(IndexDeletionPolicy deletionPolicy, SolrSnapshotMetaDataManager snapshotMgr) {
-    this.deletionPolicy = deletionPolicy;
-    this.snapshotMgr = snapshotMgr;
-  }
-
-  /**
-   * Gets the most recent commit point
-   * <p>
-   * It is recommended to reserve a commit point for the duration of usage so that
-   * it is not deleted by the underlying deletion policy
-   *
-   * @return the most recent commit point
-   */
-  public IndexCommit getLatestCommit() {
-    return latestCommit;
-  }
-
-  public IndexDeletionPolicy getWrappedDeletionPolicy() {
-    return deletionPolicy;
-  }
-
-  /**
-   * Set the duration for which commit point is to be reserved by the deletion policy.
-   *
-   * @param indexGen gen of the commit point to be reserved
-   * @param reserveTime  time in milliseconds for which the commit point is to be reserved
-   */
-  public void setReserveDuration(Long indexGen, long reserveTime) {
-    long timeToSet = System.nanoTime() + TimeUnit.NANOSECONDS.convert(reserveTime, TimeUnit.MILLISECONDS);
-    for(;;) {
-      Long previousTime = reserves.put(indexGen, timeToSet);
-
-      // this is the common success case: the older time didn't exist, or
-      // came before the new time.
-      if (previousTime == null || previousTime <= timeToSet) {
-        log.debug("Commit point reservation for generation {} set to {} (requested reserve time of {})",
-            indexGen, timeToSet, reserveTime);
-        break;
-      }
-
-      // At this point, we overwrote a longer reservation, so we want to restore the older one.
-      // the problem is that an even longer reservation may come in concurrently
-      // and we don't want to overwrite that one too.  We simply keep retrying in a loop
-      // with the maximum time value we have seen.
-      timeToSet = previousTime;      
-    }
-  }
-
-  private void cleanReserves() {
-    long currentTime = System.nanoTime();
-    for (Map.Entry<Long, Long> entry : reserves.entrySet()) {
-      if (entry.getValue() < currentTime) {
-        reserves.remove(entry.getKey());
-      }
-    }
-  }
-
-  private List<IndexCommitWrapper> wrap(List<? extends IndexCommit> list) {
-    List<IndexCommitWrapper> result = new ArrayList<>();
-    for (IndexCommit indexCommit : list) result.add(new IndexCommitWrapper(indexCommit));
-    return result;
-  }
-
-  /** Permanently prevent this commit point from being deleted.
-   * A counter is used to allow a commit point to be correctly saved and released
-   * multiple times. */
-  public synchronized void saveCommitPoint(Long indexCommitGen) {
-    AtomicInteger reserveCount = savedCommits.get(indexCommitGen);
-    if (reserveCount == null) reserveCount = new AtomicInteger();
-    reserveCount.incrementAndGet();
-    savedCommits.put(indexCommitGen, reserveCount);
-  }
-
-  /** Release a previously saved commit point */
-  public synchronized void releaseCommitPoint(Long indexCommitGen) {
-    AtomicInteger reserveCount = savedCommits.get(indexCommitGen);
-    if (reserveCount == null) return; // this should not happen
-    if (reserveCount.decrementAndGet() <= 0) {
-      savedCommits.remove(indexCommitGen);
-    }
-  }
-
-  /**
-   * Internal use for Lucene... do not explicitly call.
-   */
-  @Override
-  public void onInit(List<? extends IndexCommit> list) throws IOException {
-    List<IndexCommitWrapper> wrapperList = wrap(list);
-    deletionPolicy.onInit(wrapperList);
-    updateCommitPoints(wrapperList);
-    cleanReserves();
-  }
-
-  /**
-   * Internal use for Lucene... do not explicitly call.
-   */
-  @Override
-  public void onCommit(List<? extends IndexCommit> list) throws IOException {
-    List<IndexCommitWrapper> wrapperList = wrap(list);
-    deletionPolicy.onCommit(wrapperList);
-    updateCommitPoints(wrapperList);
-    cleanReserves();
-  }
-
-  private class IndexCommitWrapper extends IndexCommit {
-    IndexCommit delegate;
-
-
-    IndexCommitWrapper(IndexCommit delegate) {
-      this.delegate = delegate;
-    }
-
-    @Override
-    public String getSegmentsFileName() {
-      return delegate.getSegmentsFileName();
-    }
-
-    @Override
-    public Collection getFileNames() throws IOException {
-      return delegate.getFileNames();
-    }
-
-    @Override
-    public Directory getDirectory() {
-      return delegate.getDirectory();
-    }
-
-    @Override
-    public void delete() {
-      Long gen = delegate.getGeneration();
-      Long reserve = reserves.get(gen);
-      if (reserve != null && System.nanoTime() < reserve) return;
-      if (savedCommits.containsKey(gen)) return;
-      if (snapshotMgr.isSnapshotted(gen)) return;
-      delegate.delete();
-    }
-
-    @Override
-    public int getSegmentCount() {
-      return delegate.getSegmentCount();
-    }
-
-    @Override
-    public boolean equals(Object o) {
-      return delegate.equals(o);
-    }
-
-    @Override
-    public int hashCode() {
-      return delegate.hashCode();
-    }
-
-    @Override
-    public long getGeneration() {
-      return delegate.getGeneration();
-    }
-
-    @Override
-    public boolean isDeleted() {
-      return delegate.isDeleted();
-    }
-
-    @Override
-    public Map getUserData() throws IOException {
-      return delegate.getUserData();
-    }    
-  }
-
-  /**
-   * @param gen the generation of the commit point
-   * @return the commit point corresponding to the given generation
-   */
-  public IndexCommit getCommitPoint(Long gen) {
-    return solrVersionVsCommits.get(gen);
-  }
-
-  /**
-   * Gets the commit points for the index.
-   * This map instance may change between commits, and commit points may be deleted.
-   * It is recommended to reserve a commit point for the duration of usage.
-   *
-   * @return a Map of generation to commit points
-   */
-  public Map<Long, IndexCommit> getCommits() {
-    return solrVersionVsCommits;
-  }
-
-  private void updateCommitPoints(List<IndexCommitWrapper> list) {
-    Map<Long, IndexCommit> map = new ConcurrentHashMap<>();
-    for (IndexCommitWrapper wrapper : list) {
-      if (!wrapper.isDeleted())
-        map.put(wrapper.delegate.getGeneration(), wrapper.delegate);
-    }
-    solrVersionVsCommits = map;
-    if (!list.isEmpty()) {
-      latestCommit = ((list.get(list.size() - 1)).delegate);
-    }
-  }
-
-  public static long getCommitTimestamp(IndexCommit commit) throws IOException {
-    final Map<String,String> commitData = commit.getUserData();
-    String commitTime = commitData.get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
-    if (commitTime != null) {
-      return Long.parseLong(commitTime);
-    } else {
-      return 0;
-    }
-  }
-}
-
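
A hedged usage sketch of the reservation API above, of the sort replication code relies on. It assumes SolrCore's getDeletionPolicy() accessor; the ten-minute window is an invented value.

import java.util.concurrent.TimeUnit;

import org.apache.lucene.index.IndexCommit;
import org.apache.solr.core.IndexDeletionPolicyWrapper;
import org.apache.solr.core.SolrCore;

public class CommitReservationSketch {
  static void reserveLatest(SolrCore core) {
    IndexDeletionPolicyWrapper policy = core.getDeletionPolicy();
    IndexCommit latest = policy.getLatestCommit();
    if (latest == null) {
      return; // nothing has been committed yet
    }
    long gen = latest.getGeneration();

    // Time-bounded reservation: keep this commit for ten minutes.
    policy.setReserveDuration(gen, TimeUnit.MINUTES.toMillis(10));

    // Counted reservation: save and release calls must be balanced.
    policy.saveCommitPoint(gen);
    try {
      // ... copy the segment files referenced by `latest` ...
    } finally {
      policy.releaseCommitPoint(gen);
    }
  }
}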

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/IndexReaderFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/IndexReaderFactory.java b/solr/core/src/java/org/apache/solr/core/IndexReaderFactory.java
deleted file mode 100644
index 03e73b7..0000000
--- a/solr/core/src/java/org/apache/solr/core/IndexReaderFactory.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-import java.io.IOException;
-
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.store.Directory;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.util.plugin.NamedListInitializedPlugin;
-
-/**
- * Factory used to build a new IndexReader instance.
- */
-public abstract class IndexReaderFactory implements NamedListInitializedPlugin {
-  /**
-   * init will be called just once, immediately after creation.
-   * <p>
-   * The args are user-level initialization parameters that may be specified
-   * when declaring an indexReaderFactory in solrconfig.xml
-   *
-   */
-  @Override
-  public void init(NamedList args) {
-    Object v = args.get("setTermIndexDivisor");
-    if (v != null) {
-      throw new IllegalArgumentException("Illegal parameter 'setTermIndexDivisor'");
-    }
-  }
-
-  /**
-   * Creates a new IndexReader instance using the given Directory.
-   * 
-   * @param indexDir the index location
-   * @param core {@link SolrCore} instance where this reader will be used. NOTE:
-   * this SolrCore instance may not be fully configured yet, but basic things like
-   * {@link SolrCore#getCoreDescriptor()}, {@link SolrCore#getLatestSchema()} and
-   * {@link SolrCore#getSolrConfig()} are valid.
-   * @return An IndexReader instance
-   * @throws IOException If there is a low-level I/O error.
-   */
-  public abstract DirectoryReader newReader(Directory indexDir, SolrCore core)
-      throws IOException;
-  
-  /**
-   * Creates a new IndexReader instance using the given IndexWriter.
-   * <p>
-   * This is used for opening the initial reader in NRT mode
-   *
-   * @param writer IndexWriter
-   * @param core {@link SolrCore} instance where this reader will be used. NOTE:
-   * this SolrCore instance may not be fully configured yet, but basic things like
-   * {@link SolrCore#getCoreDescriptor()}, {@link SolrCore#getLatestSchema()} and
-   * {@link SolrCore#getSolrConfig()} are valid.
-   * @return An IndexReader instance
-   * @throws IOException If there is a low-level I/O error.
-   */
-  public abstract DirectoryReader newReader(IndexWriter writer, SolrCore core)
-      throws IOException;
-}
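
A minimal sketch of a concrete factory satisfying the two abstract methods above. It mirrors what a straightforward implementation would do; it is an illustration, not Solr's shipped StandardIndexReaderFactory.

import java.io.IOException;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.solr.core.IndexReaderFactory;
import org.apache.solr.core.SolrCore;

public class PlainIndexReaderFactory extends IndexReaderFactory {

  @Override
  public DirectoryReader newReader(Directory indexDir, SolrCore core) throws IOException {
    return DirectoryReader.open(indexDir); // plain reader over the directory
  }

  @Override
  public DirectoryReader newReader(IndexWriter writer, SolrCore core) throws IOException {
    return DirectoryReader.open(writer); // NRT reader on top of the live writer
  }
}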

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/InitParams.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/InitParams.java b/solr/core/src/java/org/apache/solr/core/InitParams.java
deleted file mode 100644
index 031c7b7..0000000
--- a/solr/core/src/java/org/apache/solr/core/InitParams.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import com.google.common.collect.ImmutableSet;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.common.params.CommonParams.PATH;
-import static org.apache.solr.core.PluginInfo.APPENDS;
-import static org.apache.solr.core.PluginInfo.DEFAULTS;
-import static org.apache.solr.core.PluginInfo.INVARIANTS;
-
-/**
- * An object representing an {@code <initParams>} tag
- */
-public class InitParams {
-  public static final String TYPE = "initParams";
-  public final String name;
-  public final Set<String> paths;
-  public final NamedList defaults, invariants, appends;
-  private PluginInfo pluginInfo;
-  private final Set<String> KNOWN_KEYS = ImmutableSet.of(DEFAULTS, INVARIANTS, APPENDS);
-
-  public InitParams(PluginInfo p) {
-    this.pluginInfo = p;
-    this.name = p.attributes.get(NAME);
-    Set<String> paths = null;
-    String pathStr = p.attributes.get(PATH);
-    if (pathStr != null) {
-      paths = Collections.unmodifiableSet(new HashSet<>(StrUtils.splitSmart(pathStr, ',')));
-    }
-    this.paths = paths;
-    NamedList nl = (NamedList) p.initArgs.get(DEFAULTS);
-    defaults = nl == null ? null : nl.getImmutableCopy();
-    nl = (NamedList) p.initArgs.get(INVARIANTS);
-    invariants = nl == null ? null : nl.getImmutableCopy();
-    nl = (NamedList) p.initArgs.get(APPENDS);
-    appends = nl == null ? null : nl.getImmutableCopy();
-  }
-
-  public boolean matchPath(String name) {
-    if (paths == null) return false;
-    if (paths.contains(name)) return true;
-
-    for (String path : paths) {
-      if (matchPath(path, name)) return true;
-    }
-
-    return false;
-  }
-
-  private static boolean matchPath(String path, String name) {
-    List<String> pathSplit = StrUtils.splitSmart(path, '/');
-    List<String> nameSplit = StrUtils.splitSmart(name, '/');
-    int i = 0;
-    for (; i < nameSplit.size(); i++) {
-      String s = nameSplit.get(i);
-      String ps = pathSplit.size() > i ? pathSplit.get(i) : null;
-      if (ps == null) return false;
-      if (s.equals(ps)) continue;
-      if ("*".equals(ps) && nameSplit.size() == i + 1) return true;
-      if ("**".equals(ps)) return true;
-      return false;
-    }
-    String ps = pathSplit.size() > i ? pathSplit.get(i) : null;
-    return "*".equals(ps) || "**".equals(ps);
-
-  }
-
-  public void apply(PluginInfo info) {
-    if (!info.isFromSolrConfig()) {
-      // if this is a component implicitly defined in code, it should be overridden by initParams
-      merge(defaults, (NamedList) info.initArgs.get(DEFAULTS), info.initArgs, DEFAULTS, false);
-    } else {
-      // if the args are initialized from solrconfig.xml inside the requestHandler, they take precedence over initParams
-      merge((NamedList) info.initArgs.get(DEFAULTS), defaults, info.initArgs, DEFAULTS, false);
-    }
-    merge((NamedList) info.initArgs.get(INVARIANTS), invariants, info.initArgs, INVARIANTS, false);
-    merge((NamedList) info.initArgs.get(APPENDS), appends, info.initArgs, APPENDS, true);
-
-    if (pluginInfo.initArgs != null) {
-      for (int i = 0; i < pluginInfo.initArgs.size(); i++) {
-        String name = pluginInfo.initArgs.getName(i);
-        if (KNOWN_KEYS.contains(name)) continue; // already taken care of
-        Object val = info.initArgs.get(name);
-        if (val != null) continue; // explicitly specified in the requestHandler, ignore
-        info.initArgs.add(name, pluginInfo.initArgs.getVal(i));
-      }
-    }
-  }
-
-  private static void merge(NamedList first, NamedList second, NamedList sink, String name, boolean appends) {
-    if (first == null && second == null) return;
-    if (first == null) first = new NamedList();
-    NamedList nl = first.clone();
-    if (appends) {
-      if (second != null) nl.addAll(second);
-    } else {
-      Set<String> a = new HashSet<>();
-      Set<String> b = new HashSet<>();
-      for (Object o : first) {
-        Map.Entry<String, Object> e = (Map.Entry) o;
-        a.add(e.getKey());
-      }
-      if (second != null) {
-        for (Object o : second) {
-          Map.Entry<String, Object> e = (Map.Entry) o;
-          b.add(e.getKey());
-        }
-      }
-      for (String s : b) {
-        if (a.contains(s)) continue;
-        for (Object v : second.getAll(s)) nl.add(s, v);
-      }
-    }
-    if (sink.indexOf(name, 0) > -1) {
-      sink.setVal(sink.indexOf(name, 0), nl);
-    } else {
-      sink.add(name, nl);
-    }
-  }
-}
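
The '*' and '**' semantics of the private matchPath() above are easiest to see by running it. The harness below restates the method body verbatim so it can be exercised directly; the paths are invented examples, and the printed outcomes follow from the copied logic.

import java.util.List;

import org.apache.solr.common.util.StrUtils;

public class PathMatchSketch {

  static boolean matchPath(String path, String name) {
    List<String> pathSplit = StrUtils.splitSmart(path, '/');
    List<String> nameSplit = StrUtils.splitSmart(name, '/');
    int i = 0;
    for (; i < nameSplit.size(); i++) {
      String s = nameSplit.get(i);
      String ps = pathSplit.size() > i ? pathSplit.get(i) : null;
      if (ps == null) return false;
      if (s.equals(ps)) continue;
      if ("*".equals(ps) && nameSplit.size() == i + 1) return true;
      if ("**".equals(ps)) return true;
      return false;
    }
    String ps = pathSplit.size() > i ? pathSplit.get(i) : null;
    return "*".equals(ps) || "**".equals(ps);
  }

  public static void main(String[] args) {
    System.out.println(matchPath("/query/*", "/query/spell"));    // true: '*' covers one trailing segment
    System.out.println(matchPath("/query/*", "/query/spell/x"));  // false: one segment too deep
    System.out.println(matchPath("/query/**", "/query/spell/x")); // true: '**' covers any remainder
  }
}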

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/MMapDirectoryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/MMapDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/MMapDirectoryFactory.java
deleted file mode 100644
index 0c1875b..0000000
--- a/solr/core/src/java/org/apache/solr/core/MMapDirectoryFactory.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-import java.io.File;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.nio.file.Path;
-
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.LockFactory; // javadocs
-import org.apache.lucene.store.MMapDirectory;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-/**
- * Directly provide MMapDirectory instead of relying on {@link org.apache.lucene.store.FSDirectory#open}.
- * <p>
- * Can set the following parameters:
- * <ul>
- *  <li>unmap -- See {@link MMapDirectory#setUseUnmap(boolean)}</li>
- *  <li>preload -- See {@link MMapDirectory#setPreload(boolean)}</li>
- *  <li>maxChunkSize -- The maximum chunk size.  See {@link MMapDirectory#MMapDirectory(Path, LockFactory, int)}</li>
- * </ul>
- *
- **/
-public class MMapDirectoryFactory extends StandardDirectoryFactory {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  boolean unmapHack;
-  boolean preload;
-  private int maxChunk;
-
-  @Override
-  public void init(NamedList args) {
-    super.init(args);
-    SolrParams params = args.toSolrParams();
-    maxChunk = params.getInt("maxChunkSize", MMapDirectory.DEFAULT_MAX_CHUNK_SIZE);
-    if (maxChunk <= 0){
-      throw new IllegalArgumentException("maxChunk must be greater than 0");
-    }
-    unmapHack = params.getBool("unmap", true);
-    preload = params.getBool("preload", false); //default turn-off
-  }
-
-  @Override
-  protected Directory create(String path, LockFactory lockFactory, DirContext dirContext) throws IOException {
-    // the effective lock factory is created by the caller and passed in as lockFactory
-    MMapDirectory mapDirectory = new MMapDirectory(new File(path).toPath(), lockFactory, maxChunk);
-    try {
-      mapDirectory.setUseUnmap(unmapHack);
-    } catch (IllegalArgumentException e) {
-      log.warn("Unmap not supported on this JVM, continuing on without setting unmap", e);
-    }
-    mapDirectory.setPreload(preload);
-    return mapDirectory;
-  }
-  
-  @Override
-  public boolean isAbsolute(String path) {
-    return new File(path).isAbsolute();
-  }
-}

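For context, a minimal usage sketch of this factory. The NamedList stands in for the <directoryFactory> arguments Solr parses from solrconfig.xml; the values shown are illustrative, not recommendations.

    MMapDirectoryFactory factory = new MMapDirectoryFactory();
    NamedList<Object> args = new NamedList<>();
    args.add("unmap", "true");        // see MMapDirectory#setUseUnmap
    args.add("preload", "false");     // see MMapDirectory#setPreload
    args.add("maxChunkSize", String.valueOf(MMapDirectory.DEFAULT_MAX_CHUNK_SIZE));
    factory.init(args);
    // subsequent create(path, lockFactory, dirContext) calls return configured MMapDirectory instances
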
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/MemClassLoader.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/MemClassLoader.java b/solr/core/src/java/org/apache/solr/core/MemClassLoader.java
deleted file mode 100644
index e4f561f..0000000
--- a/solr/core/src/java/org/apache/solr/core/MemClassLoader.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.lang.invoke.MethodHandles;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.nio.ByteBuffer;
-import java.security.CodeSource;
-import java.security.ProtectionDomain;
-import java.security.cert.Certificate;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.lucene.analysis.util.ResourceLoader;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-public class MemClassLoader extends ClassLoader implements AutoCloseable, ResourceLoader {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private boolean allJarsLoaded = false;
-  private final SolrResourceLoader parentLoader;
-  private List<PluginBag.RuntimeLib> libs = new ArrayList<>();
-  private Map<String, Class> classCache = new HashMap<>();
-
-
-  public MemClassLoader(List<PluginBag.RuntimeLib> libs, SolrResourceLoader resourceLoader) {
-    this.parentLoader = resourceLoader;
-    this.libs = libs;
-  }
-
-
-  public synchronized void loadJars() {
-    if (allJarsLoaded) return;
-
-    for (PluginBag.RuntimeLib lib : libs) {
-      try {
-        lib.loadJar();
-        lib.verify();
-      } catch (Exception exception) {
-        if (exception instanceof SolrException) throw (SolrException) exception;
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Atleast one runtimeLib could not be loaded", exception);
-      }
-    }
-    allJarsLoaded = true;
-  }
-
-
-  @Override
-  protected Class<?> findClass(String name) throws ClassNotFoundException {
-    if (!allJarsLoaded) loadJars();
-    try {
-      return parentLoader.findClass(name, Object.class);
-    } catch (Exception e) {
-      return loadFromRuntimeLibs(name);
-    }
-  }
-
-  private synchronized Class<?> loadFromRuntimeLibs(String name) throws ClassNotFoundException {
-    Class result = classCache.get(name);
-    if (result != null)
-      return result;
-    AtomicReference<String> jarName = new AtomicReference<>();
-    ByteBuffer buf = null;
-    try {
-      buf = getByteBuffer(name, jarName);
-    } catch (Exception e) {
-      throw new ClassNotFoundException("class could not be loaded " + name, e);
-    }
-    if (buf == null) throw new ClassNotFoundException("Class not found :" + name);
-    ProtectionDomain defaultDomain = null;
-    //using the default protection domain, with no permissions
-    try {
-      defaultDomain = new ProtectionDomain(new CodeSource(new URL("http://localhost/" + CollectionAdminParams.SYSTEM_COLL + "/blob/" + jarName.get()), (Certificate[]) null),
-          null);
-    } catch (MalformedURLException mue) {
-      throw new ClassNotFoundException("Unexpected exception ", mue);
-      //should not happen
-    }
-    log.info("Defining_class {} from runtime jar {} ", name, jarName);
-
-    result = defineClass(name, buf.array(), buf.arrayOffset(), buf.limit(), defaultDomain);
-    classCache.put(name, result);
-    return result;
-  }
-
-  private ByteBuffer getByteBuffer(String name, AtomicReference<String> jarName) throws Exception {
-    if (!allJarsLoaded) {
-      loadJars();
-    }
-
-    String path = name.replace('.', '/').concat(".class");
-    ByteBuffer buf = null;
-    for (PluginBag.RuntimeLib lib : libs) {
-      try {
-        buf = lib.getFileContent(path);
-        if (buf != null) {
-          jarName.set(lib.getName());
-          break;
-        }
-      } catch (Exception exp) {
-        throw new ClassNotFoundException("Unable to load class :" + name, exp);
-      }
-    }
-
-    return buf;
-  }
-
-  @Override
-  public void close() throws Exception {
-    for (PluginBag.RuntimeLib lib : libs) {
-      try {
-        lib.close();
-      } catch (Exception e) {
-        // best-effort close; ignore so the remaining libs still get closed
-      }
-    }
-  }
-
-  @Override
-  public InputStream openResource(String resource) throws IOException {
-    AtomicReference<String> jarName = new AtomicReference<>();
-    try {
-      ByteBuffer buf = getByteBuffer(resource, jarName);
-      if (buf == null) throw new IOException("Resource could not be found " + resource);
-      // return a stream over the resource bytes rather than discarding them
-      return new ByteArrayInputStream(buf.array(), buf.arrayOffset(), buf.limit());
-    } catch (IOException e) {
-      throw e;
-    } catch (Exception e) {
-      throw new IOException("Resource could not be found " + resource, e);
-    }
-  }
-
-  @Override
-  public <T> Class<? extends T> findClass(String cname, Class<T> expectedType) {
-    if (!allJarsLoaded) loadJars();
-    try {
-      return findClass(cname).asSubclass(expectedType);
-    } catch (Exception e) {
-      if (e instanceof SolrException) {
-        throw (SolrException) e;
-      } else {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "error loading class " + cname, e);
-      }
-    }
-
-  }
-
-  @Override
-  public <T> T newInstance(String cname, Class<T> expectedType) {
-    try {
-      return findClass(cname, expectedType).newInstance();
-    } catch (SolrException e) {
-      throw e;
-    } catch (Exception e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "error instantiating class :" + cname, e);
-    }
-  }
-
-
-}

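For context, a sketch of how a core wires this loader up, mirroring the getLibObjects helper shown later in this diff. It assumes a SolrCore named core is in scope, Solr was started with -Denable.runtime.lib=true, and com.example.MyComponent is a hypothetical class packaged in a registered runtimeLib jar.

    List<PluginBag.RuntimeLib> libs = PluginBag.RuntimeLib.getLibObjects(
        core, core.getSolrConfig().getPluginInfos(PluginBag.RuntimeLib.class.getName()));
    MemClassLoader loader = new MemClassLoader(libs, core.getResourceLoader());
    // findClass consults the parent loader first, then falls back to the in-memory jars
    SearchComponent comp = loader.newInstance("com.example.MyComponent", SearchComponent.class);
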
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/MetricsConfig.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/MetricsConfig.java b/solr/core/src/java/org/apache/solr/core/MetricsConfig.java
deleted file mode 100644
index fab2553..0000000
--- a/solr/core/src/java/org/apache/solr/core/MetricsConfig.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.util.HashSet;
-import java.util.Set;
-
-/**
- * Metrics configuration for a Solr node: metric reporter plugins, hidden system
- * properties, and supplier plugins for counters, meters, timers and histograms.
- */
-public class MetricsConfig {
-
-  private final PluginInfo[] metricReporters;
-  private final Set<String> hiddenSysProps;
-  private final PluginInfo counterSupplier;
-  private final PluginInfo meterSupplier;
-  private final PluginInfo timerSupplier;
-  private final PluginInfo histogramSupplier;
-  private final PluginInfo historyHandler;
-
-  private MetricsConfig(PluginInfo[] metricReporters, Set<String> hiddenSysProps,
-                        PluginInfo counterSupplier, PluginInfo meterSupplier,
-                        PluginInfo timerSupplier, PluginInfo histogramSupplier,
-                        PluginInfo historyHandler) {
-    this.metricReporters = metricReporters;
-    this.hiddenSysProps = hiddenSysProps;
-    this.counterSupplier = counterSupplier;
-    this.meterSupplier = meterSupplier;
-    this.timerSupplier = timerSupplier;
-    this.histogramSupplier = histogramSupplier;
-    this.historyHandler = historyHandler;
-  }
-
-  public PluginInfo[] getMetricReporters() {
-    return metricReporters;
-  }
-
-  public Set<String> getHiddenSysProps() {
-    return hiddenSysProps;
-  }
-
-  public PluginInfo getCounterSupplier() {
-    return counterSupplier;
-  }
-
-  public PluginInfo getMeterSupplier() {
-    return meterSupplier;
-  }
-
-  public PluginInfo getTimerSupplier() {
-    return timerSupplier;
-  }
-
-  public PluginInfo getHistogramSupplier() {
-    return histogramSupplier;
-  }
-
-  public PluginInfo getHistoryHandler() {
-    return historyHandler;
-  }
-
-  public static class MetricsConfigBuilder {
-    private PluginInfo[] metricReporterPlugins = new PluginInfo[0];
-    private Set<String> hiddenSysProps = new HashSet<>();
-    private PluginInfo counterSupplier;
-    private PluginInfo meterSupplier;
-    private PluginInfo timerSupplier;
-    private PluginInfo histogramSupplier;
-    private PluginInfo historyHandler;
-
-    public MetricsConfigBuilder() {
-    }
-
-    public MetricsConfigBuilder setHiddenSysProps(Set<String> hiddenSysProps) {
-      if (hiddenSysProps != null && !hiddenSysProps.isEmpty()) {
-        this.hiddenSysProps.clear();
-        this.hiddenSysProps.addAll(hiddenSysProps);
-      }
-      return this;
-    }
-
-    public MetricsConfigBuilder setMetricReporterPlugins(PluginInfo[] metricReporterPlugins) {
-      this.metricReporterPlugins = metricReporterPlugins != null ? metricReporterPlugins : new PluginInfo[0];
-      return this;
-    }
-
-    public MetricsConfigBuilder setCounterSupplier(PluginInfo info) {
-      this.counterSupplier = info;
-      return this;
-    }
-
-    public MetricsConfigBuilder setMeterSupplier(PluginInfo info) {
-      this.meterSupplier = info;
-      return this;
-    }
-
-    public MetricsConfigBuilder setTimerSupplier(PluginInfo info) {
-      this.timerSupplier = info;
-      return this;
-    }
-
-    public MetricsConfigBuilder setHistogramSupplier(PluginInfo info) {
-      this.histogramSupplier = info;
-      return this;
-    }
-
-    public MetricsConfigBuilder setHistoryHandler(PluginInfo info) {
-      this.historyHandler = info;
-      return this;
-    }
-
-    public MetricsConfig build() {
-      return new MetricsConfig(metricReporterPlugins, hiddenSysProps, counterSupplier, meterSupplier,
-          timerSupplier, histogramSupplier, historyHandler);
-    }
-
-  }
-
-}

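A short builder sketch; com.example.MyTimerSupplier is hypothetical, and the attrs-based PluginInfo constructor appears later in this diff.

    Map<String, String> attrs = Collections.singletonMap("class", "com.example.MyTimerSupplier");
    PluginInfo timer = new PluginInfo("timerSupplier", attrs, null, null);
    MetricsConfig cfg = new MetricsConfig.MetricsConfigBuilder()
        .setTimerSupplier(timer)
        .build();
    // suppliers left unset remain null, so callers fall back to their built-in defaults
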
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/NIOFSDirectoryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/NIOFSDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/NIOFSDirectoryFactory.java
deleted file mode 100644
index 459b12e..0000000
--- a/solr/core/src/java/org/apache/solr/core/NIOFSDirectoryFactory.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-import java.io.File;
-import java.io.IOException;
-
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.LockFactory;
-import org.apache.lucene.store.NIOFSDirectory;
-
-
-/**
- * Factory to instantiate {@link org.apache.lucene.store.NIOFSDirectory}
- *
- **/
-public class NIOFSDirectoryFactory extends StandardDirectoryFactory {
-
-  @Override
-  protected Directory create(String path, LockFactory lockFactory, DirContext dirContext) throws IOException {
-    // the effective lock factory is created by the caller and passed in as lockFactory
-    return new NIOFSDirectory(new File(path).toPath(), lockFactory);
-  }
-  
-  @Override
-  public boolean isAbsolute(String path) {
-    return new File(path).isAbsolute();
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/NRTCachingDirectoryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/NRTCachingDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/NRTCachingDirectoryFactory.java
deleted file mode 100644
index 789ffdb..0000000
--- a/solr/core/src/java/org/apache/solr/core/NRTCachingDirectoryFactory.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.io.File;
-import java.io.IOException;
-
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.store.LockFactory;
-import org.apache.lucene.store.NRTCachingDirectory;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-
-/**
- * Factory to instantiate {@link org.apache.lucene.store.NRTCachingDirectory}
- */
-public class NRTCachingDirectoryFactory extends StandardDirectoryFactory {
-  public static final int DEFAULT_MAX_MERGE_SIZE_MB = 4;
-  private double maxMergeSizeMB = DEFAULT_MAX_MERGE_SIZE_MB;
-  public static final int DEFAULT_MAX_CACHED_MB = 48;
-  private double maxCachedMB = DEFAULT_MAX_CACHED_MB;
-
-  @Override
-  public void init(NamedList args) {
-    super.init(args);
-    SolrParams params = args.toSolrParams();
-    maxMergeSizeMB = params.getDouble("maxMergeSizeMB", DEFAULT_MAX_MERGE_SIZE_MB);
-    if (maxMergeSizeMB <= 0){
-      throw new IllegalArgumentException("maxMergeSizeMB must be greater than 0");
-    }
-    maxCachedMB = params.getDouble("maxCachedMB", DEFAULT_MAX_CACHED_MB);
-    if (maxCachedMB <= 0){
-      throw new IllegalArgumentException("maxCachedMB must be greater than 0");
-    }
-  }
-
-  @Override
-  protected Directory create(String path, LockFactory lockFactory, DirContext dirContext) throws IOException {
-    // the effective lock factory is created by the caller and passed in as lockFactory
-    return new NRTCachingDirectory(FSDirectory.open(new File(path).toPath(), lockFactory), maxMergeSizeMB, maxCachedMB);
-  }
-  
-  @Override
-  public boolean isAbsolute(String path) {
-    return new File(path).isAbsolute();
-  }
-
-}

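A configuration sketch with the two knobs this factory reads; the values are illustrative and mirror the class defaults.

    NRTCachingDirectoryFactory factory = new NRTCachingDirectoryFactory();
    NamedList<Object> args = new NamedList<>();
    args.add("maxMergeSizeMB", "4.0");  // merges larger than this bypass the RAM cache
    args.add("maxCachedMB", "48.0");    // total RAM budget for cached files
    factory.init(args);
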
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/NodeConfig.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/NodeConfig.java b/solr/core/src/java/org/apache/solr/core/NodeConfig.java
deleted file mode 100644
index dba194f..0000000
--- a/solr/core/src/java/org/apache/solr/core/NodeConfig.java
+++ /dev/null
@@ -1,402 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.nio.file.Path;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Properties;
-import java.util.Set;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.logging.LogWatcherConfig;
-import org.apache.solr.update.UpdateShardHandlerConfig;
-
-
-public class NodeConfig {
-
-  private final String nodeName;
-
-  private final Path coreRootDirectory;
-
-  private final Path solrDataHome;
-
-  private final Path configSetBaseDirectory;
-
-  private final String sharedLibDirectory;
-
-  private final PluginInfo shardHandlerFactoryConfig;
-
-  private final UpdateShardHandlerConfig updateShardHandlerConfig;
-
-  private final String coreAdminHandlerClass;
-
-  private final String collectionsAdminHandlerClass;
-
-  private final String healthCheckHandlerClass;
-
-  private final String infoHandlerClass;
-
-  private final String configSetsHandlerClass;
-
-  private final LogWatcherConfig logWatcherConfig;
-
-  private final CloudConfig cloudConfig;
-
-  private final Integer coreLoadThreads;
-
-  private final int replayUpdatesThreads;
-
-  @Deprecated
-  // This should be part of the transientCacheConfig, remove in 7.0
-  private final int transientCacheSize;
-
-  private final boolean useSchemaCache;
-
-  private final String managementPath;
-
-  private final PluginInfo[] backupRepositoryPlugins;
-
-  private final MetricsConfig metricsConfig;
-
-  private final PluginInfo transientCacheConfig;
-
-  private NodeConfig(String nodeName, Path coreRootDirectory, Path solrDataHome, Path configSetBaseDirectory, String sharedLibDirectory,
-                     PluginInfo shardHandlerFactoryConfig, UpdateShardHandlerConfig updateShardHandlerConfig,
-                     String coreAdminHandlerClass, String collectionsAdminHandlerClass,
-                     String healthCheckHandlerClass, String infoHandlerClass, String configSetsHandlerClass,
-                     LogWatcherConfig logWatcherConfig, CloudConfig cloudConfig, Integer coreLoadThreads, int replayUpdatesThreads,
-                     int transientCacheSize, boolean useSchemaCache, String managementPath, SolrResourceLoader loader,
-                     Properties solrProperties, PluginInfo[] backupRepositoryPlugins,
-                     MetricsConfig metricsConfig, PluginInfo transientCacheConfig) {
-    this.nodeName = nodeName;
-    this.coreRootDirectory = coreRootDirectory;
-    this.solrDataHome = solrDataHome;
-    this.configSetBaseDirectory = configSetBaseDirectory;
-    this.sharedLibDirectory = sharedLibDirectory;
-    this.shardHandlerFactoryConfig = shardHandlerFactoryConfig;
-    this.updateShardHandlerConfig = updateShardHandlerConfig;
-    this.coreAdminHandlerClass = coreAdminHandlerClass;
-    this.collectionsAdminHandlerClass = collectionsAdminHandlerClass;
-    this.healthCheckHandlerClass = healthCheckHandlerClass;
-    this.infoHandlerClass = infoHandlerClass;
-    this.configSetsHandlerClass = configSetsHandlerClass;
-    this.logWatcherConfig = logWatcherConfig;
-    this.cloudConfig = cloudConfig;
-    this.coreLoadThreads = coreLoadThreads;
-    this.replayUpdatesThreads = replayUpdatesThreads;
-    this.transientCacheSize = transientCacheSize;
-    this.useSchemaCache = useSchemaCache;
-    this.managementPath = managementPath;
-    this.loader = loader;
-    this.solrProperties = solrProperties;
-    this.backupRepositoryPlugins = backupRepositoryPlugins;
-    this.metricsConfig = metricsConfig;
-    this.transientCacheConfig = transientCacheConfig;
-
-    if (this.cloudConfig != null && this.getCoreLoadThreadCount(false) < 2) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "SolrCloud requires a value of at least 2 for coreLoadThreads (configured value = " + this.coreLoadThreads + ")");
-    }
-  }
-
-  public String getNodeName() {
-    return nodeName;
-  }
-
-  public Path getCoreRootDirectory() {
-    return coreRootDirectory;
-  }
-
-  public Path getSolrDataHome() {
-    return solrDataHome;
-  }
-
-  public PluginInfo getShardHandlerFactoryPluginInfo() {
-    return shardHandlerFactoryConfig;
-  }
-
-  public UpdateShardHandlerConfig getUpdateShardHandlerConfig() {
-    return updateShardHandlerConfig;
-  }
-
-  public int getCoreLoadThreadCount(boolean zkAware) {
-    return coreLoadThreads == null ?
-        (zkAware ? NodeConfigBuilder.DEFAULT_CORE_LOAD_THREADS_IN_CLOUD : NodeConfigBuilder.DEFAULT_CORE_LOAD_THREADS)
-        : coreLoadThreads;
-  }
-
-  public int getReplayUpdatesThreads() {
-    return replayUpdatesThreads;
-  }
-
-  public String getSharedLibDirectory() {
-    return sharedLibDirectory;
-  }
-
-  public String getCoreAdminHandlerClass() {
-    return coreAdminHandlerClass;
-  }
-  
-  public String getCollectionsHandlerClass() {
-    return collectionsAdminHandlerClass;
-  }
-
-  public String getHealthCheckHandlerClass() {
-    return healthCheckHandlerClass;
-  }
-
-  public String getInfoHandlerClass() {
-    return infoHandlerClass;
-  }
-
-  public String getConfigSetsHandlerClass() {
-    return configSetsHandlerClass;
-  }
-
-  public boolean hasSchemaCache() {
-    return useSchemaCache;
-  }
-
-  public String getManagementPath() {
-    return managementPath;
-  }
-
-  public Path getConfigSetBaseDirectory() {
-    return configSetBaseDirectory;
-  }
-
-  public LogWatcherConfig getLogWatcherConfig() {
-    return logWatcherConfig;
-  }
-
-  public CloudConfig getCloudConfig() {
-    return cloudConfig;
-  }
-
-  public int getTransientCacheSize() {
-    return transientCacheSize;
-  }
-
-  protected final SolrResourceLoader loader;
-  protected final Properties solrProperties;
-
-  public Properties getSolrProperties() {
-    return solrProperties;
-  }
-
-  public SolrResourceLoader getSolrResourceLoader() {
-    return loader;
-  }
-
-  public PluginInfo[] getBackupRepositoryPlugins() {
-    return backupRepositoryPlugins;
-  }
-
-  public MetricsConfig getMetricsConfig() {
-    return metricsConfig;
-  }
-
-  public PluginInfo getTransientCachePluginInfo() { return transientCacheConfig; }
-
-  public static class NodeConfigBuilder {
-
-    private Path coreRootDirectory;
-    private Path solrDataHome;
-    private Path configSetBaseDirectory;
-    private String sharedLibDirectory = "lib";
-    private PluginInfo shardHandlerFactoryConfig;
-    private UpdateShardHandlerConfig updateShardHandlerConfig = UpdateShardHandlerConfig.DEFAULT;
-    private String coreAdminHandlerClass = DEFAULT_ADMINHANDLERCLASS;
-    private String collectionsAdminHandlerClass = DEFAULT_COLLECTIONSHANDLERCLASS;
-    private String healthCheckHandlerClass = DEFAULT_HEALTHCHECKHANDLERCLASS;
-    private String infoHandlerClass = DEFAULT_INFOHANDLERCLASS;
-    private String configSetsHandlerClass = DEFAULT_CONFIGSETSHANDLERCLASS;
-    private LogWatcherConfig logWatcherConfig = new LogWatcherConfig(true, null, null, 50);
-    private CloudConfig cloudConfig;
-    private int coreLoadThreads = DEFAULT_CORE_LOAD_THREADS;
-    private int replayUpdatesThreads = Runtime.getRuntime().availableProcessors();
-    @Deprecated
-    //Remove in 7.0 and put it all in the transientCache element in solrconfig.xml
-    private int transientCacheSize = DEFAULT_TRANSIENT_CACHE_SIZE;
-    private boolean useSchemaCache = false;
-    private String managementPath;
-    private Properties solrProperties = new Properties();
-    private PluginInfo[] backupRepositoryPlugins;
-    private MetricsConfig metricsConfig;
-    private PluginInfo transientCacheConfig;
-
-    private final SolrResourceLoader loader;
-    private final String nodeName;
-
-    public static final int DEFAULT_CORE_LOAD_THREADS = 3;
-    // Number of core load threads in cloud mode defaults to 8
-    public static final int DEFAULT_CORE_LOAD_THREADS_IN_CLOUD = 8;
-
-    public static final int DEFAULT_TRANSIENT_CACHE_SIZE = Integer.MAX_VALUE;
-
-    private static final String DEFAULT_ADMINHANDLERCLASS = "org.apache.solr.handler.admin.CoreAdminHandler";
-    private static final String DEFAULT_INFOHANDLERCLASS = "org.apache.solr.handler.admin.InfoHandler";
-    private static final String DEFAULT_COLLECTIONSHANDLERCLASS = "org.apache.solr.handler.admin.CollectionsHandler";
-    private static final String DEFAULT_HEALTHCHECKHANDLERCLASS = "org.apache.solr.handler.admin.HealthCheckHandler";
-    private static final String DEFAULT_CONFIGSETSHANDLERCLASS = "org.apache.solr.handler.admin.ConfigSetsHandler";
-
-    public static final Set<String> DEFAULT_HIDDEN_SYS_PROPS = new HashSet<>(Arrays.asList(
-        "javax.net.ssl.keyStorePassword",
-        "javax.net.ssl.trustStorePassword",
-        "basicauth",
-        "zkDigestPassword",
-        "zkDigestReadonlyPassword"
-    ));
-
-    public NodeConfigBuilder(String nodeName, SolrResourceLoader loader) {
-      this.nodeName = nodeName;
-      this.loader = loader;
-      this.coreRootDirectory = loader.getInstancePath();
-      // always init from sysprop because <solrDataHome> config element may be missing
-      String dataHomeProperty = System.getProperty(SolrXmlConfig.SOLR_DATA_HOME);
-      if (dataHomeProperty != null && !dataHomeProperty.isEmpty()) {
-        solrDataHome = loader.getInstancePath().resolve(dataHomeProperty);
-      }
-      this.configSetBaseDirectory = loader.getInstancePath().resolve("configsets");
-      this.metricsConfig = new MetricsConfig.MetricsConfigBuilder().build();
-    }
-
-    public NodeConfigBuilder setCoreRootDirectory(String coreRootDirectory) {
-      this.coreRootDirectory = loader.getInstancePath().resolve(coreRootDirectory);
-      return this;
-    }
-
-    public NodeConfigBuilder setSolrDataHome(String solrDataHomeString) {
-      // keep it null unless explicitly set to non-empty value
-      if (solrDataHomeString != null && !solrDataHomeString.isEmpty()) {
-        this.solrDataHome = loader.getInstancePath().resolve(solrDataHomeString);
-      }
-      return this;
-    }
-
-    public NodeConfigBuilder setConfigSetBaseDirectory(String configSetBaseDirectory) {
-      this.configSetBaseDirectory = loader.getInstancePath().resolve(configSetBaseDirectory);
-      return this;
-    }
-
-    public NodeConfigBuilder setSharedLibDirectory(String sharedLibDirectory) {
-      this.sharedLibDirectory = sharedLibDirectory;
-      return this;
-    }
-
-    public NodeConfigBuilder setShardHandlerFactoryConfig(PluginInfo shardHandlerFactoryConfig) {
-      this.shardHandlerFactoryConfig = shardHandlerFactoryConfig;
-      return this;
-    }
-
-    public NodeConfigBuilder setUpdateShardHandlerConfig(UpdateShardHandlerConfig updateShardHandlerConfig) {
-      this.updateShardHandlerConfig = updateShardHandlerConfig;
-      return this;
-    }
-
-    public NodeConfigBuilder setCoreAdminHandlerClass(String coreAdminHandlerClass) {
-      this.coreAdminHandlerClass = coreAdminHandlerClass;
-      return this;
-    }
-
-    public NodeConfigBuilder setCollectionsAdminHandlerClass(String collectionsAdminHandlerClass) {
-      this.collectionsAdminHandlerClass = collectionsAdminHandlerClass;
-      return this;
-    }
-
-    public NodeConfigBuilder setHealthCheckHandlerClass(String healthCheckHandlerClass) {
-      this.healthCheckHandlerClass = healthCheckHandlerClass;
-      return this;
-    }
-
-    public NodeConfigBuilder setInfoHandlerClass(String infoHandlerClass) {
-      this.infoHandlerClass = infoHandlerClass;
-      return this;
-    }
-
-    public NodeConfigBuilder setConfigSetsHandlerClass(String configSetsHandlerClass) {
-      this.configSetsHandlerClass = configSetsHandlerClass;
-      return this;
-    }
-
-    public NodeConfigBuilder setLogWatcherConfig(LogWatcherConfig logWatcherConfig) {
-      this.logWatcherConfig = logWatcherConfig;
-      return this;
-    }
-
-    public NodeConfigBuilder setCloudConfig(CloudConfig cloudConfig) {
-      this.cloudConfig = cloudConfig;
-      return this;
-    }
-
-    public NodeConfigBuilder setCoreLoadThreads(int coreLoadThreads) {
-      this.coreLoadThreads = coreLoadThreads;
-      return this;
-    }
-
-    public NodeConfigBuilder setReplayUpdatesThreads(int replayUpdatesThreads) {
-      this.replayUpdatesThreads = replayUpdatesThreads;
-      return this;
-    }
-
-    // Remove in Solr 7.0
-    @Deprecated
-    public NodeConfigBuilder setTransientCacheSize(int transientCacheSize) {
-      this.transientCacheSize = transientCacheSize;
-      return this;
-    }
-
-    public NodeConfigBuilder setUseSchemaCache(boolean useSchemaCache) {
-      this.useSchemaCache = useSchemaCache;
-      return this;
-    }
-
-    public NodeConfigBuilder setManagementPath(String managementPath) {
-      this.managementPath = managementPath;
-      return this;
-    }
-
-    public NodeConfigBuilder setSolrProperties(Properties solrProperties) {
-      this.solrProperties = solrProperties;
-      return this;
-    }
-
-    public NodeConfigBuilder setBackupRepositoryPlugins(PluginInfo[] backupRepositoryPlugins) {
-      this.backupRepositoryPlugins = backupRepositoryPlugins;
-      return this;
-    }
-
-    public NodeConfigBuilder setMetricsConfig(MetricsConfig metricsConfig) {
-      this.metricsConfig = metricsConfig;
-      return this;
-    }
-    
-    public NodeConfigBuilder setSolrCoreCacheFactoryConfig(PluginInfo transientCacheConfig) {
-      this.transientCacheConfig = transientCacheConfig;
-      return this;
-    }
-
-    public NodeConfig build() {
-      return new NodeConfig(nodeName, coreRootDirectory, solrDataHome, configSetBaseDirectory, sharedLibDirectory, shardHandlerFactoryConfig,
-                            updateShardHandlerConfig, coreAdminHandlerClass, collectionsAdminHandlerClass, healthCheckHandlerClass, infoHandlerClass, configSetsHandlerClass,
-                            logWatcherConfig, cloudConfig, coreLoadThreads, replayUpdatesThreads, transientCacheSize, useSchemaCache, managementPath, loader, solrProperties,
-                            backupRepositoryPlugins, metricsConfig, transientCacheConfig);
-    }
-  }
-}
-

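A builder sketch; the node name and paths are illustrative, and it assumes a SolrResourceLoader rooted at the Solr home (java.nio.file.Paths import implied).

    SolrResourceLoader loader = new SolrResourceLoader(Paths.get("/var/solr"));
    NodeConfig cfg = new NodeConfig.NodeConfigBuilder("node1", loader)
        .setCoreRootDirectory("cores")            // resolved against the instance path
        .setConfigSetBaseDirectory("configsets")
        .build();
    // with no cloud config, getCoreLoadThreadCount(false) returns the default of 3
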
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/PluginBag.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/PluginBag.java b/solr/core/src/java/org/apache/solr/core/PluginBag.java
deleted file mode 100644
index abd1a44..0000000
--- a/solr/core/src/java/org/apache/solr/core/PluginBag.java
+++ /dev/null
@@ -1,602 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.stream.Collectors;
-import java.util.zip.ZipEntry;
-import java.util.zip.ZipInputStream;
-
-import org.apache.lucene.analysis.util.ResourceLoader;
-import org.apache.lucene.analysis.util.ResourceLoaderAware;
-import org.apache.solr.api.Api;
-import org.apache.solr.api.ApiBag;
-import org.apache.solr.api.ApiSupport;
-import org.apache.solr.cloud.CloudUtil;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.handler.component.SearchComponent;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.update.processor.UpdateRequestProcessorChain;
-import org.apache.solr.update.processor.UpdateRequestProcessorFactory;
-import org.apache.solr.util.CryptoKeys;
-import org.apache.solr.util.SimplePostTool;
-import org.apache.solr.util.plugin.NamedListInitializedPlugin;
-import org.apache.solr.util.plugin.PluginInfoInitialized;
-import org.apache.solr.util.plugin.SolrCoreAware;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static java.util.Collections.singletonMap;
-import static org.apache.solr.api.ApiBag.HANDLER_NAME;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-/**
- * This manages the lifecycle of a set of plugins of the same type.
- */
-public class PluginBag<T> implements AutoCloseable {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final Map<String, PluginHolder<T>> registry;
-  private final Map<String, PluginHolder<T>> immutableRegistry;
-  private String def;
-  private final Class klass;
-  private SolrCore core;
-  private final SolrConfig.SolrPluginInfo meta;
-  private final ApiBag apiBag;
-
-  /**
-   * Pass needThreadSafety=true if plugins can be added and removed concurrently with lookups.
-   */
-  public PluginBag(Class<T> klass, SolrCore core, boolean needThreadSafety) {
-    this.apiBag = klass == SolrRequestHandler.class ? new ApiBag(core != null) : null;
-    this.core = core;
-    this.klass = klass;
-    // TODO: since reads will dominate writes, we could also think about creating a new instance of a map each time it changes.
-    // Not sure how much benefit this would have over ConcurrentHashMap though
-    // We could also perhaps make this constructor into a factory method to return different implementations depending on thread safety needs.
-    this.registry = needThreadSafety ? new ConcurrentHashMap<>() : new HashMap<>();
-    this.immutableRegistry = Collections.unmodifiableMap(registry);
-    meta = SolrConfig.classVsSolrPluginInfo.get(klass.getName());
-    if (meta == null) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown Plugin : " + klass.getName());
-    }
-  }
-
-  /**
-   * Constructs a non-threadsafe plugin registry
-   */
-  public PluginBag(Class<T> klass, SolrCore core) {
-    this(klass, core, false);
-  }
-
-  static void initInstance(Object inst, PluginInfo info) {
-    if (inst instanceof PluginInfoInitialized) {
-      ((PluginInfoInitialized) inst).init(info);
-    } else if (inst instanceof NamedListInitializedPlugin) {
-      ((NamedListInitializedPlugin) inst).init(info.initArgs);
-    } else if (inst instanceof SolrRequestHandler) {
-      ((SolrRequestHandler) inst).init(info.initArgs);
-    }
-    if (inst instanceof SearchComponent) {
-      ((SearchComponent) inst).setName(info.name);
-    }
-    if (inst instanceof RequestHandlerBase) {
-      ((RequestHandlerBase) inst).setPluginInfo(info);
-    }
-
-  }
-
-  /**
-   * Check whether any of the given names are missing; if so, return the set of missing names.
-   */
-  public Set<String> checkContains(Collection<String> names) {
-    if (names == null || names.isEmpty()) return Collections.emptySet();
-    HashSet<String> result = new HashSet<>();
-    for (String s : names) if (!this.registry.containsKey(s)) result.add(s);
-    return result;
-  }
-
-  public PluginHolder<T> createPlugin(PluginInfo info) {
-    if ("true".equals(String.valueOf(info.attributes.get("runtimeLib")))) {
-      log.debug(" {} : '{}'  created with runtimeLib=true ", meta.getCleanTag(), info.name);
-      LazyPluginHolder<T> holder = new LazyPluginHolder<>(meta, info, core, "true".equals(System.getProperty("enable.runtime.lib")) ?
-          core.getMemClassLoader() :
-          core.getResourceLoader(), true);
-
-      return meta.clazz == UpdateRequestProcessorFactory.class ?
-          (PluginHolder<T>) new UpdateRequestProcessorChain.LazyUpdateProcessorFactoryHolder(holder) :
-          holder;
-    } else if ("lazy".equals(info.attributes.get("startup")) && meta.options.contains(SolrConfig.PluginOpts.LAZY)) {
-      log.debug("{} : '{}' created with startup=lazy ", meta.getCleanTag(), info.name);
-      return new LazyPluginHolder<T>(meta, info, core, core.getResourceLoader(), false);
-    } else {
-      T inst = core.createInstance(info.className, (Class<T>) meta.clazz, meta.getCleanTag(), null, core.getResourceLoader());
-      initInstance(inst, info);
-      return new PluginHolder<>(info, inst);
-    }
-  }
-
-  /** Make a plugin available under an alternate name. This is an internal API and not for public use.
-   * @param src key under which the plugin is already registered
-   * @param target the new key under which the plugin should be aliased. If target already exists, the alias fails
-   * @return true if the operation succeeded, false otherwise
-   */
-  boolean alias(String src, String target) {
-    if (src == null) return false;
-    PluginHolder<T> a = registry.get(src);
-    if (a == null) return false;
-    PluginHolder<T> b = registry.get(target);
-    if (b != null) return false;
-    registry.put(target, a);
-    return true;
-  }
-
-  /**
-   * Get a plugin by name. If the plugin is not already instantiated, it is
-   * done here
-   */
-  public T get(String name) {
-    PluginHolder<T> result = registry.get(name);
-    return result == null ? null : result.get();
-  }
-
-  /**
-   * Fetches a plugin by name, or the default.
-   *
-   * @param name       the name under which it is registered
-   * @param useDefault return the default if a plugin by that name does not exist
-   */
-  public T get(String name, boolean useDefault) {
-    T result = get(name);
-    if (useDefault && result == null) return get(def);
-    return result;
-  }
-
-  public Set<String> keySet() {
-    return immutableRegistry.keySet();
-  }
-
-  /**
-   * Register a plugin by name.
-   */
-  public T put(String name, T plugin) {
-    if (plugin == null) return null;
-    PluginHolder<T> pluginHolder = new PluginHolder<>(null, plugin);
-    pluginHolder.registerAPI = false;
-    PluginHolder<T> old = put(name, pluginHolder);
-    return old == null ? null : old.get();
-  }
-
-  PluginHolder<T> put(String name, PluginHolder<T> plugin) {
-    Boolean registerApi = null;
-    Boolean disableHandler = null;
-    if (plugin.pluginInfo != null) {
-      String registerAt = plugin.pluginInfo.attributes.get("registerPath");
-      if (registerAt != null) {
-        List<String> strs = StrUtils.splitSmart(registerAt, ',');
-        disableHandler = !strs.contains("/solr");
-        registerApi = strs.contains("/v2");
-      }
-    }
-
-    if (apiBag != null) {
-      if (plugin.isLoaded()) {
-        T inst = plugin.get();
-        if (inst instanceof ApiSupport) {
-          ApiSupport apiSupport = (ApiSupport) inst;
-          if (registerApi == null) registerApi = apiSupport.registerV2();
-          if (disableHandler == null) disableHandler = !apiSupport.registerV1();
-
-          if (registerApi) {
-            Collection<Api> apis = apiSupport.getApis();
-            if (apis != null) {
-              Map<String, String> nameSubstitutes = singletonMap(HANDLER_NAME, name);
-              for (Api api : apis) {
-                apiBag.register(api, nameSubstitutes);
-              }
-            }
-          }
-
-        }
-      } else {
-        if (registerApi != null && registerApi)
-          apiBag.registerLazy((PluginHolder<SolrRequestHandler>) plugin, plugin.pluginInfo);
-      }
-    }
-    if (disableHandler == null) disableHandler = Boolean.FALSE;
-    PluginHolder<T> old = null;
-    if (!disableHandler) old = registry.put(name, plugin);
-    if (plugin.pluginInfo != null && plugin.pluginInfo.isDefault()) setDefault(name);
-    if (plugin.isLoaded()) registerMBean(plugin.get(), core, name);
-    return old;
-  }
-
-  void setDefault(String def) {
-    if (!registry.containsKey(def)) return;
-    if (this.def != null) log.warn("Multiple defaults for : " + meta.getCleanTag());
-    this.def = def;
-  }
-
-  public Map<String, PluginHolder<T>> getRegistry() {
-    return immutableRegistry;
-  }
-
-  public boolean contains(String name) {
-    return registry.containsKey(name);
-  }
-
-  String getDefault() {
-    return def;
-  }
-
-  T remove(String name) {
-    PluginHolder<T> removed = registry.remove(name);
-    return removed == null ? null : removed.get();
-  }
-
-  void init(Map<String, T> defaults, SolrCore solrCore) {
-    init(defaults, solrCore, solrCore.getSolrConfig().getPluginInfos(klass.getName()));
-  }
-
-  /**
-   * Initializes the plugins after reading the meta data from {@link org.apache.solr.core.SolrConfig}.
-   *
-   * @param defaults These will be registered if not explicitly specified
-   */
-  void init(Map<String, T> defaults, SolrCore solrCore, List<PluginInfo> infos) {
-    core = solrCore;
-    for (PluginInfo info : infos) {
-      PluginHolder<T> o = createPlugin(info);
-      String name = info.name;
-      if (meta.clazz.equals(SolrRequestHandler.class)) name = RequestHandlers.normalize(info.name);
-      PluginHolder<T> old = put(name, o);
-      if (old != null) log.warn("Multiple entries of {} with name {}", meta.getCleanTag(), name);
-    }
-    if (infos.size() > 0) { // Aggregate logging
-      log.debug("[{}] Initialized {} plugins of type {}: {}", solrCore.getName(), infos.size(), meta.getCleanTag(),
-          infos.stream().map(i -> i.name).collect(Collectors.toList()));
-    }
-    for (Map.Entry<String, T> e : defaults.entrySet()) {
-      if (!contains(e.getKey())) {
-        put(e.getKey(), new PluginHolder<T>(null, e.getValue()));
-      }
-    }
-  }
-
-  /**
-   * To check if a plugin by a specified name is already loaded
-   */
-  public boolean isLoaded(String name) {
-    PluginHolder<T> result = registry.get(name);
-    if (result == null) return false;
-    return result.isLoaded();
-  }
-
-  private void registerMBean(Object inst, SolrCore core, String pluginKey) {
-    if (core == null) return;
-    if (inst instanceof SolrInfoBean) {
-      SolrInfoBean mBean = (SolrInfoBean) inst;
-      String name = (inst instanceof SolrRequestHandler) ? pluginKey : mBean.getName();
-      core.registerInfoBean(name, mBean);
-    }
-  }
-
-
-  /**
-   * Close this registry. This will in turn call a close on all the contained plugins
-   */
-  @Override
-  public void close() {
-    for (Map.Entry<String, PluginHolder<T>> e : registry.entrySet()) {
-      try {
-        e.getValue().close();
-      } catch (Exception exp) {
-        log.error("Error closing plugin " + e.getKey() + " of type : " + meta.getCleanTag(), exp);
-      }
-    }
-  }
-
-  /**
-   * An indirect reference to a plugin. It just wraps a plugin instance.
-   * Subclasses may choose to load the plugin lazily.
-   */
-  public static class PluginHolder<T> implements AutoCloseable {
-    private T inst;
-    protected final PluginInfo pluginInfo;
-    boolean registerAPI = false;
-
-    public PluginHolder(PluginInfo info) {
-      this.pluginInfo = info;
-    }
-
-    public PluginHolder(PluginInfo info, T inst) {
-      this.inst = inst;
-      this.pluginInfo = info;
-    }
-
-    public T get() {
-      return inst;
-    }
-
-    public boolean isLoaded() {
-      return inst != null;
-    }
-
-    @Override
-    public void close() throws Exception {
-      // TODO: there may be a race here.  One thread can be creating a plugin
-      // and another thread can come along and close everything (missing the plugin
-      // that is in the state of being created and will probably never have close() called on it).
-      // can close() be called concurrently with other methods?
-      if (isLoaded()) {
-        T myInst = get();
-        if (myInst != null && myInst instanceof AutoCloseable) ((AutoCloseable) myInst).close();
-      }
-    }
-
-    public String getClassName() {
-      if (isLoaded()) return inst.getClass().getName();
-      if (pluginInfo != null) return pluginInfo.className;
-      return null;
-    }
-
-    public PluginInfo getPluginInfo() {
-      return pluginInfo;
-    }
-  }
-
-  /**
-   * A class that loads plugins lazily. When the get() method is invoked,
-   * the plugin is initialized and returned.
-   */
-  public class LazyPluginHolder<T> extends PluginHolder<T> {
-    private volatile T lazyInst;
-    private final SolrConfig.SolrPluginInfo pluginMeta;
-    protected SolrException solrException;
-    private final SolrCore core;
-    protected ResourceLoader resourceLoader;
-    private final boolean isRuntimeLib;
-
-
-    LazyPluginHolder(SolrConfig.SolrPluginInfo pluginMeta, PluginInfo pluginInfo, SolrCore core, ResourceLoader loader, boolean isRuntimeLib) {
-      super(pluginInfo);
-      this.pluginMeta = pluginMeta;
-      this.isRuntimeLib = isRuntimeLib;
-      this.core = core;
-      this.resourceLoader = loader;
-      if (loader instanceof MemClassLoader) {
-        if (!"true".equals(System.getProperty("enable.runtime.lib"))) {
-          String s = "runtime library loading is not enabled, start Solr with -Denable.runtime.lib=true";
-          log.warn(s);
-          solrException = new SolrException(SolrException.ErrorCode.SERVER_ERROR, s);
-        }
-      }
-    }
-
-    @Override
-    public boolean isLoaded() {
-      return lazyInst != null;
-    }
-
-    @Override
-    public T get() {
-      if (lazyInst != null) return lazyInst;
-      if (solrException != null) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,"Unrecoverable error", solrException);
-      }
-      if (createInst()) {
-        // check if we created the instance to avoid registering it again
-        registerMBean(lazyInst, core, pluginInfo.name);
-      }
-      return lazyInst;
-    }
-
-    private synchronized boolean createInst() {
-      if (lazyInst != null) return false;
-      log.info("Going to create a new {} with {} ", pluginMeta.getCleanTag(), pluginInfo.toString());
-      if (resourceLoader instanceof MemClassLoader) {
-        MemClassLoader loader = (MemClassLoader) resourceLoader;
-        loader.loadJars();
-      }
-      Class<T> clazz = (Class<T>) pluginMeta.clazz;
-      T localInst = null;
-      try {
-        localInst = core.createInstance(pluginInfo.className, clazz, pluginMeta.getCleanTag(), null, resourceLoader);
-      } catch (SolrException e) {
-        if (isRuntimeLib && !(resourceLoader instanceof MemClassLoader)) {
-          throw new SolrException(SolrException.ErrorCode.getErrorCode(e.code()),
-              e.getMessage() + ". runtime library loading is not enabled, start Solr with -Denable.runtime.lib=true",
-              e.getCause());
-        }
-        throw e;
-
-
-      }
-      initInstance(localInst, pluginInfo);
-      if (localInst instanceof SolrCoreAware) {
-        SolrResourceLoader.assertAwareCompatibility(SolrCoreAware.class, localInst);
-        ((SolrCoreAware) localInst).inform(core);
-      }
-      if (localInst instanceof ResourceLoaderAware) {
-        SolrResourceLoader.assertAwareCompatibility(ResourceLoaderAware.class, localInst);
-        try {
-          ((ResourceLoaderAware) localInst).inform(core.getResourceLoader());
-        } catch (IOException e) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "error initializing component", e);
-        }
-      }
-      lazyInst = localInst;  // only assign the volatile until after the plugin is completely ready to use
-      return true;
-    }
-
-
-  }
-
-  /**
-   * This represents a runtime jar. A jar requires two details: name and version.
-   */
-  public static class RuntimeLib implements PluginInfoInitialized, AutoCloseable {
-    private String name, version, sig;
-    private BlobRepository.BlobContentRef<ByteBuffer> jarContent;
-    private final CoreContainer coreContainer;
-    private boolean verified = false;
-
-    @Override
-    public void init(PluginInfo info) {
-      name = info.attributes.get(NAME);
-      Object v = info.attributes.get("version");
-      if (name == null || v == null) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "runtimeLib must have name and version");
-      }
-      version = String.valueOf(v);
-      sig = info.attributes.get("sig");
-    }
-
-    public RuntimeLib(SolrCore core) {
-      coreContainer = core.getCoreContainer();
-    }
-
-
-    void loadJar() {
-      if (jarContent != null) return;
-      synchronized (this) {
-        if (jarContent != null) return;
-        jarContent = coreContainer.getBlobRepository().getBlobIncRef(name + "/" + version);
-      }
-    }
-
-    public String getName() {
-      return name;
-    }
-
-    public String getVersion() {
-      return version;
-    }
-
-    public String getSig() {
-      return sig;
-    }
-
-    public ByteBuffer getFileContent(String entryName) throws IOException {
-      if (jarContent == null)
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "jar not available: " + name + "/" + version);
-      return getFileContent(jarContent.blob, entryName);
-    }
-
-    public ByteBuffer getFileContent(BlobRepository.BlobContent<ByteBuffer> blobContent,  String entryName) throws IOException {
-      ByteBuffer buff = blobContent.get();
-      ByteArrayInputStream zipContents = new ByteArrayInputStream(buff.array(), buff.arrayOffset(), buff.limit());
-      ZipInputStream zis = new ZipInputStream(zipContents);
-      try {
-        ZipEntry entry;
-        while ((entry = zis.getNextEntry()) != null) {
-          if (entryName == null || entryName.equals(entry.getName())) {
-            SimplePostTool.BAOS out = new SimplePostTool.BAOS();
-            byte[] buffer = new byte[2048];
-            int size;
-            while ((size = zis.read(buffer, 0, buffer.length)) != -1) {
-              out.write(buffer, 0, size);
-            }
-            out.close();
-            return out.getByteBuffer();
-          }
-        }
-      } finally {
-        zis.closeEntry();
-      }
-      return null;
-    }
-
-
-    @Override
-    public void close() throws Exception {
-      if (jarContent != null) coreContainer.getBlobRepository().decrementBlobRefCount(jarContent);
-    }
-
-    public static List<RuntimeLib> getLibObjects(SolrCore core, List<PluginInfo> libs) {
-      List<RuntimeLib> l = new ArrayList<>(libs.size());
-      for (PluginInfo lib : libs) {
-        RuntimeLib rtl = new RuntimeLib(core);
-        rtl.init(lib);
-        l.add(rtl);
-      }
-      return l;
-    }
-
-    public void verify() throws Exception {
-      if (verified) return;
-      if (jarContent == null) {
-        log.error("Calling verify before loading the jar");
-        return;
-      }
-
-      if (!coreContainer.isZooKeeperAware())
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Signing jar is possible only in cloud");
-      Map<String, byte[]> keys = CloudUtil.getTrustedKeys(coreContainer.getZkController().getZkClient(), "exe");
-      if (keys.isEmpty()) {
-        if (sig == null) {
-          verified = true;
-          log.info("A run time lib {} is loaded  without verification ", name);
-          return;
-        } else {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "No public keys are available in ZK to verify signature for runtime lib  " + name);
-        }
-      } else if (sig == null) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, StrUtils.formatString("runtimelib {0} should be signed with one of the keys in ZK /keys/exe ", name));
-      }
-
-      try {
-        String matchedKey = new CryptoKeys(keys).verify(sig, jarContent.blob.get());
-        if (matchedKey == null)
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "No key matched signature for jar : " + name + " version: " + version);
-        log.info("Jar {} signed with {} successfully verified", name, matchedKey);
-      } catch (Exception e) {
-        if (e instanceof SolrException) throw e;
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error verifying key ", e);
-      }
-    }
-  }
-
-
-  public Api v2lookup(String path, String method, Map<String, String> parts) {
-    if (apiBag == null) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "this should not happen, looking up for v2 API at the wrong place");
-    }
-    return apiBag.lookup(path, method, parts);
-  }
-
-  public ApiBag getApiBag() {
-    return apiBag;
-  }
-
-}

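The RuntimeLib.verify() method above checks a runtime jar's signature against trusted public keys fetched from ZooKeeper before the jar is allowed to load. As a rough, self-contained sketch of that verification pattern -- assuming RSA public keys in X.509/DER encoding and a SHA1withRSA signature, which may differ from what Solr's CryptoKeys helper actually uses -- a JDK-only version could look like this:

    import java.nio.ByteBuffer;
    import java.security.KeyFactory;
    import java.security.PublicKey;
    import java.security.Signature;
    import java.security.spec.X509EncodedKeySpec;
    import java.util.Base64;
    import java.util.Map;

    public class JarSignatureCheck {
      /** Returns the name of the first key that verifies the signature, or null if none match. */
      public static String verify(Map<String, byte[]> publicKeys, String base64Sig,
                                  ByteBuffer jarBytes) throws Exception {
        byte[] sig = Base64.getDecoder().decode(base64Sig);
        for (Map.Entry<String, byte[]> e : publicKeys.entrySet()) {
          PublicKey key = KeyFactory.getInstance("RSA")
              .generatePublic(new X509EncodedKeySpec(e.getValue()));
          Signature verifier = Signature.getInstance("SHA1withRSA"); // assumed algorithm
          verifier.initVerify(key);
          verifier.update(jarBytes.duplicate()); // duplicate() leaves the caller's buffer position untouched
          if (verifier.verify(sig)) return e.getKey();
        }
        return null; // mirrors CryptoKeys.verify() returning null for "no key matched"
      }
    }

As in verify() above, a null return is the failure case: the caller raises a SERVER_ERROR rather than loading an unverifiable jar.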
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/PluginInfo.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/PluginInfo.java b/solr/core/src/java/org/apache/solr/core/PluginInfo.java
deleted file mode 100644
index 10f8b8d..0000000
--- a/solr/core/src/java/org/apache/solr/core/PluginInfo.java
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import org.apache.solr.common.MapSerializable;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.util.DOMUtil;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-
-import java.util.*;
-
-import static java.util.Arrays.asList;
-import static java.util.Collections.unmodifiableList;
-import static java.util.Collections.unmodifiableMap;
-import static org.apache.solr.common.params.CoreAdminParams.NAME;
-import static org.apache.solr.schema.FieldType.CLASS_NAME;
-
-/**
- * An object that represents a plugin of any type.
- */
-public class PluginInfo implements MapSerializable {
-  public final String name, className, type;
-  public final NamedList initArgs;
-  public final Map<String, String> attributes;
-  public final List<PluginInfo> children;
-  private boolean isFromSolrConfig;
-
-  public PluginInfo(String type, Map<String, String> attrs, NamedList initArgs, List<PluginInfo> children) {
-    this.type = type;
-    this.name = attrs.get(NAME);
-    this.className = attrs.get(CLASS_NAME);
-    this.initArgs = initArgs;
-    attributes = unmodifiableMap(attrs);
-    this.children = children == null ? Collections.<PluginInfo>emptyList(): unmodifiableList(children);
-    isFromSolrConfig = false;
-  }
-
-
-  public PluginInfo(Node node, String err, boolean requireName, boolean requireClass) {
-    type = node.getNodeName();
-    name = DOMUtil.getAttr(node, NAME, requireName ? err : null);
-    className = DOMUtil.getAttr(node, CLASS_NAME, requireClass ? err : null);
-    initArgs = DOMUtil.childNodesToNamedList(node);
-    attributes = unmodifiableMap(DOMUtil.toMap(node.getAttributes()));
-    children = loadSubPlugins(node);
-    isFromSolrConfig = true;
-  }
-
-  public PluginInfo(String type, Map<String,Object> map) {
-    LinkedHashMap m = new LinkedHashMap<>(map);
-    initArgs = new NamedList();
-    for (Map.Entry<String, Object> entry : map.entrySet()) {
-      if (NAME.equals(entry.getKey()) || CLASS_NAME.equals(entry.getKey())) continue;
-      Object value = entry.getValue();
-      if (value instanceof List) {
-        List list = (List) value;
-        if (!list.isEmpty() && list.get(0) instanceof Map) {//this is a subcomponent
-          for (Object o : list) {
-            if (o instanceof Map) o = new NamedList<>((Map) o);
-            initArgs.add(entry.getKey(), o);
-          }
-        } else {
-          initArgs.add(entry.getKey(), value);
-        }
-      } else {
-        if (value instanceof Map) value = new NamedList((Map) value);
-        initArgs.add(entry.getKey(), value);
-      }
-    }
-    this.type = type;
-    this.name = (String) m.get(NAME);
-    this.className = (String) m.get(CLASS_NAME);
-    attributes = unmodifiableMap(m);
-    this.children =  Collections.<PluginInfo>emptyList();
-    isFromSolrConfig = true;
-  }
-
-  private List<PluginInfo> loadSubPlugins(Node node) {
-    List<PluginInfo> children = new ArrayList<>();
-    // any child element that is not a named-list tag is treated as a nested plugin
-    NodeList nlst = node.getChildNodes();
-    for (int i = 0; i < nlst.getLength(); i++) {
-      Node nd = nlst.item(i);
-      if (nd.getNodeType() != Node.ELEMENT_NODE) continue;
-      if (NL_TAGS.contains(nd.getNodeName())) continue;
-      PluginInfo pluginInfo = new PluginInfo(nd, null, false, false);
-      if (pluginInfo.isEnabled()) children.add(pluginInfo);
-    }
-    return children.isEmpty() ? Collections.<PluginInfo>emptyList() : unmodifiableList(children);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("{");
-    if (type != null) sb.append("type = " + type + ",");
-    if (name != null) sb.append("name = " + name + ",");
-    if (className != null) sb.append("class = " + className + ",");
-    if (attributes != null && attributes.size() > 0) sb.append("attributes = " + attributes + ",");
-    if (initArgs != null && initArgs.size() > 0) sb.append("args = " + initArgs);
-    sb.append("}");
-    return sb.toString();
-  }
-
-  public boolean isEnabled(){
-    String enable = attributes.get("enable");
-    return enable == null || Boolean.parseBoolean(enable); 
-  }
-
-  public boolean isDefault() {
-    return Boolean.parseBoolean(attributes.get("default"));
-  }
-
-  public PluginInfo getChild(String type){
-    List<PluginInfo> l = getChildren(type);
-    return  l.isEmpty() ? null:l.get(0);
-  }
-
-  public Map<String, Object> toMap(Map<String, Object> map) {
-    map.putAll(attributes);
-    Map m = map;
-    if(initArgs!=null ) m.putAll(initArgs.asMap(3));
-    if(children != null){
-      for (PluginInfo child : children) {
-        Object old = m.get(child.name);
-        if(old == null){
-          m.put(child.name, child.toMap(new LinkedHashMap<>()));
-        } else if (old instanceof List) {
-          List list = (List) old;
-          list.add(child.toMap(new LinkedHashMap<>()));
-        }  else {
-          ArrayList l = new ArrayList();
-          l.add(old);
-          l.add(child.toMap(new LinkedHashMap<>()));
-          m.put(child.name,l);
-        }
-      }
-
-    }
-    return m;
-  }
-
-  /** Filter children by type.
-   * @param type The type name; must not be null
-   * @return The matching children
-   */
-  public List<PluginInfo> getChildren(String type){
-    if(children.isEmpty()) return children;
-    List<PluginInfo> result = new ArrayList<>();
-    for (PluginInfo child : children) if(type.equals(child.type)) result.add(child);
-    return result;
-  }
-  public static final PluginInfo EMPTY_INFO = new PluginInfo("",Collections.<String,String>emptyMap(), new NamedList(),Collections.<PluginInfo>emptyList());
-
-  private static final HashSet<String> NL_TAGS = new HashSet<>
-    (asList("lst", "arr",
-        "bool",
-        "str",
-        "int", "long",
-        "float", "double"));
-  public static final String DEFAULTS = "defaults";
-  public static final String APPENDS = "appends";
-  public static final String INVARIANTS = "invariants";
-
-  public boolean isFromSolrConfig(){
-    return isFromSolrConfig;
-
-  }
-  public PluginInfo copy() {
-    PluginInfo result = new PluginInfo(type, attributes,
-        initArgs != null ? initArgs.clone() : null, children);
-    result.isFromSolrConfig = isFromSolrConfig;
-    return result;
-  }
-}


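To make the map-based constructor above concrete, here is a hedged usage sketch; the cache name, class, and size parameter are made up for illustration, and the printed values follow from how the constructor splits the map into name, className, and initArgs:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import org.apache.solr.core.PluginInfo;

    public class PluginInfoDemo {
      public static void main(String[] args) {
        // Hypothetical cache config: "name" and "class" are pulled out as fields,
        // every other entry lands in initArgs.
        Map<String, Object> cfg = new LinkedHashMap<>();
        cfg.put("name", "filterCache");
        cfg.put("class", "solr.FastLRUCache");
        cfg.put("size", "512");
        PluginInfo info = new PluginInfo("cache", cfg);
        System.out.println(info.name + " / " + info.className); // filterCache / solr.FastLRUCache
        System.out.println(info.isEnabled()); // true: no "enable" attribute was set
      }
    }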
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java b/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java
deleted file mode 100644
index cce2939..0000000
--- a/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java
+++ /dev/null
@@ -1,519 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.IOException;
-import java.io.Reader;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.regex.Pattern;
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.ExitableDirectoryReader;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.mlt.MoreLikeThis;
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.BoostQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.util.CharsRefBuilder;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.StringUtils;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.FacetParams;
-import org.apache.solr.common.params.MoreLikeThisParams.TermStyle;
-import org.apache.solr.common.params.MoreLikeThisParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.ContentStream;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.handler.component.FacetComponent;
-import org.apache.solr.request.SimpleFacets;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.schema.IndexSchema;
-import org.apache.solr.schema.SchemaField;
-import org.apache.solr.search.DocIterator;
-import org.apache.solr.search.DocList;
-import org.apache.solr.search.DocListAndSet;
-import org.apache.solr.search.QParser;
-import org.apache.solr.search.QParserPlugin;
-import org.apache.solr.search.QueryParsing;
-import org.apache.solr.search.ReturnFields;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.search.SolrQueryTimeoutImpl;
-import org.apache.solr.search.SolrReturnFields;
-import org.apache.solr.search.SortSpec;
-import org.apache.solr.search.SyntaxError;
-import org.apache.solr.util.SolrPluginUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Solr MoreLikeThis --
- * 
- * Return similar documents either based on a single document or based on posted text.
- * 
- * @since solr 1.3
- */
-public class MoreLikeThisHandler extends RequestHandlerBase  
-{
-  // Pattern is thread safe -- TODO? share this with general 'fl' param
-  private static final Pattern splitList = Pattern.compile(",| ");
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  static final String ERR_MSG_QUERY_OR_TEXT_REQUIRED =
-      "MoreLikeThis requires either a query (?q=) or text to find similar documents.";
-
-  static final String ERR_MSG_SINGLE_STREAM_ONLY =
-      "MoreLikeThis does not support multiple ContentStreams";
-
-  @Override
-  public void init(NamedList args) {
-    super.init(args);
-  }
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception 
-  {
-    SolrParams params = req.getParams();
-
-    long timeAllowed = (long)params.getInt( CommonParams.TIME_ALLOWED, -1 );
-    if(timeAllowed > 0) {
-      SolrQueryTimeoutImpl.set(timeAllowed);
-    }
-      try {
-
-        // Set field flags
-        ReturnFields returnFields = new SolrReturnFields(req);
-        rsp.setReturnFields(returnFields);
-        int flags = 0;
-        if (returnFields.wantsScore()) {
-          flags |= SolrIndexSearcher.GET_SCORES;
-        }
-
-        String defType = params.get(QueryParsing.DEFTYPE, QParserPlugin.DEFAULT_QTYPE);
-        String q = params.get(CommonParams.Q);
-        Query query = null;
-        SortSpec sortSpec = null;
-        List<Query> filters = null;
-
-        try {
-          if (q != null) {
-            QParser parser = QParser.getParser(q, defType, req);
-            query = parser.getQuery();
-            sortSpec = parser.getSortSpec(true);
-          }
-
-          String[] fqs = req.getParams().getParams(CommonParams.FQ);
-          if (fqs != null && fqs.length != 0) {
-            filters = new ArrayList<>();
-            for (String fq : fqs) {
-              if (fq != null && fq.trim().length() != 0) {
-                QParser fqp = QParser.getParser(fq, req);
-                filters.add(fqp.getQuery());
-              }
-            }
-          }
-        } catch (SyntaxError e) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
-        }
-
-        SolrIndexSearcher searcher = req.getSearcher();
-
-        MoreLikeThisHelper mlt = new MoreLikeThisHelper(params, searcher);
-
-        // Hold on to the interesting terms if relevant
-        TermStyle termStyle = TermStyle.get(params.get(MoreLikeThisParams.INTERESTING_TERMS));
-        List<InterestingTerm> interesting = (termStyle == TermStyle.NONE)
-            ? null : new ArrayList<>(mlt.mlt.getMaxQueryTerms());
-
-        DocListAndSet mltDocs = null;
-
-        // Parse Required Params
-        // This will either have a single Reader or valid query
-        Reader reader = null;
-        try {
-          if (q == null || q.trim().length() < 1) {
-            Iterable<ContentStream> streams = req.getContentStreams();
-            if (streams != null) {
-              Iterator<ContentStream> iter = streams.iterator();
-              if (iter.hasNext()) {
-                reader = iter.next().getReader();
-              }
-              if (iter.hasNext()) {
-                throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-                    ERR_MSG_SINGLE_STREAM_ONLY);
-              }
-            }
-          }
-
-          int start = params.getInt(CommonParams.START, CommonParams.START_DEFAULT);
-          int rows = params.getInt(CommonParams.ROWS, CommonParams.ROWS_DEFAULT);
-
-          // Find documents MoreLikeThis - either with a reader or a query
-          // --------------------------------------------------------------------------------
-          if (reader != null) {
-            mltDocs = mlt.getMoreLikeThis(reader, start, rows, filters,
-                interesting, flags);
-          } else if (q != null) {
-            // Matching options
-            boolean includeMatch = params.getBool(MoreLikeThisParams.MATCH_INCLUDE,
-                true);
-            int matchOffset = params.getInt(MoreLikeThisParams.MATCH_OFFSET, 0);
-            // Find the base match
-            DocList match = searcher.getDocList(query, null, null, matchOffset, 1,
-                flags); // only get the first one...
-            if (includeMatch) {
-              rsp.add("match", match);
-            }
-
-            // This is an iterator, but we only handle the first match
-            DocIterator iterator = match.iterator();
-            if (iterator.hasNext()) {
-              // do a MoreLikeThis query for each document in results
-              int id = iterator.nextDoc();
-              mltDocs = mlt.getMoreLikeThis(id, start, rows, filters, interesting,
-                  flags);
-            }
-          } else {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-                ERR_MSG_QUERY_OR_TEXT_REQUIRED);
-          }
-
-        } finally {
-          if (reader != null) {
-            reader.close();
-          }
-        }
-
-        if (mltDocs == null) {
-          mltDocs = new DocListAndSet(); // avoid NPE
-        }
-        rsp.addResponse(mltDocs.docList);
-
-
-        if (interesting != null) {
-          if (termStyle == TermStyle.DETAILS) {
-            NamedList<Float> it = new NamedList<>();
-            for (InterestingTerm t : interesting) {
-              it.add(t.term.toString(), t.boost);
-            }
-            rsp.add("interestingTerms", it);
-          } else {
-            List<String> it = new ArrayList<>(interesting.size());
-            for (InterestingTerm t : interesting) {
-              it.add(t.term.text());
-            }
-            rsp.add("interestingTerms", it);
-          }
-        }
-
-        // maybe facet the results
-        if (params.getBool(FacetParams.FACET, false)) {
-          if (mltDocs.docSet == null) {
-            rsp.add("facet_counts", null);
-          } else {
-            SimpleFacets f = new SimpleFacets(req, mltDocs.docSet, params);
-            rsp.add("facet_counts", FacetComponent.getFacetCounts(f));
-          }
-        }
-        boolean dbg = req.getParams().getBool(CommonParams.DEBUG_QUERY, false);
-
-        boolean dbgQuery = false, dbgResults = false;
-        if (dbg == false) {//if it's true, we are doing everything anyway.
-          String[] dbgParams = req.getParams().getParams(CommonParams.DEBUG);
-          if (dbgParams != null) {
-            for (String dbgParam : dbgParams) {
-              if (dbgParam.equals(CommonParams.QUERY)) {
-                dbgQuery = true;
-              } else if (dbgParam.equals(CommonParams.RESULTS)) {
-                dbgResults = true;
-              }
-            }
-          }
-        } else {
-          dbgQuery = true;
-          dbgResults = true;
-        }
-        // TODO resolve duplicated code with DebugComponent.  Perhaps it should be added to doStandardDebug?
-        if (dbg == true) {
-          try {
-            NamedList<Object> dbgInfo = SolrPluginUtils.doStandardDebug(req, q, mlt.getRawMLTQuery(), mltDocs.docList, dbgQuery, dbgResults);
-            if (null != dbgInfo) {
-              if (null != filters) {
-                dbgInfo.add("filter_queries", req.getParams().getParams(CommonParams.FQ));
-                List<String> fqs = new ArrayList<>(filters.size());
-                for (Query fq : filters) {
-                  fqs.add(QueryParsing.toString(fq, req.getSchema()));
-                }
-                dbgInfo.add("parsed_filter_queries", fqs);
-              }
-              rsp.add("debug", dbgInfo);
-            }
-          } catch (Exception e) {
-            SolrException.log(log, "Exception during debug", e);
-            rsp.add("exception_during_debug", SolrException.toStr(e));
-          }
-        }
-      } catch (ExitableDirectoryReader.ExitingReaderException ex) {
-        log.warn( "Query: " + req.getParamString() + "; " + ex.getMessage());
-      } finally {
-        SolrQueryTimeoutImpl.reset();
-      }
-  }
-  
-  public static class InterestingTerm
-  {
-    public Term term;
-    public float boost;
-
-  }
-  
-  /**
-   * Helper class for MoreLikeThis that can be called from other request handlers
-   */
-  public static class MoreLikeThisHelper 
-  { 
-    final SolrIndexSearcher searcher;
-    final MoreLikeThis mlt;
-    final IndexReader reader;
-    final SchemaField uniqueKeyField;
-    final boolean needDocSet;
-    Map<String,Float> boostFields;
-    
-    public MoreLikeThisHelper( SolrParams params, SolrIndexSearcher searcher )
-    {
-      this.searcher = searcher;
-      this.reader = searcher.getIndexReader();
-      this.uniqueKeyField = searcher.getSchema().getUniqueKeyField();
-      this.needDocSet = params.getBool(FacetParams.FACET,false);
-      
-      SolrParams required = params.required();
-      String[] fl = required.getParams(MoreLikeThisParams.SIMILARITY_FIELDS);
-      List<String> list = new ArrayList<>();
-      for (String f : fl) {
-        if (!StringUtils.isEmpty(f))  {
-          String[] strings = splitList.split(f);
-          for (String string : strings) {
-            if (!StringUtils.isEmpty(string)) {
-              list.add(string);
-            }
-          }
-        }
-      }
-      String[] fields = list.toArray(new String[list.size()]);
-      if( fields.length < 1 ) {
-        throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, 
-            "MoreLikeThis requires at least one similarity field: "+MoreLikeThisParams.SIMILARITY_FIELDS );
-      }
-      
-      this.mlt = new MoreLikeThis( reader ); // TODO -- after LUCENE-896, we can use , searcher.getSimilarity() );
-      mlt.setFieldNames(fields);
-      mlt.setAnalyzer( searcher.getSchema().getIndexAnalyzer() );
-      
-      // configurable params
-      
-      mlt.setMinTermFreq(       params.getInt(MoreLikeThisParams.MIN_TERM_FREQ,         MoreLikeThis.DEFAULT_MIN_TERM_FREQ));
-      mlt.setMinDocFreq(        params.getInt(MoreLikeThisParams.MIN_DOC_FREQ,          MoreLikeThis.DEFAULT_MIN_DOC_FREQ));
-      mlt.setMaxDocFreq(        params.getInt(MoreLikeThisParams.MAX_DOC_FREQ,          MoreLikeThis.DEFAULT_MAX_DOC_FREQ));
-      mlt.setMinWordLen(        params.getInt(MoreLikeThisParams.MIN_WORD_LEN,          MoreLikeThis.DEFAULT_MIN_WORD_LENGTH));
-      mlt.setMaxWordLen(        params.getInt(MoreLikeThisParams.MAX_WORD_LEN,          MoreLikeThis.DEFAULT_MAX_WORD_LENGTH));
-      mlt.setMaxQueryTerms(     params.getInt(MoreLikeThisParams.MAX_QUERY_TERMS,       MoreLikeThis.DEFAULT_MAX_QUERY_TERMS));
-      mlt.setMaxNumTokensParsed(params.getInt(MoreLikeThisParams.MAX_NUM_TOKENS_PARSED, MoreLikeThis.DEFAULT_MAX_NUM_TOKENS_PARSED));
-      mlt.setBoost(            params.getBool(MoreLikeThisParams.BOOST, false ) );
-      
-      // There is no default for maxDocFreqPct. Also, it's a bit oddly expressed as an integer value
-      // (a percentage of the collection's document count). We keep Lucene's convention here.
-      if (params.getInt(MoreLikeThisParams.MAX_DOC_FREQ_PCT) != null) {
-        mlt.setMaxDocFreqPct(params.getInt(MoreLikeThisParams.MAX_DOC_FREQ_PCT));
-      }
-
-      boostFields = SolrPluginUtils.parseFieldBoosts(params.getParams(MoreLikeThisParams.QF));
-    }
-    
-    private Query rawMLTQuery;
-    private Query boostedMLTQuery;
-    private BooleanQuery realMLTQuery;
-    
-    public Query getRawMLTQuery(){
-      return rawMLTQuery;
-    }
-    
-    public Query getBoostedMLTQuery(){
-      return boostedMLTQuery;
-    }
-    
-    public Query getRealMLTQuery(){
-      return realMLTQuery;
-    }
-    
-    private Query getBoostedQuery(Query mltquery) {
-      BooleanQuery boostedQuery = (BooleanQuery)mltquery;
-      if (boostFields.size() > 0) {
-        BooleanQuery.Builder newQ = new BooleanQuery.Builder();
-        newQ.setMinimumNumberShouldMatch(boostedQuery.getMinimumNumberShouldMatch());
-        for (BooleanClause clause : boostedQuery) {
-          Query q = clause.getQuery();
-          float originalBoost = 1f;
-          if (q instanceof BoostQuery) {
-            BoostQuery bq = (BoostQuery) q;
-            q = bq.getQuery();
-            originalBoost = bq.getBoost();
-          }
-          Float fieldBoost = boostFields.get(((TermQuery) q).getTerm().field());
-          q = ((fieldBoost != null) ? new BoostQuery(q, fieldBoost * originalBoost) : clause.getQuery());
-          newQ.add(q, clause.getOccur());
-        }
-        boostedQuery = newQ.build();
-      }
-      return boostedQuery;
-    }
-    
-    public DocListAndSet getMoreLikeThis( int id, int start, int rows, List<Query> filters, List<InterestingTerm> terms, int flags ) throws IOException
-    {
-      Document doc = reader.document(id);
-      rawMLTQuery = mlt.like(id);
-      boostedMLTQuery = getBoostedQuery( rawMLTQuery );
-      if( terms != null ) {
-        fillInterestingTermsFromMLTQuery( rawMLTQuery, terms );
-      }
-
-      // exclude current document from results
-      BooleanQuery.Builder realMLTQuery = new BooleanQuery.Builder();
-      realMLTQuery.add(boostedMLTQuery, BooleanClause.Occur.MUST);
-      realMLTQuery.add(
-          new TermQuery(new Term(uniqueKeyField.getName(), uniqueKeyField.getType().storedToIndexed(doc.getField(uniqueKeyField.getName())))), 
-            BooleanClause.Occur.MUST_NOT);
-      this.realMLTQuery = realMLTQuery.build();
-      
-      DocListAndSet results = new DocListAndSet();
-      if (this.needDocSet) {
-        results = searcher.getDocListAndSet(this.realMLTQuery, filters, null, start, rows, flags);
-      } else {
-        results.docList = searcher.getDocList(this.realMLTQuery, filters, null, start, rows, flags);
-      }
-      return results;
-    }
-
-    public DocListAndSet getMoreLikeThis( Reader reader, int start, int rows, List<Query> filters, List<InterestingTerm> terms, int flags ) throws IOException
-    {
-      // SOLR-5351: if we only check against a single field, use the reader directly. Otherwise we
-      // repeat the stream's content for multiple fields so that query terms can be pulled from any
-      // of those fields.
-      String [] fields = mlt.getFieldNames();
-      if (fields.length == 1) {
-        rawMLTQuery = mlt.like(fields[0], reader);
-      } else {
-        CharsRefBuilder buffered = new CharsRefBuilder();
-        char [] chunk = new char [1024];
-        int len;
-        while ((len = reader.read(chunk)) >= 0) {
-          buffered.append(chunk, 0, len);
-        }
-
-        Collection<Object> streamValue = Collections.singleton(buffered.get().toString());
-        Map<String, Collection<Object>> multifieldDoc = new HashMap<>(fields.length);
-        for (String field : fields) {
-          multifieldDoc.put(field, streamValue);
-        }
-
-        rawMLTQuery = mlt.like(multifieldDoc);
-      }
-
-      boostedMLTQuery = getBoostedQuery( rawMLTQuery );
-      if (terms != null) {
-        fillInterestingTermsFromMLTQuery( boostedMLTQuery, terms );
-      }
-      DocListAndSet results = new DocListAndSet();
-      if (this.needDocSet) {
-        results = searcher.getDocListAndSet( boostedMLTQuery, filters, null, start, rows, flags);
-      } else {
-        results.docList = searcher.getDocList( boostedMLTQuery, filters, null, start, rows, flags);
-      }
-      return results;
-    }
-
-    public NamedList<BooleanQuery> getMoreLikeTheseQuery(DocList docs)
-        throws IOException {
-      IndexSchema schema = searcher.getSchema();
-      NamedList<BooleanQuery> result = new NamedList<>();
-      DocIterator iterator = docs.iterator();
-      while (iterator.hasNext()) {
-        int id = iterator.nextDoc();
-        String uniqueId = schema.printableUniqueKey(reader.document(id));
-
-        BooleanQuery mltquery = (BooleanQuery) mlt.like(id);
-        if (mltquery.clauses().size() == 0) {
-          return result;
-        }
-        mltquery = (BooleanQuery) getBoostedQuery(mltquery);
-        
-        // exclude current document from results
-        BooleanQuery.Builder mltQuery = new BooleanQuery.Builder();
-        mltQuery.add(mltquery, BooleanClause.Occur.MUST);
-        
-        mltQuery.add(
-            new TermQuery(new Term(uniqueKeyField.getName(), uniqueId)), BooleanClause.Occur.MUST_NOT);
-        result.add(uniqueId, mltQuery.build());
-      }
-
-      return result;
-    }
-    
-    private void fillInterestingTermsFromMLTQuery( Query query, List<InterestingTerm> terms )
-    { 
-      Collection<BooleanClause> clauses = ((BooleanQuery)query).clauses();
-      for( BooleanClause o : clauses ) {
-        Query q = o.getQuery();
-        float boost = 1f;
-        if (q instanceof BoostQuery) {
-          BoostQuery bq = (BoostQuery) q;
-          q = bq.getQuery();
-          boost = bq.getBoost();
-        }
-        InterestingTerm it = new InterestingTerm();
-        it.boost = boost;
-        it.term = ((TermQuery) q).getTerm();
-        terms.add( it );
-      } 
-      // alternatively we could use
-      // mltquery.extractTerms( terms );
-    }
-    
-    public MoreLikeThis getMoreLikeThis()
-    {
-      return mlt;
-    }
-  }
-  
-  
-  //////////////////////// SolrInfoMBeans methods //////////////////////
-
-  @Override
-  public String getDescription() {
-    return "Solr MoreLikeThis";
-  }
-}

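For the handler above, the only required parameter is mlt.fl (MoreLikeThisParams.SIMILARITY_FIELDS, per the helper's constructor); everything else falls back to the MoreLikeThis defaults. A hedged SolrJ usage sketch -- the base URL, core name, document id, handler path, and field names are placeholders:

    import org.apache.solr.client.solrj.SolrQuery;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.response.QueryResponse;

    public class MltDemo {
      public static void main(String[] args) throws Exception {
        try (HttpSolrClient client =
                 new HttpSolrClient.Builder("http://localhost:8983/solr/techproducts").build()) {
          SolrQuery q = new SolrQuery("id:SP2514N");  // the document to find neighbors of
          q.setRequestHandler("/mlt");                // assumes the handler is registered at /mlt
          q.set("mlt.fl", "name,features");           // required similarity fields
          q.set("mlt.interestingTerms", "details");   // also return term -> boost pairs
          q.set("mlt.mintf", 1);                      // min term frequency
          q.set("mlt.mindf", 1);                      // min document frequency
          QueryResponse rsp = client.query(q);
          System.out.println(rsp.getResults());       // the similar documents
        }
      }
    }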
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/NestedRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/NestedRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/NestedRequestHandler.java
deleted file mode 100644
index 7495ed0..0000000
--- a/solr/core/src/java/org/apache/solr/handler/NestedRequestHandler.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import org.apache.solr.request.SolrRequestHandler;
-
-
-/** An interface for RequestHandlers that need to handle all paths under their registered path. */
-public interface NestedRequestHandler {
-  /** Return a RequestHandler to handle a subpath of the path at which this handler is registered. */
-  SolrRequestHandler getSubHandler(String subPath);
-}

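A minimal sketch of an implementation, assuming a handler that pre-builds its children and routes by sub-path (a real handler would also extend RequestHandlerBase; only the routing half of the contract is shown, and the route names are hypothetical):

    import java.util.Map;
    import org.apache.solr.request.SolrRequestHandler;

    public class RoutingExample implements NestedRequestHandler {
      private final Map<String, SolrRequestHandler> routes; // e.g. "/stats" -> some handler

      public RoutingExample(Map<String, SolrRequestHandler> routes) {
        this.routes = routes;
      }

      @Override
      public SolrRequestHandler getSubHandler(String subPath) {
        return routes.get(subPath); // null means "no nested handler for this sub-path"
      }
    }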
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/NotFoundRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/NotFoundRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/NotFoundRequestHandler.java
deleted file mode 100644
index 511edbe..0000000
--- a/solr/core/src/java/org/apache/solr/handler/NotFoundRequestHandler.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-
-import static org.apache.solr.common.params.CommonParams.PATH;
-
-/**
- * Does nothing other than return a 404 "not found" error.
- */
-public class NotFoundRequestHandler extends RequestHandlerBase{
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    throw new SolrException(SolrException.ErrorCode.NOT_FOUND, "" + req.getContext().get(PATH) + " is not found");
-  }
-
-  @Override
-  public String getDescription() {
-    return "No Operation";
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/OldBackupDirectory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/OldBackupDirectory.java b/solr/core/src/java/org/apache/solr/handler/OldBackupDirectory.java
deleted file mode 100644
index ee78efe..0000000
--- a/solr/core/src/java/org/apache/solr/handler/OldBackupDirectory.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.net.URI;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.Locale;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-class OldBackupDirectory implements Comparable<OldBackupDirectory> {
-  private static final Pattern dirNamePattern = Pattern.compile("^snapshot[.](.*)$");
-
-  private URI basePath;
-  private String dirName;
-  private Optional<Date> timestamp = Optional.empty();
-
-  public OldBackupDirectory(URI basePath, String dirName) {
-    this.dirName = Objects.requireNonNull(dirName);
-    this.basePath = Objects.requireNonNull(basePath);
-    Matcher m = dirNamePattern.matcher(dirName);
-    if (m.find()) {
-      try {
-        this.timestamp = Optional.of(new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).parse(m.group(1)));
-      } catch (ParseException e) {
-        this.timestamp = Optional.empty();
-      }
-    }
-  }
-
-  public URI getPath() {
-    return this.basePath.resolve(dirName);
-  }
-
-  public String getDirName() {
-    return dirName;
-  }
-
-  public Optional<Date> getTimestamp() {
-    return timestamp;
-  }
-
-  @Override
-  public int compareTo(OldBackupDirectory that) {
-    if(this.timestamp.isPresent() && that.timestamp.isPresent()) {
-      return that.timestamp.get().compareTo(this.timestamp.get());
-    }
-    // Fall back to comparing paths in case the timestamp is missing on either side.
-    return that.getPath().compareTo(this.getPath());
-  }
-}

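The compareTo above sorts newest-first, which is what lets callers keep the N most recent snapshots and prune the rest. A hedged sketch of that pruning loop (the class is package-private, so this would live in org.apache.solr.handler; the snapshot names assume SnapShooter.DATE_FMT is a yyyyMMddHHmmssSSS-style pattern):

    package org.apache.solr.handler;

    import java.net.URI;
    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class BackupPruneDemo {
      public static void main(String[] args) {
        URI base = URI.create("file:///var/solr/backups/");
        List<OldBackupDirectory> dirs = new ArrayList<>();
        dirs.add(new OldBackupDirectory(base, "snapshot.20181020120000000"));
        dirs.add(new OldBackupDirectory(base, "snapshot.20181022120000000"));
        dirs.add(new OldBackupDirectory(base, "snapshot.20181021120000000"));
        Collections.sort(dirs); // newest first, per compareTo above
        int keep = 2;
        for (OldBackupDirectory old : dirs.subList(Math.min(keep, dirs.size()), dirs.size())) {
          System.out.println("would delete " + old.getPath()); // only the oldest land here
        }
      }
    }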
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/PingRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/PingRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/PingRequestHandler.java
deleted file mode 100644
index 0cd9e1d..0000000
--- a/solr/core/src/java/org/apache/solr/handler/PingRequestHandler.java
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.File;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.nio.file.Files;
-import java.time.Instant;
-import java.util.Locale;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.util.plugin.SolrCoreAware;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.DISTRIB;
-
-/**
- * Ping Request Handler for reporting SolrCore health to a Load Balancer.
- *
- * <p>
- * This handler is designed to be used as the endpoint for an HTTP 
- * Load-Balancer to use when checking the "health" or "up status" of a 
- * Solr server.
- * </p>
- * 
- * <p> 
- * In its simplest form, the PingRequestHandler should be
- * configured with some defaults indicating a request that should be
- * executed.  If the request succeeds, then the PingRequestHandler
- * will respond back with a simple "OK" status.  If the request fails,
- * then the PingRequestHandler will respond back with the
- * corresponding HTTP Error code.  Clients (such as load balancers)
- * can be configured to poll the PingRequestHandler monitoring for
- * these types of responses (or for a simple connection failure) to
- * know if there is a problem with the Solr server.
- * 
- * Note that when isShard=true, the PingRequestHandler responds with
- * whatever the delegated handler returns (by default the /select handler).
- * </p>
- *
- * <pre class="prettyprint">
- * &lt;requestHandler name="/admin/ping" class="solr.PingRequestHandler"&gt;
- *   &lt;lst name="invariants"&gt;
- *     &lt;str name="qt"&gt;/search&lt;/str&gt;&lt;!-- handler to delegate to --&gt;
- *     &lt;str name="q"&gt;some test query&lt;/str&gt;
- *   &lt;/lst&gt;
- * &lt;/requestHandler&gt;
- * </pre>
- *
- * <p>
- * A more advanced option is to configure the handler with a
- * "healthcheckFile", which can be used to enable/disable the PingRequestHandler.
- * </p>
- *
- * <pre class="prettyprint">
- * &lt;requestHandler name="/admin/ping" class="solr.PingRequestHandler"&gt;
- *   &lt;!-- relative paths are resolved against the data dir --&gt;
- *   &lt;str name="healthcheckFile"&gt;server-enabled.txt&lt;/str&gt;
- *   &lt;lst name="invariants"&gt;
- *     &lt;str name="qt"&gt;/search&lt;/str&gt;&lt;!-- handler to delegate to --&gt;
- *     &lt;str name="q"&gt;some test query&lt;/str&gt;
- *   &lt;/lst&gt;
- * &lt;/requestHandler&gt;
- * </pre>
- *
- * <ul>
- *   <li>If the health check file exists, the handler will execute the 
- *       delegated query and return status as described above.
- *   </li>
- *   <li>If the health check file does not exist, the handler will return 
- *       an HTTP error even if the server is working fine and the delegated 
- *       query would have succeeded
- *   </li>
- * </ul>
- *
- * <p> 
- * This health check file feature can be used as a way to indicate
- * to some Load Balancers that the server should be "removed from
- * rotation" for maintenance, or upgrades, or whatever reason you may
- * wish.  
- * </p>
- *
- * <p> 
- * The health check file may be created/deleted by any external
- * system, or the PingRequestHandler itself can be used to
- * create/delete the file by specifying an "action" param in a
- * request: 
- * </p>
- *
- * <ul>
- *   <li><code>http://.../ping?action=enable</code>
- *       - creates the health check file if it does not already exist
- *   </li>
- *   <li><code>http://.../ping?action=disable</code>
- *       - deletes the health check file if it exists
- *   </li>
- *   <li><code>http://.../ping?action=status</code>
- *       - returns a status code indicating if the healthcheck file exists 
- *       ("<code>enabled</code>") or not ("<code>disabled</code>")
- *   </li>
- * </ul>
- *
- * @since solr 1.3
- */
-public class PingRequestHandler extends RequestHandlerBase implements SolrCoreAware
-{
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String HEALTHCHECK_FILE_PARAM = "healthcheckFile";
-  protected enum ACTIONS {STATUS, ENABLE, DISABLE, PING};
-  
-  private String healthFileName = null;
-  private File healthcheck = null;
-
-  @Override
-  public void init(NamedList args) {
-    super.init(args);
-    Object tmp = args.get(HEALTHCHECK_FILE_PARAM);
-    healthFileName = (null == tmp ? null : tmp.toString());
-  }
-
-  @Override
-  public void inform( SolrCore core ) {
-    if (null != healthFileName) {
-      healthcheck = new File(healthFileName);
-      if ( ! healthcheck.isAbsolute()) {
-        healthcheck = new File(core.getDataDir(), healthFileName);
-        healthcheck = healthcheck.getAbsoluteFile();
-      }
-
-      if ( ! healthcheck.getParentFile().canWrite()) {
-        // this is not fatal, users may not care about enable/disable via 
-        // solr request, file might be touched/deleted by an external system
-        log.warn("Directory for configured healthcheck file is not writable by solr, PingRequestHandler will not be able to control enable/disable: {}",
-                 healthcheck.getParentFile().getAbsolutePath());
-      }
-
-    }
-    
-  }
-  
-  /**
-   * Returns true if a healthcheck flag-file is configured but does not exist;
-   * otherwise (no file configured, or the configured file exists)
-   * returns false.
-   */
-  public boolean isPingDisabled() {
-    return (null != healthcheck && ! healthcheck.exists() );
-  }
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception 
-  {
-    
-    SolrParams params = req.getParams();
-    
-    // in this case, we want to default distrib to false so
-    // we only ping the single node
-    Boolean distrib = params.getBool(DISTRIB);
-    if (distrib == null)   {
-      ModifiableSolrParams mparams = new ModifiableSolrParams(params);
-      mparams.set(DISTRIB, false);
-      req.setParams(mparams);
-    }
-    
-    String actionParam = params.get("action");
-    ACTIONS action = null;
-    if (actionParam == null){
-      action = ACTIONS.PING;
-    }
-    else {
-      try {
-        action = ACTIONS.valueOf(actionParam.toUpperCase(Locale.ROOT));
-      }
-      catch (IllegalArgumentException iae){
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, 
-        "Unknown action: " + actionParam);
-      }
-    }
-    switch(action){
-      case PING:
-        if( isPingDisabled() ) {
-          SolrException e = new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, 
-                                  "Service disabled");
-          rsp.setException(e);
-          return;
-        }
-        handlePing(req, rsp);
-        break;
-      case ENABLE:
-        handleEnable(true);
-        break;
-      case DISABLE:
-        handleEnable(false);
-        break;
-      case STATUS:
-        if( healthcheck == null ){
-          SolrException e = new SolrException
-            (SolrException.ErrorCode.SERVICE_UNAVAILABLE, 
-             "healthcheck not configured");
-          rsp.setException(e);
-        } else {
-          rsp.add( "status", isPingDisabled() ? "disabled" : "enabled" );      
-        }
-    }
-  }
-  
-  protected void handlePing(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception
-  {
-    
-    SolrParams params = req.getParams();
-    SolrCore core = req.getCore();
-    
-    // Get the RequestHandler
-    String qt = params.get( CommonParams.QT );//optional; you get the default otherwise    
-    SolrRequestHandler handler = core.getRequestHandler( qt );
-    if( handler == null ) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, 
-          "Unknown RequestHandler (qt): "+qt );
-    }
-    
-    if( handler instanceof PingRequestHandler ) {
-      // In case it's a query for shard, use default handler     
-      if (params.getBool(ShardParams.IS_SHARD, false)) {
-        handler = core.getRequestHandler( null );
-        ModifiableSolrParams wparams = new ModifiableSolrParams(params);
-        wparams.remove(CommonParams.QT);
-        req.setParams(wparams);
-      } else { 
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, 
-            "Cannot execute the PingRequestHandler recursively" );
-      }
-    }
-    
-    // Execute the ping query and catch any possible exception
-    Throwable ex = null;
-    
-    // In case it's a query for shard, return the result from delegated handler for distributed query to merge result
-    if (params.getBool(ShardParams.IS_SHARD, false)) {
-      try {
-        core.execute(handler, req, rsp );
-        ex = rsp.getException(); 
-      }
-      catch( Exception e ) {
-        ex = e;
-      }
-      // Send an error or return
-      if( ex != null ) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, 
-            "Ping query caused exception: "+ex.getMessage(), ex );
-      }
-    } else {
-      try {
-        SolrQueryResponse pingrsp = new SolrQueryResponse();
-        core.execute(handler, req, pingrsp );
-        ex = pingrsp.getException(); 
-        NamedList<Object> headers = rsp.getResponseHeader();
-        if(headers != null) {
-          headers.add("zkConnected", pingrsp.getResponseHeader().get("zkConnected"));
-        }
-        
-      }
-      catch( Exception e ) {
-        ex = e;
-      }
-      
-      // Send an error or an 'OK' message (response code will be 200)
-      if( ex != null ) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, 
-            "Ping query caused exception: "+ex.getMessage(), ex );
-      }
-      
-      rsp.add( "status", "OK" );     
-    }   
-
-  }
-  
-  protected void handleEnable(boolean enable) throws SolrException {
-    if (healthcheck == null) {
-      throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, 
-        "No healthcheck file defined.");
-    }
-    if ( enable ) {
-      try {
-        // write out when the file was created
-        FileUtils.write(healthcheck, Instant.now().toString(), "UTF-8");
-      } catch (IOException e) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, 
-                                "Unable to write healthcheck flag file", e);
-      }
-    } else {
-      try {
-        Files.deleteIfExists(healthcheck.toPath());
-      } catch (Throwable cause) {
-        throw new SolrException(SolrException.ErrorCode.NOT_FOUND,
-                                "Did not successfully delete healthcheck file: "
-                                +healthcheck.getAbsolutePath(), cause);
-      }
-    }
-  }
-  //////////////////////// SolrInfoMBeans methods //////////////////////
-
-  @Override
-  public String getDescription() {
-    return "Reports application health to a load-balancer";
-  }
-
-  @Override
-  public Boolean registerV2() {
-    return Boolean.TRUE;
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.ADMIN;
-  }
-}

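On the client side, SolrJ's ping() maps onto this handler. A hedged usage sketch (base URL and core name are placeholders; it assumes the handler is registered at the conventional /admin/ping path):

    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.response.SolrPingResponse;

    public class PingDemo {
      public static void main(String[] args) throws Exception {
        try (HttpSolrClient client =
                 new HttpSolrClient.Builder("http://localhost:8983/solr/techproducts").build()) {
          SolrPingResponse rsp = client.ping();                // issues a request to /admin/ping
          System.out.println(rsp.getStatus());                 // 0 when the ping query succeeded
          System.out.println(rsp.getResponse().get("status")); // "OK" when enabled and healthy
        }
      }
    }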
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/RealTimeGetHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/RealTimeGetHandler.java b/solr/core/src/java/org/apache/solr/handler/RealTimeGetHandler.java
deleted file mode 100644
index 9f2b693..0000000
--- a/solr/core/src/java/org/apache/solr/handler/RealTimeGetHandler.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
-import org.apache.solr.api.Api;
-import org.apache.solr.api.ApiBag;
-import org.apache.solr.handler.component.HttpShardHandler;
-import org.apache.solr.handler.component.RealTimeGetComponent;
-import org.apache.solr.handler.component.SearchHandler;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-
-
-public class RealTimeGetHandler extends SearchHandler {
-  @Override
-  protected List<String> getDefaultComponents()
-  {
-    List<String> names = new ArrayList<>(1);
-    names.add(RealTimeGetComponent.COMPONENT_NAME);
-    return names;
-  }
-  
-  
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    // Tell HttpShardHandler that this request should only be distributed to NRT replicas
-    req.getContext().put(HttpShardHandler.ONLY_NRT_REPLICAS, Boolean.TRUE);
-    super.handleRequestBody(req, rsp);
-  }
-
-  //////////////////////// SolrInfoMBeans methods //////////////////////
-
-  @Override
-  public String getDescription() {
-    return "The realtime get handler";
-  }
-
-  @Override
-  public Collection<Api> getApis() {
-    return ApiBag.wrapRequestHandlers(this, "core.RealtimeGet");
-  }
-
-  @Override
-  public Boolean registerV2() {
-    return Boolean.TRUE;
-  }
-}
-
-
-
-
-
-
-


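Since the handler above is just a SearchHandler wired to the RealTimeGetComponent, calling it only requires pointing a query at its registered path. A hedged SolrJ sketch (placeholder URL and ids; /get is the conventional registration):

    import org.apache.solr.client.solrj.SolrQuery;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;

    public class RtgDemo {
      public static void main(String[] args) throws Exception {
        try (HttpSolrClient client =
                 new HttpSolrClient.Builder("http://localhost:8983/solr/techproducts").build()) {
          SolrQuery q = new SolrQuery();
          q.setRequestHandler("/get");
          q.set("ids", "doc1,doc2");           // latest versions, even if not yet committed
          System.out.println(client.query(q)); // served from the update log when possible
        }
      }
    }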
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorManager.java b/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorManager.java
deleted file mode 100644
index 8ec3c8b..0000000
--- a/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorManager.java
+++ /dev/null
@@ -1,453 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Optional;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.http.client.HttpClient;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.CloudSolrClient.Builder;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CoreAdminRequest;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.IOUtils;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SolrjNamedThreadFactory;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.update.CdcrUpdateLog;
-import org.apache.solr.util.TimeOut;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.handler.admin.CoreAdminHandler.RESPONSE_STATUS;
-
-class CdcrReplicatorManager implements CdcrStateManager.CdcrStateObserver {
-
-  private static final int MAX_BOOTSTRAP_ATTEMPTS = 5;
-  private static final int BOOTSTRAP_RETRY_DELAY_MS = 2000;
-  // 6 hours is hopefully long enough for most indexes
-  private static final long BOOTSTRAP_TIMEOUT_SECONDS = 6L * 3600L;
-
-  private List<CdcrReplicatorState> replicatorStates;
-
-  private final CdcrReplicatorScheduler scheduler;
-  private CdcrProcessStateManager processStateManager;
-  private CdcrLeaderStateManager leaderStateManager;
-
-  private SolrCore core;
-  private String path;
-
-  private ExecutorService bootstrapExecutor;
-  private volatile BootstrapStatusRunnable bootstrapStatusRunnable;
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  CdcrReplicatorManager(final SolrCore core, String path,
-                        SolrParams replicatorConfiguration,
-                        Map<String, List<SolrParams>> replicasConfiguration) {
-    this.core = core;
-    this.path = path;
-
-    // create states
-    replicatorStates = new ArrayList<>();
-    String myCollection = core.getCoreDescriptor().getCloudDescriptor().getCollectionName();
-    List<SolrParams> targets = replicasConfiguration.get(myCollection);
-    if (targets != null) {
-      for (SolrParams params : targets) {
-        String zkHost = params.get(CdcrParams.ZK_HOST_PARAM);
-        String targetCollection = params.get(CdcrParams.TARGET_COLLECTION_PARAM);
-
-        CloudSolrClient client = new Builder(Collections.singletonList(zkHost), Optional.empty())
-            .sendUpdatesOnlyToShardLeaders()
-            .build();
-        client.setDefaultCollection(targetCollection);
-        replicatorStates.add(new CdcrReplicatorState(targetCollection, zkHost, client));
-      }
-    }
-
-    this.scheduler = new CdcrReplicatorScheduler(this, replicatorConfiguration);
-  }
-
-  void setProcessStateManager(final CdcrProcessStateManager processStateManager) {
-    this.processStateManager = processStateManager;
-    this.processStateManager.register(this);
-  }
-
-  void setLeaderStateManager(final CdcrLeaderStateManager leaderStateManager) {
-    this.leaderStateManager = leaderStateManager;
-    this.leaderStateManager.register(this);
-  }
-
-  /**
-   * <p>
-   * Inform the replicator manager of a change of state, and tell it to update its own state.
-   * </p>
-   * <p>
-   * If we are the leader and the process state is STARTED, we need to initialise the log readers and start the
-   * scheduled thread pool.
-   * Otherwise, if the process state is STOPPED or if we are not the leader, we need to close the log readers and stop
-   * the thread pool.
-   * </p>
-   * <p>
-   * This method is synchronised as it can both be called by the leaderStateManager and the processStateManager.
-   * </p>
-   */
-  @Override
-  public synchronized void stateUpdate() {
-    if (leaderStateManager.amILeader() && processStateManager.getState().equals(CdcrParams.ProcessState.STARTED)) {
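-      // size the pool so every configured target cluster can run its bootstrap-status check concurrently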
-      if (replicatorStates.size() > 0)  {
-        this.bootstrapExecutor = ExecutorUtil.newMDCAwareFixedThreadPool(replicatorStates.size(),
-            new SolrjNamedThreadFactory("cdcr-bootstrap-status"));
-      }
-      this.initLogReaders();
-      this.scheduler.start();
-      return;
-    }
-
-    this.scheduler.shutdown();
-    if (bootstrapExecutor != null)  {
-      IOUtils.closeQuietly(bootstrapStatusRunnable);
-      ExecutorUtil.shutdownAndAwaitTermination(bootstrapExecutor);
-    }
-    this.closeLogReaders();
-    Callable callable = core.getSolrCoreState().getCdcrBootstrapCallable();
-    if (callable != null)  {
-      CdcrRequestHandler.BootstrapCallable bootstrapCallable = (CdcrRequestHandler.BootstrapCallable) callable;
-      IOUtils.closeQuietly(bootstrapCallable);
-    }
-  }
-
-  List<CdcrReplicatorState> getReplicatorStates() {
-    return replicatorStates;
-  }
-
-  private void initLogReaders() {
-    String collectionName = core.getCoreDescriptor().getCloudDescriptor().getCollectionName();
-    String shard = core.getCoreDescriptor().getCloudDescriptor().getShardId();
-    CdcrUpdateLog ulog = (CdcrUpdateLog) core.getUpdateHandler().getUpdateLog();
-
-    for (CdcrReplicatorState state : replicatorStates) {
-      state.closeLogReader();
-      try {
-        long checkpoint = this.getCheckpoint(state);
-        log.info("Create new update log reader for target {} with checkpoint {} @ {}:{}", state.getTargetCollection(),
-            checkpoint, collectionName, shard);
-        CdcrUpdateLog.CdcrLogReader reader = ulog.newLogReader();
-        boolean seek = reader.seek(checkpoint);
-        state.init(reader);
-        if (!seek) {
-          // The checkpoint is lower than the oldest known entry.
-          // In this scenario, it probably means that there is a gap in the updates log.
-          // The best we can do here is to bootstrap the target leader by replicating the full index.
-          final String targetCollection = state.getTargetCollection();
-          state.setBootstrapInProgress(true);
-          log.info("Attempting to bootstrap target collection: {}, shard: {}", targetCollection, shard);
-          bootstrapStatusRunnable = new BootstrapStatusRunnable(core, state);
-          log.info("Submitting bootstrap task to executor");
-          try {
-            bootstrapExecutor.submit(bootstrapStatusRunnable);
-          } catch (Exception e) {
-            log.error("Unable to submit bootstrap call to executor", e);
-          }
-        }
-      } catch (IOException | SolrServerException | SolrException e) {
-        log.warn("Unable to instantiate the log reader for target collection " + state.getTargetCollection(), e);
-      } catch (InterruptedException e) {
-        log.warn("Thread interrupted while instantiate the log reader for target collection " + state.getTargetCollection(), e);
-        Thread.currentThread().interrupt();
-      }
-    }
-  }
-
-  private long getCheckpoint(CdcrReplicatorState state) throws IOException, SolrServerException {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(CommonParams.ACTION, CdcrParams.CdcrAction.COLLECTIONCHECKPOINT.toString());
-
-    SolrRequest request = new QueryRequest(params);
-    request.setPath(path);
-
-    NamedList response = state.getClient().request(request);
-    return (Long) response.get(CdcrParams.CHECKPOINT);
-  }
-
-  void closeLogReaders() {
-    for (CdcrReplicatorState state : replicatorStates) {
-      state.closeLogReader();
-    }
-  }
-
-  /**
-   * Shut down all the {@link org.apache.solr.handler.CdcrReplicatorState} by closing their
-   * {@link org.apache.solr.client.solrj.impl.CloudSolrClient} and
-   * {@link org.apache.solr.update.CdcrUpdateLog.CdcrLogReader}.
-   */
-  void shutdown() {
-    this.scheduler.shutdown();
-    if (bootstrapExecutor != null)  {
-      IOUtils.closeQuietly(bootstrapStatusRunnable);
-      ExecutorUtil.shutdownAndAwaitTermination(bootstrapExecutor);
-    }
-    for (CdcrReplicatorState state : replicatorStates) {
-      state.shutdown();
-    }
-    replicatorStates.clear();
-  }
-
-  private class BootstrapStatusRunnable implements Runnable, Closeable {
-    private final CdcrReplicatorState state;
-    private final String targetCollection;
-    private final String shard;
-    private final String collectionName;
-    private final CdcrUpdateLog ulog;
-    private final String myCoreUrl;
-
-    private volatile boolean closed = false;
-
-    BootstrapStatusRunnable(SolrCore core, CdcrReplicatorState state) {
-      this.collectionName = core.getCoreDescriptor().getCloudDescriptor().getCollectionName();
-      this.shard = core.getCoreDescriptor().getCloudDescriptor().getShardId();
-      this.ulog = (CdcrUpdateLog) core.getUpdateHandler().getUpdateLog();
-      this.state = state;
-      this.targetCollection = state.getTargetCollection();
-      String baseUrl = core.getCoreContainer().getZkController().getBaseUrl();
-      this.myCoreUrl = ZkCoreNodeProps.getCoreUrl(baseUrl, core.getName());
-    }
-
-    @Override
-    public void close() throws IOException {
-      closed = true;
-      try {
-        Replica leader = state.getClient().getZkStateReader().getLeaderRetry(targetCollection, shard, 30000); // assume same shard exists on target
-        String leaderCoreUrl = leader.getCoreUrl();
-        HttpClient httpClient = state.getClient().getLbClient().getHttpClient();
-        try (HttpSolrClient client = new HttpSolrClient.Builder(leaderCoreUrl).withHttpClient(httpClient).build()) {
-          sendCdcrCommand(client, CdcrParams.CdcrAction.CANCEL_BOOTSTRAP);
-        } catch (SolrServerException e) {
-          log.error("Error sending cancel bootstrap message to target collection: {} shard: {} leader: {}",
-              targetCollection, shard, leaderCoreUrl);
-        }
-      } catch (InterruptedException e) {
-        log.error("Interrupted while closing BootstrapStatusRunnable", e);
-        Thread.currentThread().interrupt();
-      }
-    }
-
-    @Override
-    public void run() {
-      int retries = 1;
-      boolean success = false;
-      try {
-        while (!closed && sendBootstrapCommand() != BootstrapStatus.SUBMITTED)  {
-          Thread.sleep(BOOTSTRAP_RETRY_DELAY_MS);
-        }
-        TimeOut timeOut = new TimeOut(BOOTSTRAP_TIMEOUT_SECONDS, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-        while (!timeOut.hasTimedOut()) {
-          if (closed) {
-            log.warn("Cancelling waiting for bootstrap on target: {} shard: {} to complete", targetCollection, shard);
-            state.setBootstrapInProgress(false);
-            break;
-          }
-          BootstrapStatus status = getBootstrapStatus();
-          if (status == BootstrapStatus.RUNNING) {
-            try {
-              log.info("CDCR bootstrap running for {} seconds, sleeping for {} ms",
-                  BOOTSTRAP_TIMEOUT_SECONDS - timeOut.timeLeft(TimeUnit.SECONDS), BOOTSTRAP_RETRY_DELAY_MS);
-              timeOut.sleep(BOOTSTRAP_RETRY_DELAY_MS);
-            } catch (InterruptedException e) {
-              Thread.currentThread().interrupt();
-            }
-          } else if (status == BootstrapStatus.COMPLETED) {
-            log.info("CDCR bootstrap successful in {} seconds", BOOTSTRAP_TIMEOUT_SECONDS - timeOut.timeLeft(TimeUnit.SECONDS));
-            long checkpoint = CdcrReplicatorManager.this.getCheckpoint(state);
-            log.info("Create new update log reader for target {} with checkpoint {} @ {}:{}", state.getTargetCollection(),
-                checkpoint, collectionName, shard);
-            CdcrUpdateLog.CdcrLogReader reader1 = ulog.newLogReader();
-            reader1.seek(checkpoint);
-            // issue asynchronous request_recovery to the follower nodes of the shards of target collection
-            sendRequestRecoveryToFollowers(state);
-            success = true;
-            break;
-          } else if (status == BootstrapStatus.FAILED) {
-            log.warn("CDCR bootstrap failed in {} seconds", BOOTSTRAP_TIMEOUT_SECONDS - timeOut.timeLeft(TimeUnit.SECONDS));
-            // let's retry a fixed number of times before giving up
-            if (retries >= MAX_BOOTSTRAP_ATTEMPTS) {
-              log.error("Unable to bootstrap the target collection: {}, shard: {} even after {} retries", targetCollection, shard, retries);
-              break;
-            } else {
-              log.info("Retry: {} - Attempting to bootstrap target collection: {} shard: {}", retries, targetCollection, shard);
-              while (!closed && sendBootstrapCommand() != BootstrapStatus.SUBMITTED)  {
-                Thread.sleep(BOOTSTRAP_RETRY_DELAY_MS);
-              }
-              timeOut = new TimeOut(BOOTSTRAP_TIMEOUT_SECONDS, TimeUnit.SECONDS, TimeSource.NANO_TIME); // reset the timer
-              retries++;
-            }
-          } else if (status == BootstrapStatus.NOTFOUND || status == BootstrapStatus.CANCELLED) {
-            log.info("CDCR bootstrap " + (status == BootstrapStatus.NOTFOUND ? "not found" : "cancelled") + "in {} seconds",
-                BOOTSTRAP_TIMEOUT_SECONDS - timeOut.timeLeft(TimeUnit.SECONDS));
-            // the leader of the target shard may have changed and therefore there is no record of the
-            // bootstrap process so we must retry the operation
-            while (!closed && sendBootstrapCommand() != BootstrapStatus.SUBMITTED)  {
-              Thread.sleep(BOOTSTRAP_RETRY_DELAY_MS);
-            }
-            retries = 1;
-            timeOut = new TimeOut(BOOTSTRAP_TIMEOUT_SECONDS, TimeUnit.SECONDS, TimeSource.NANO_TIME); // reset the timer
-          } else if (status == BootstrapStatus.UNKNOWN || status == BootstrapStatus.SUBMITTED) {
-            log.info("CDCR bootstrap is " + (status == BootstrapStatus.UNKNOWN ? "unknown" : "submitted"),
-                BOOTSTRAP_TIMEOUT_SECONDS - timeOut.timeLeft(TimeUnit.SECONDS));
-            // we were not able to query the status on the remote end
-            // so just sleep for a bit and try again
-            timeOut.sleep(BOOTSTRAP_RETRY_DELAY_MS);
-          }
-        }
-      } catch (InterruptedException e) {
-        log.info("Bootstrap thread interrupted");
-        state.reportError(CdcrReplicatorState.ErrorType.INTERNAL);
-        Thread.currentThread().interrupt();
-      } catch (IOException | SolrServerException | SolrException e) {
-        log.error("Unable to bootstrap the target collection " + targetCollection + " shard: " + shard, e);
-        state.reportError(CdcrReplicatorState.ErrorType.BAD_REQUEST);
-      } finally {
-        if (success) {
-          log.info("Bootstrap successful, giving the go-ahead to replicator");
-          state.setBootstrapInProgress(false);
-        }
-      }
-    }
-
-    private BootstrapStatus sendBootstrapCommand() throws InterruptedException {
-      Replica leader = state.getClient().getZkStateReader().getLeaderRetry(targetCollection, shard, 30000); // assume same shard exists on target
-      String leaderCoreUrl = leader.getCoreUrl();
-      HttpClient httpClient = state.getClient().getLbClient().getHttpClient();
-      try (HttpSolrClient client = new HttpSolrClient.Builder(leaderCoreUrl).withHttpClient(httpClient).build()) {
-        log.info("Attempting to bootstrap target collection: {} shard: {} leader: {}", targetCollection, shard, leaderCoreUrl);
-        try {
-          NamedList response = sendCdcrCommand(client, CdcrParams.CdcrAction.BOOTSTRAP, ReplicationHandler.MASTER_URL, myCoreUrl);
-          log.debug("CDCR Bootstrap response: {}", response);
-          String status = response.get(RESPONSE_STATUS).toString();
-          return BootstrapStatus.valueOf(status.toUpperCase(Locale.ROOT));
-        } catch (Exception e) {
-          log.error("Exception submitting bootstrap request", e);
-          return BootstrapStatus.UNKNOWN;
-        }
-      } catch (IOException e) {
-        log.error("There shouldn't be an IOException while closing but there was!", e);
-      }
-      return BootstrapStatus.UNKNOWN;
-    }
-
-    private BootstrapStatus getBootstrapStatus() throws InterruptedException {
-      try {
-        Replica leader = state.getClient().getZkStateReader().getLeaderRetry(targetCollection, shard, 30000); // assume same shard exists on target
-        String leaderCoreUrl = leader.getCoreUrl();
-        HttpClient httpClient = state.getClient().getLbClient().getHttpClient();
-        try (HttpSolrClient client = new HttpSolrClient.Builder(leaderCoreUrl).withHttpClient(httpClient).build()) {
-          NamedList response = sendCdcrCommand(client, CdcrParams.CdcrAction.BOOTSTRAP_STATUS);
-          String status = (String) response.get(RESPONSE_STATUS);
-          BootstrapStatus bootstrapStatus = BootstrapStatus.valueOf(status.toUpperCase(Locale.ROOT));
-          if (bootstrapStatus == BootstrapStatus.RUNNING) {
-            return BootstrapStatus.RUNNING;
-          } else if (bootstrapStatus == BootstrapStatus.COMPLETED) {
-            return BootstrapStatus.COMPLETED;
-          } else if (bootstrapStatus == BootstrapStatus.FAILED) {
-            return BootstrapStatus.FAILED;
-          } else if (bootstrapStatus == BootstrapStatus.NOTFOUND) {
-            log.warn("Bootstrap process was not found on target collection: {} shard: {}, leader: {}", targetCollection, shard, leaderCoreUrl);
-            return BootstrapStatus.NOTFOUND;
-          } else if (bootstrapStatus == BootstrapStatus.CANCELLED) {
-            return BootstrapStatus.CANCELLED;
-          } else {
-            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-                "Unknown status: " + status + " returned by BOOTSTRAP_STATUS command");
-          }
-        }
-      } catch (Exception e) {
-        log.error("Exception during bootstrap status request", e);
-        return BootstrapStatus.UNKNOWN;
-      }
-    }
-  }
-
-  private NamedList sendCdcrCommand(SolrClient client, CdcrParams.CdcrAction action, String... params) throws SolrServerException, IOException {
-    ModifiableSolrParams solrParams = new ModifiableSolrParams();
-    solrParams.set(CommonParams.QT, "/cdcr");
-    solrParams.set(CommonParams.ACTION, action.toString());
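-    // the trailing varargs arrive as alternating key/value pairs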
-    for (int i = 0; i < params.length - 1; i+=2) {
-      solrParams.set(params[i], params[i + 1]);
-    }
-    SolrRequest request = new QueryRequest(solrParams);
-    return client.request(request);
-  }
-
-  private void sendRequestRecoveryToFollowers(CdcrReplicatorState state) throws SolrServerException, IOException {
-    Collection<Slice> slices = state.getClient().getZkStateReader().getClusterState().getCollection(state.getTargetCollection()).getActiveSlices();
-    for (Slice slice : slices) {
-      Collection<Replica> replicas = slice.getReplicas();
-      for (Replica replica : replicas) {
-        if (slice.getLeader().getCoreName().equals(replica.getCoreName())) {
-          continue; // no need to request recovery for leader
-        }
-        sendRequestRecoveryToFollower(state.getClient(), replica.getCoreName());
-        log.info("RequestRecovery cmd is issued by core: " + replica.getCoreName() + " of shard: " + slice.getName() +
-            "for target: " + state.getTargetCollection());
-      }
-    }
-  }
-
-  private NamedList sendRequestRecoveryToFollower(SolrClient client, String coreName) throws SolrServerException, IOException {
-    CoreAdminRequest.RequestRecovery recoverRequestCmd = new CoreAdminRequest.RequestRecovery();
-    recoverRequestCmd.setAction(CoreAdminParams.CoreAdminAction.REQUESTRECOVERY);
-    recoverRequestCmd.setCoreName(coreName);
-    return client.request(recoverRequestCmd);
-  }
-
-
-  private enum BootstrapStatus  {
-    SUBMITTED,
-    RUNNING,
-    COMPLETED,
-    FAILED,
-    NOTFOUND,
-    CANCELLED,
-    UNKNOWN
-  }
-}
-
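The COLLECTIONCHECKPOINT round-trip performed by getCheckpoint() above can be reproduced from any SolrJ client. Below is a minimal sketch, assuming a reachable target cluster; the ZooKeeper address "target-zk:2181", the collection name "target_collection", and the /cdcr handler path are placeholders, and the literal "COLLECTIONCHECKPOINT" and "checkpoint" strings stand in for the CdcrParams constants used in the code above.

import java.util.Collections;
import java.util.Optional;

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;

public class CdcrCheckpointExample {
  public static void main(String[] args) throws Exception {
    // placeholder ZK address and collection - substitute a real target cluster
    try (CloudSolrClient client = new CloudSolrClient.Builder(
        Collections.singletonList("target-zk:2181"), Optional.empty()).build()) {
      client.setDefaultCollection("target_collection");

      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set(CommonParams.ACTION, "COLLECTIONCHECKPOINT");
      SolrRequest request = new QueryRequest(params);
      request.setPath("/cdcr"); // registered handler path, as in getCheckpoint() above

      NamedList response = client.request(request);
      System.out.println("checkpoint = " + response.get("checkpoint"));
    }
  }
}

The returned value is the lowest SHARDCHECKPOINT across the target's shard leaders, which is why the reader seek in initLogReaders() can treat it as a safe lower bound.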

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorScheduler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorScheduler.java b/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorScheduler.java
deleted file mode 100644
index 62abeab..0000000
--- a/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorScheduler.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.util.DefaultSolrThreadFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.lang.invoke.MethodHandles;
-import java.util.concurrent.*;
-
-/**
- * Schedule the execution of the {@link org.apache.solr.handler.CdcrReplicator} threads at
- * a regular time interval. It relies on a queue of {@link org.apache.solr.handler.CdcrReplicatorState} in
- * order to ensure that no {@link org.apache.solr.handler.CdcrReplicatorState} is used by two threads at the same
- * time.
- */
-class CdcrReplicatorScheduler {
-
-  private boolean isStarted = false;
-
-  private ScheduledExecutorService scheduler;
-  private ExecutorService replicatorsPool;
-
-  private final CdcrReplicatorManager replicatorManager;
-  private final ConcurrentLinkedQueue<CdcrReplicatorState> statesQueue;
-
-  private int poolSize = DEFAULT_POOL_SIZE;
-  private int timeSchedule = DEFAULT_TIME_SCHEDULE;
-  private int batchSize = DEFAULT_BATCH_SIZE;
-
-  private static final int DEFAULT_POOL_SIZE = 2;
-  private static final int DEFAULT_TIME_SCHEDULE = 10;
-  private static final int DEFAULT_BATCH_SIZE = 128;
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  CdcrReplicatorScheduler(final CdcrReplicatorManager replicatorStatesManager, final SolrParams replicatorConfiguration) {
-    this.replicatorManager = replicatorStatesManager;
-    this.statesQueue = new ConcurrentLinkedQueue<>(replicatorManager.getReplicatorStates());
-    if (replicatorConfiguration != null) {
-      poolSize = replicatorConfiguration.getInt(CdcrParams.THREAD_POOL_SIZE_PARAM, DEFAULT_POOL_SIZE);
-      timeSchedule = replicatorConfiguration.getInt(CdcrParams.SCHEDULE_PARAM, DEFAULT_TIME_SCHEDULE);
-      batchSize = replicatorConfiguration.getInt(CdcrParams.BATCH_SIZE_PARAM, DEFAULT_BATCH_SIZE);
-    }
-  }
-
-  void start() {
-    if (!isStarted) {
-      scheduler = Executors.newSingleThreadScheduledExecutor(new DefaultSolrThreadFactory("cdcr-scheduler"));
-      replicatorsPool = ExecutorUtil.newMDCAwareFixedThreadPool(poolSize, new DefaultSolrThreadFactory("cdcr-replicator"));
-
-      // the scheduler thread runs at the configured interval (timeSchedule, in ms) and
-      // submits one replication task per available state in the queue
-      scheduler.scheduleWithFixedDelay(() -> {
-        int nCandidates = statesQueue.size();
-        for (int i = 0; i < nCandidates; i++) {
-          // a worker thread polls one state from the queue, executes the replication task, and
-          // pushes the state back into the queue when the task is completed
-          replicatorsPool.execute(() -> {
-            CdcrReplicatorState state = statesQueue.poll();
-            assert state != null; // Should never happen
-            try {
-              if (!state.isBootstrapInProgress()) {
-                new CdcrReplicator(state, batchSize).run();
-              } else  {
-                log.debug("Replicator state is bootstrapping, skipping replication for target collection {}", state.getTargetCollection());
-              }
-            } finally {
-              statesQueue.offer(state);
-            }
-          });
-
-        }
-      }, 0, timeSchedule, TimeUnit.MILLISECONDS);
-      isStarted = true;
-    }
-  }
-
-  void shutdown() {
-    if (isStarted) {
-      // interrupts are often dangerous in Lucene / Solr code, but without
-      // them the tests for this class will leak threads
-      replicatorsPool.shutdown();
-      try {
-        replicatorsPool.awaitTermination(60, TimeUnit.SECONDS);
-      } catch (InterruptedException e) {
-        log.warn("Thread interrupted while waiting for CDCR replicator threadpool close.");
-        Thread.currentThread().interrupt();
-      } finally {
-        scheduler.shutdownNow();
-        isStarted = false;
-      }
-    }
-  }
-
-}
-
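The scheduler above hands replicator states out through a queue so that a state is never processed by two workers at once. The following self-contained sketch shows the same poll / execute / re-offer pattern with plain java.util.concurrent types; the State class and the printed message are illustrative stand-ins, not Solr API.

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class StatesQueueScheduler {
  static class State { final String target; State(String target) { this.target = target; } }

  public static void main(String[] args) throws Exception {
    ConcurrentLinkedQueue<State> states = new ConcurrentLinkedQueue<>();
    states.add(new State("collection_a"));
    states.add(new State("collection_b"));

    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    ExecutorService workers = Executors.newFixedThreadPool(2);

    scheduler.scheduleWithFixedDelay(() -> {
      int candidates = states.size();      // submit at most one task per queued state
      for (int i = 0; i < candidates; i++) {
        workers.execute(() -> {
          State state = states.poll();     // exclusive ownership while polled
          if (state == null) return;       // queue drained by a concurrent worker
          try {
            System.out.println("replicating to " + state.target);
          } finally {
            states.offer(state);           // hand the state back for the next round
          }
        });
      }
    }, 0, 10, TimeUnit.MILLISECONDS);

    Thread.sleep(50);
    scheduler.shutdownNow();
    workers.shutdownNow();
  }
}

Because a state only re-enters the queue in the finally block, one slow replication delays that single target without blocking the others.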

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorState.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorState.java b/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorState.java
deleted file mode 100644
index bf80608..0000000
--- a/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorState.java
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.time.Instant;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.update.CdcrUpdateLog;
-import org.apache.solr.update.UpdateLog;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * The state of the replication with a target cluster.
- */
-class CdcrReplicatorState {
-
-  private final String targetCollection;
-  private final String zkHost;
-  private final CloudSolrClient targetClient;
-
-  private CdcrUpdateLog.CdcrLogReader logReader;
-
-  private long consecutiveErrors = 0;
-  private final Map<ErrorType, Long> errorCounters = new HashMap<>();
-  private final FixedQueue<ErrorQueueEntry> errorsQueue = new FixedQueue<>(100); // keep the last 100 errors
-
-  private BenchmarkTimer benchmarkTimer;
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final AtomicBoolean bootstrapInProgress = new AtomicBoolean(false);
-  private final AtomicInteger numBootstraps = new AtomicInteger();
-
-  CdcrReplicatorState(final String targetCollection, final String zkHost, final CloudSolrClient targetClient) {
-    this.targetCollection = targetCollection;
-    this.targetClient = targetClient;
-    this.zkHost = zkHost;
-    this.benchmarkTimer = new BenchmarkTimer();
-  }
-
-  /**
-   * Initialise the replicator state with a {@link org.apache.solr.update.CdcrUpdateLog.CdcrLogReader}
-   * that is positioned at the last target cluster checkpoint.
-   */
-  void init(final CdcrUpdateLog.CdcrLogReader logReader) {
-    this.logReader = logReader;
-  }
-
-  void closeLogReader() {
-    if (logReader != null) {
-      logReader.close();
-      logReader = null;
-    }
-  }
-
-  CdcrUpdateLog.CdcrLogReader getLogReader() {
-    return logReader;
-  }
-
-  String getTargetCollection() {
-    return targetCollection;
-  }
-
-  String getZkHost() {
-    return zkHost;
-  }
-
-  CloudSolrClient getClient() {
-    return targetClient;
-  }
-
-  void shutdown() {
-    try {
-      targetClient.close();
-    } catch (IOException ioe) {
-      log.warn("Caught exception trying to close server: ", ioe.getMessage());
-    }
-    this.closeLogReader();
-  }
-
-  void reportError(ErrorType error) {
-    if (!errorCounters.containsKey(error)) {
-      errorCounters.put(error, 0L);
-    }
-    errorCounters.put(error, errorCounters.get(error) + 1);
-    errorsQueue.add(new ErrorQueueEntry(error, new Date()));
-    consecutiveErrors++;
-  }
-
-  void resetConsecutiveErrors() {
-    consecutiveErrors = 0;
-  }
-
-  /**
-   * Returns the number of consecutive errors encountered while trying to forward updates to the target.
-   */
-  long getConsecutiveErrors() {
-    return consecutiveErrors;
-  }
-
-  /**
-   * Gets the number of errors of a particular type.
-   */
-  long getErrorCount(ErrorType type) {
-    if (errorCounters.containsKey(type)) {
-      return errorCounters.get(type);
-    } else {
-      return 0;
-    }
-  }
-
-  /**
-   * Gets the last errors ordered by timestamp (most recent first)
-   */
-  List<String[]> getLastErrors() {
-    List<String[]> lastErrors = new ArrayList<>();
-    synchronized (errorsQueue) {
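-      // iteration over the LinkedList is not thread-safe; add() synchronizes on the same monitor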
-      Iterator<ErrorQueueEntry> it = errorsQueue.iterator();
-      while (it.hasNext()) {
-        ErrorQueueEntry entry = it.next();
-        lastErrors.add(new String[]{entry.timestamp.toInstant().toString(), entry.type.toLower()});
-      }
-    }
-    return lastErrors;
-  }
-
-  /**
-   * Return the timestamp of the last processed operation
-   */
-  String getTimestampOfLastProcessedOperation() {
-    if (logReader != null && logReader.getLastVersion() != -1) {
-      // Shift the version number right by 20 bits to recover the epoch timestamp - see VersionInfo#getNewClock
-      return Instant.ofEpochMilli(logReader.getLastVersion() >> 20).toString();
-    }
-    return "";
-  }
-
-  /**
-   * Gets the benchmark timer.
-   */
-  BenchmarkTimer getBenchmarkTimer() {
-    return this.benchmarkTimer;
-  }
-
-  /**
-   * @return true if a bootstrap operation is in progress, false otherwise
-   */
-  boolean isBootstrapInProgress() {
-    return bootstrapInProgress.get();
-  }
-
-  void setBootstrapInProgress(boolean inProgress) {
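-    // a true -> false transition marks the completion of one bootstrap attempt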
-    if (bootstrapInProgress.compareAndSet(true, false)) {
-      numBootstraps.incrementAndGet();
-    }
-    bootstrapInProgress.set(inProgress);
-  }
-
-  public int getNumBootstraps() {
-    return numBootstraps.get();
-  }
-
-  enum ErrorType {
-    INTERNAL,
-    BAD_REQUEST;
-
-    public String toLower() {
-      return toString().toLowerCase(Locale.ROOT);
-    }
-
-  }
-
-  static class BenchmarkTimer {
-
-    private long startTime;
-    private long runTime = 0;
-    private Map<Integer, Long> opCounters = new HashMap<>();
-
-    /**
-     * Start recording time.
-     */
-    void start() {
-      startTime = System.nanoTime();
-    }
-
-    /**
-     * Stop recording time.
-     */
-    void stop() {
-      runTime += System.nanoTime() - startTime;
-      startTime = -1;
-    }
-
-    void incrementCounter(final int operationType) {
-      switch (operationType) {
-        case UpdateLog.ADD:
-        case UpdateLog.DELETE:
-        case UpdateLog.DELETE_BY_QUERY: {
-          if (!opCounters.containsKey(operationType)) {
-            opCounters.put(operationType, 0L);
-          }
-          opCounters.put(operationType, opCounters.get(operationType) + 1);
-          return;
-        }
-
-        default:
-      }
-    }
-
-    long getRunTime() {
-      long totalRunTime = runTime;
-      if (startTime != -1) { // we are currently recording the time
-        totalRunTime += System.nanoTime() - startTime;
-      }
-      return totalRunTime;
-    }
-
-    double getOperationsPerSecond() {
-      long total = 0;
-      for (long counter : opCounters.values()) {
-        total += counter;
-      }
-      double elapsedTimeInSeconds = ((double) this.getRunTime() / 1E9);
-      return total / elapsedTimeInSeconds;
-    }
-
-    double getAddsPerSecond() {
-      long total = opCounters.get(UpdateLog.ADD) != null ? opCounters.get(UpdateLog.ADD) : 0;
-      double elapsedTimeInSeconds = ((double) this.getRunTime() / 1E9);
-      return total / elapsedTimeInSeconds;
-    }
-
-    double getDeletesPerSecond() {
-      long total = opCounters.get(UpdateLog.DELETE) != null ? opCounters.get(UpdateLog.DELETE) : 0;
-      total += opCounters.get(UpdateLog.DELETE_BY_QUERY) != null ? opCounters.get(UpdateLog.DELETE_BY_QUERY) : 0;
-      double elapsedTimeInSeconds = ((double) this.getRunTime() / 1E9);
-      return total / elapsedTimeInSeconds;
-    }
-
-  }
-
-  private static class ErrorQueueEntry {
-
-    private ErrorType type;
-    private Date timestamp;
-
-    private ErrorQueueEntry(ErrorType type, Date timestamp) {
-      this.type = type;
-      this.timestamp = timestamp;
-    }
-  }
-
-  private static class FixedQueue<E> extends LinkedList<E> {
-
-    private int maxSize;
-
-    public FixedQueue(int maxSize) {
-      this.maxSize = maxSize;
-    }
-
-    @Override
-    public synchronized boolean add(E e) {
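-      // newest entries sit at the head; once over maxSize the oldest is evicted from the tail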
-      super.addFirst(e);
-      if (size() > maxSize) {
-        removeLast();
-      }
-      return true;
-    }
-  }
-
-}
-
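getTimestampOfLastProcessedOperation() above depends on the layout of Solr version numbers: VersionInfo#getNewClock packs an epoch-millisecond clock into the bits above bit 20, so shifting right by 20 recovers the wall-clock time. A small sketch of the decode, using a synthetic version built the same way:

import java.time.Instant;

public class VersionTimestampExample {
  public static void main(String[] args) {
    long nowMillis = System.currentTimeMillis();
    long version = nowMillis << 20;                    // how a clock-based version is built
    Instant decoded = Instant.ofEpochMilli(version >> 20);
    System.out.println("version   = " + version);
    System.out.println("timestamp = " + decoded);      // equals nowMillis, to the millisecond
  }
}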

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java
deleted file mode 100644
index 1453841..0000000
--- a/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java
+++ /dev/null
@@ -1,861 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.Lock;
-
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.params.UpdateParams;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.IOUtils;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.CloseHook;
-import org.apache.solr.core.PluginBag;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.request.SolrRequestInfo;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.update.CdcrUpdateLog;
-import org.apache.solr.update.SolrCoreState;
-import org.apache.solr.update.UpdateLog;
-import org.apache.solr.update.VersionInfo;
-import org.apache.solr.update.processor.DistributedUpdateProcessor;
-import org.apache.solr.util.DefaultSolrThreadFactory;
-import org.apache.solr.util.plugin.SolrCoreAware;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.handler.admin.CoreAdminHandler.COMPLETED;
-import static org.apache.solr.handler.admin.CoreAdminHandler.FAILED;
-import static org.apache.solr.handler.admin.CoreAdminHandler.RESPONSE;
-import static org.apache.solr.handler.admin.CoreAdminHandler.RESPONSE_MESSAGE;
-import static org.apache.solr.handler.admin.CoreAdminHandler.RESPONSE_STATUS;
-import static org.apache.solr.handler.admin.CoreAdminHandler.RUNNING;
-
-/**
- * <p>
- * This request handler implements the CDCR API and is responsible for the execution of the
- * {@link CdcrReplicator} threads.
- * </p>
- * <p>
- * It relies on three classes, {@link org.apache.solr.handler.CdcrLeaderStateManager},
- * {@link org.apache.solr.handler.CdcrBufferStateManager} and {@link org.apache.solr.handler.CdcrProcessStateManager}
- * to synchronise the state of the CDCR across all the nodes.
- * </p>
- * <p>
- * The CDCR process can be either {@link org.apache.solr.handler.CdcrParams.ProcessState#STOPPED} or {@link org.apache.solr.handler.CdcrParams.ProcessState#STARTED} by using the
- * actions {@link org.apache.solr.handler.CdcrParams.CdcrAction#STOP} and {@link org.apache.solr.handler.CdcrParams.CdcrAction#START} respectively. If a node is leader and the process
- * state is {@link org.apache.solr.handler.CdcrParams.ProcessState#STARTED}, the {@link CdcrReplicatorManager} will
- * start the {@link CdcrReplicator} threads. If a node becomes non-leader or if the process state becomes
- * {@link org.apache.solr.handler.CdcrParams.ProcessState#STOPPED}, the {@link CdcrReplicator} threads are stopped.
- * </p>
- * <p>
- * The CDCR can be switched to a "buffering" mode, in which the update log will never delete old transaction log
- * files. Such a mode can be enabled or disabled using the action {@link org.apache.solr.handler.CdcrParams.CdcrAction#ENABLEBUFFER} and
- * {@link org.apache.solr.handler.CdcrParams.CdcrAction#DISABLEBUFFER} respectively.
- * </p>
- * <p>
- * Known limitations: The source and target clusters must have the same topology. Replication between clusters
- * with a different number of shards will likely result in an inconsistent index.
- * </p>
- */
-public class CdcrRequestHandler extends RequestHandlerBase implements SolrCoreAware {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private SolrCore core;
-  private String collection;
-  private String shard;
-  private String path;
-
-  private SolrParams updateLogSynchronizerConfiguration;
-  private SolrParams replicatorConfiguration;
-  private SolrParams bufferConfiguration;
-  private Map<String, List<SolrParams>> replicasConfiguration;
-
-  private CdcrProcessStateManager processStateManager;
-  private CdcrBufferStateManager bufferStateManager;
-  private CdcrReplicatorManager replicatorManager;
-  private CdcrLeaderStateManager leaderStateManager;
-  private CdcrUpdateLogSynchronizer updateLogSynchronizer;
-  private CdcrBufferManager bufferManager;
-
-  @Override
-  public void init(NamedList args) {
-    super.init(args);
-
-    if (args != null) {
-      // Configuration of the Update Log Synchronizer
-      Object updateLogSynchronizerParam = args.get(CdcrParams.UPDATE_LOG_SYNCHRONIZER_PARAM);
-      if (updateLogSynchronizerParam instanceof NamedList) {
-        updateLogSynchronizerConfiguration = ((NamedList) updateLogSynchronizerParam).toSolrParams();
-      }
-
-      // Configuration of the Replicator
-      Object replicatorParam = args.get(CdcrParams.REPLICATOR_PARAM);
-      if (replicatorParam instanceof NamedList) {
-        replicatorConfiguration = ((NamedList) replicatorParam).toSolrParams();
-      }
-
-      // Configuration of the Buffer
-      Object bufferParam = args.get(CdcrParams.BUFFER_PARAM);
-      if (bufferParam instanceof NamedList) {
-        bufferConfiguration = ((NamedList) bufferParam).toSolrParams();
-      }
-
-      // Configuration of the Replicas
-      replicasConfiguration = new HashMap<>();
-      List replicas = args.getAll(CdcrParams.REPLICA_PARAM);
-      for (Object replica : replicas) {
-        if (replica instanceof NamedList) {
-          SolrParams params = ((NamedList) replica).toSolrParams();
-          if (!replicasConfiguration.containsKey(params.get(CdcrParams.SOURCE_COLLECTION_PARAM))) {
-            replicasConfiguration.put(params.get(CdcrParams.SOURCE_COLLECTION_PARAM), new ArrayList<>());
-          }
-          replicasConfiguration.get(params.get(CdcrParams.SOURCE_COLLECTION_PARAM)).add(params);
-        }
-      }
-    }
-  }
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    // Pick the action
-    SolrParams params = req.getParams();
-    CdcrParams.CdcrAction action = null;
-    String a = params.get(CommonParams.ACTION);
-    if (a != null) {
-      action = CdcrParams.CdcrAction.get(a);
-    }
-    if (action == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown action: " + a);
-    }
-
-    switch (action) {
-      case START: {
-        this.handleStartAction(req, rsp);
-        break;
-      }
-      case STOP: {
-        this.handleStopAction(req, rsp);
-        break;
-      }
-      case STATUS: {
-        this.handleStatusAction(req, rsp);
-        break;
-      }
-      case COLLECTIONCHECKPOINT: {
-        this.handleCollectionCheckpointAction(req, rsp);
-        break;
-      }
-      case SHARDCHECKPOINT: {
-        this.handleShardCheckpointAction(req, rsp);
-        break;
-      }
-      case ENABLEBUFFER: {
-        this.handleEnableBufferAction(req, rsp);
-        break;
-      }
-      case DISABLEBUFFER: {
-        this.handleDisableBufferAction(req, rsp);
-        break;
-      }
-      case LASTPROCESSEDVERSION: {
-        this.handleLastProcessedVersionAction(req, rsp);
-        break;
-      }
-      case QUEUES: {
-        this.handleQueuesAction(req, rsp);
-        break;
-      }
-      case OPS: {
-        this.handleOpsAction(req, rsp);
-        break;
-      }
-      case ERRORS: {
-        this.handleErrorsAction(req, rsp);
-        break;
-      }
-      case BOOTSTRAP: {
-        this.handleBootstrapAction(req, rsp);
-        break;
-      }
-      case BOOTSTRAP_STATUS:  {
-        this.handleBootstrapStatus(req, rsp);
-        break;
-      }
-      case CANCEL_BOOTSTRAP:  {
-        this.handleCancelBootstrap(req, rsp);
-        break;
-      }
-      default: {
-        throw new RuntimeException("Unknown action: " + action);
-      }
-    }
-
-    rsp.setHttpCaching(false);
-  }
-
-  @Override
-  public void inform(SolrCore core) {
-    this.core = core;
-    collection = core.getCoreDescriptor().getCloudDescriptor().getCollectionName();
-    shard = core.getCoreDescriptor().getCloudDescriptor().getShardId();
-
-    // Make sure that the core is ZKAware
-    if (!core.getCoreContainer().isZooKeeperAware()) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Solr instance is not running in SolrCloud mode.");
-    }
-
-    // Make sure that the core is using the CdcrUpdateLog implementation
-    if (!(core.getUpdateHandler().getUpdateLog() instanceof CdcrUpdateLog)) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Solr instance is not configured with the cdcr update log.");
-    }
-
-    // Find the registered path of the handler
-    path = null;
-    for (Map.Entry<String, PluginBag.PluginHolder<SolrRequestHandler>> entry : core.getRequestHandlers().getRegistry().entrySet()) {
-      if (core.getRequestHandlers().isLoaded(entry.getKey()) && entry.getValue().get() == this) {
-        path = entry.getKey();
-        break;
-      }
-    }
-    if (path == null) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "The CdcrRequestHandler is not registered with the current core.");
-    }
-    if (!path.startsWith("/")) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "The CdcrRequestHandler needs to be registered to a path. Typically this is '/cdcr'");
-    }
-
-    // Initialisation phase
-    // If the Solr cloud is being initialised, each CDCR node will start up in its default state, i.e., STOPPED
-    // and non-leader. The leader state will be updated later, when all the Solr cores have been loaded.
-    // If the Solr cloud has already been initialised, and the core is reloaded (e.g., because a node died or a new node
-    // is added to the cluster), the CDCR node will synchronise its state with the global CDCR state that is stored
-    // in zookeeper.
-
-    // Initialise the buffer state manager
-    bufferStateManager = new CdcrBufferStateManager(core, bufferConfiguration);
-    // Initialise the process state manager
-    processStateManager = new CdcrProcessStateManager(core);
-    // Initialise the leader state manager
-    leaderStateManager = new CdcrLeaderStateManager(core);
-
-    // Initialise the replicator states manager
-    replicatorManager = new CdcrReplicatorManager(core, path, replicatorConfiguration, replicasConfiguration);
-    replicatorManager.setProcessStateManager(processStateManager);
-    replicatorManager.setLeaderStateManager(leaderStateManager);
-    // we need to inform it of a state event since the process and leader state
-    // may have been synchronised during the initialisation
-    replicatorManager.stateUpdate();
-
-    // Initialise the update log synchronizer
-    updateLogSynchronizer = new CdcrUpdateLogSynchronizer(core, path, updateLogSynchronizerConfiguration);
-    updateLogSynchronizer.setLeaderStateManager(leaderStateManager);
-    // we need to inform it of a state event since the leader state
-    // may have been synchronised during the initialisation
-    updateLogSynchronizer.stateUpdate();
-
-    // Initialise the buffer manager
-    bufferManager = new CdcrBufferManager(core);
-    bufferManager.setLeaderStateManager(leaderStateManager);
-    bufferManager.setBufferStateManager(bufferStateManager);
-    // we need to inform it of a state event since the leader state
-    // may have been synchronised during the initialisation
-    bufferManager.stateUpdate();
-
-    // register the close hook
-    this.registerCloseHook(core);
-  }
-
-  /**
-   * register a close hook to properly shutdown the state manager and scheduler
-   */
-  private void registerCloseHook(SolrCore core) {
-    core.addCloseHook(new CloseHook() {
-
-      @Override
-      public void preClose(SolrCore core) {
-        log.info("Solr core is being closed - shutting down CDCR handler @ {}:{}", collection, shard);
-
-        updateLogSynchronizer.shutdown();
-        replicatorManager.shutdown();
-        bufferStateManager.shutdown();
-        processStateManager.shutdown();
-        leaderStateManager.shutdown();
-      }
-
-      @Override
-      public void postClose(SolrCore core) {
-      }
-
-    });
-  }
-
-  /**
-   * <p>
-   * Update and synchronize the process state.
-   * </p>
-   * <p>
-   * The process state manager must notify the replicator states manager of the change of state.
-   * </p>
-   */
-  private void handleStartAction(SolrQueryRequest req, SolrQueryResponse rsp) {
-    if (processStateManager.getState() == CdcrParams.ProcessState.STOPPED) {
-      processStateManager.setState(CdcrParams.ProcessState.STARTED);
-      processStateManager.synchronize();
-    }
-
-    rsp.add(CdcrParams.CdcrAction.STATUS.toLower(), this.getStatus());
-  }
-
-  private void handleStopAction(SolrQueryRequest req, SolrQueryResponse rsp) {
-    if (processStateManager.getState() == CdcrParams.ProcessState.STARTED) {
-      processStateManager.setState(CdcrParams.ProcessState.STOPPED);
-      processStateManager.synchronize();
-    }
-
-    rsp.add(CdcrParams.CdcrAction.STATUS.toLower(), this.getStatus());
-  }
-
-  private void handleStatusAction(SolrQueryRequest req, SolrQueryResponse rsp) {
-    rsp.add(CdcrParams.CdcrAction.STATUS.toLower(), this.getStatus());
-  }
-
-  private NamedList getStatus() {
-    NamedList status = new NamedList();
-    status.add(CdcrParams.ProcessState.getParam(), processStateManager.getState().toLower());
-    status.add(CdcrParams.BufferState.getParam(), bufferStateManager.getState().toLower());
-    return status;
-  }
-
-  /**
-   * This action is generally executed on the target cluster in order to retrieve the latest update checkpoint.
-   * This checkpoint is used on the source cluster to setup the
-   * {@link org.apache.solr.update.CdcrUpdateLog.CdcrLogReader} of a shard leader. <br/>
-   * This method will execute in parallel one
-   * {@link org.apache.solr.handler.CdcrParams.CdcrAction#SHARDCHECKPOINT} request per shard leader. It will
-   * then pick the lowest version number as checkpoint. Picking the lowest amongst all shards will ensure that we do not
-   * pick a checkpoint that is ahead of the source cluster. This can occur when other shard leaders are sending new
-   * updates to the target cluster while we are currently instantiating the
-   * {@link org.apache.solr.update.CdcrUpdateLog.CdcrLogReader}.
-   * This solution only works in scenarios where the topology of the source and target clusters are identical.
-   */
-  private void handleCollectionCheckpointAction(SolrQueryRequest req, SolrQueryResponse rsp)
-      throws IOException, SolrServerException {
-    ZkController zkController = core.getCoreContainer().getZkController();
-    try {
-      zkController.getZkStateReader().forceUpdateCollection(collection);
-    } catch (Exception e) {
-      log.warn("Error when updating cluster state", e);
-    }
-    ClusterState cstate = zkController.getClusterState();
-    DocCollection docCollection = cstate.getCollectionOrNull(collection);
-    Collection<Slice> shards = docCollection == null? null : docCollection.getActiveSlices();
-
-    ExecutorService parallelExecutor = ExecutorUtil.newMDCAwareCachedThreadPool(new DefaultSolrThreadFactory("parallelCdcrExecutor"));
-
-    long checkpoint = Long.MAX_VALUE;
-    try {
-      List<Callable<Long>> callables = new ArrayList<>();
-      for (Slice shard : shards) {
-        ZkNodeProps leaderProps = zkController.getZkStateReader().getLeaderRetry(collection, shard.getName());
-        ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
-        callables.add(new SliceCheckpointCallable(nodeProps.getCoreUrl(), path));
-      }
-
-      for (final Future<Long> future : parallelExecutor.invokeAll(callables)) {
-        long version = future.get();
-        if (version < checkpoint) { // we must take the lowest checkpoint from all the shards
-          checkpoint = version;
-        }
-      }
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Error while requesting shard's checkpoints", e);
-    } catch (ExecutionException e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Error while requesting shard's checkpoints", e);
-    } finally {
-      parallelExecutor.shutdown();
-    }
-
-    rsp.add(CdcrParams.CHECKPOINT, checkpoint);
-  }
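The method above is the classic invokeAll-then-reduce pattern: one Callable per shard leader, then the minimum of all returned versions wins. A stripped-down sketch, with hard-coded numbers standing in for the per-shard SHARDCHECKPOINT responses:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class MinCheckpointExample {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newCachedThreadPool();
    try {
      List<Callable<Long>> callables = new ArrayList<>();
      for (long fake : new long[]{1700000000123L, 1700000000045L, 1700000000999L}) {
        callables.add(() -> fake);                       // stands in for one shard's checkpoint request
      }
      long checkpoint = Long.MAX_VALUE;
      for (Future<Long> future : pool.invokeAll(callables)) {
        checkpoint = Math.min(checkpoint, future.get()); // the lowest checkpoint wins
      }
      System.out.println("collection checkpoint = " + checkpoint);
    } finally {
      pool.shutdown();
    }
  }
}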
-
-  /**
-   * Retrieve the version number of the latest entry of the {@link org.apache.solr.update.UpdateLog}.
-   */
-  private void handleShardCheckpointAction(SolrQueryRequest req, SolrQueryResponse rsp) {
-    if (!leaderStateManager.amILeader()) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Action '" + CdcrParams.CdcrAction.SHARDCHECKPOINT +
-          "' sent to non-leader replica");
-    }
-
-    UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
-    VersionInfo versionInfo = ulog.getVersionInfo();
-    try (UpdateLog.RecentUpdates recentUpdates = ulog.getRecentUpdates()) {
-      long maxVersionFromRecent = recentUpdates.getMaxRecentVersion();
-      long maxVersionFromIndex = versionInfo.getMaxVersionFromIndex(req.getSearcher());
-      log.info("Found maxVersionFromRecent {} maxVersionFromIndex {}", maxVersionFromRecent, maxVersionFromIndex);
-      // there is no race with ongoing bootstrap because we don't expect any updates to come from the source
-      long maxVersion = Math.max(maxVersionFromIndex, maxVersionFromRecent);
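-      // 0 means no version has been recorded yet; normalise to -1 as the empty-checkpoint sentinel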
-      if (maxVersion == 0L) {
-        maxVersion = -1;
-      }
-      rsp.add(CdcrParams.CHECKPOINT, maxVersion);
-    } catch (IOException e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Action '" + CdcrParams.CdcrAction.SHARDCHECKPOINT +
-          "' could not read max version");
-    }
-  }
-
-  private void handleEnableBufferAction(SolrQueryRequest req, SolrQueryResponse rsp) {
-    if (bufferStateManager.getState() == CdcrParams.BufferState.DISABLED) {
-      bufferStateManager.setState(CdcrParams.BufferState.ENABLED);
-      bufferStateManager.synchronize();
-    }
-
-    rsp.add(CdcrParams.CdcrAction.STATUS.toLower(), this.getStatus());
-  }
-
-  private void handleDisableBufferAction(SolrQueryRequest req, SolrQueryResponse rsp) {
-    if (bufferStateManager.getState() == CdcrParams.BufferState.ENABLED) {
-      bufferStateManager.setState(CdcrParams.BufferState.DISABLED);
-      bufferStateManager.synchronize();
-    }
-
-    rsp.add(CdcrParams.CdcrAction.STATUS.toLower(), this.getStatus());
-  }
-
-  /**
-   * <p>
-   * We have to take care of four cases:
-   * <ul>
-   * <li>Replication & Buffering</li>
-   * <li>Replication & No Buffering</li>
-   * <li>No Replication & Buffering</li>
-   * <li>No Replication & No Buffering</li>
-   * </ul>
-   * In the first three cases, at least one log reader should have been initialised. We should take the lowest
-   * last processed version across all the initialised readers. In the last case, there isn't a log reader
-   * initialised. We should instantiate one and get the version of the first entries.
-   * </p>
-   */
-  private void handleLastProcessedVersionAction(SolrQueryRequest req, SolrQueryResponse rsp) {
-    String collectionName = core.getCoreDescriptor().getCloudDescriptor().getCollectionName();
-    String shard = core.getCoreDescriptor().getCloudDescriptor().getShardId();
-
-    if (!leaderStateManager.amILeader()) {
-      log.warn("Action {} sent to non-leader replica @ {}:{}", CdcrParams.CdcrAction.LASTPROCESSEDVERSION, collectionName, shard);
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Action " + CdcrParams.CdcrAction.LASTPROCESSEDVERSION +
-          " sent to non-leader replica");
-    }
-
-    // take care of the first three cases
-    // first check the log readers from the replicator states
-    long lastProcessedVersion = Long.MAX_VALUE;
-    for (CdcrReplicatorState state : replicatorManager.getReplicatorStates()) {
-      long version = Long.MAX_VALUE;
-      if (state.getLogReader() != null) {
-        version = state.getLogReader().getLastVersion();
-      }
-      lastProcessedVersion = Math.min(lastProcessedVersion, version);
-    }
-
-    // next check the log reader of the buffer
-    CdcrUpdateLog.CdcrLogReader bufferLogReader = ((CdcrUpdateLog) core.getUpdateHandler().getUpdateLog()).getBufferToggle();
-    if (bufferLogReader != null) {
-      lastProcessedVersion = Math.min(lastProcessedVersion, bufferLogReader.getLastVersion());
-    }
-
-    // the fourth case: no CDCR replication, no buffering: all readers were null
-    if (processStateManager.getState().equals(CdcrParams.ProcessState.STOPPED) &&
-        bufferStateManager.getState().equals(CdcrParams.BufferState.DISABLED)) {
-      CdcrUpdateLog.CdcrLogReader logReader = ((CdcrUpdateLog) core.getUpdateHandler().getUpdateLog()).newLogReader();
-      try {
-        // let the reader initialize lastVersion
-        logReader.next();
-        lastProcessedVersion = Math.min(lastProcessedVersion, logReader.getLastVersion());
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "Error while fetching the last processed version", e);
-      } catch (IOException e) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "Error while fetching the last processed version", e);
-      } finally {
-        logReader.close();
-      }
-    }
-
-    log.debug("Returning the lowest last processed version {}  @ {}:{}", lastProcessedVersion, collectionName, shard);
-    rsp.add(CdcrParams.LAST_PROCESSED_VERSION, lastProcessedVersion);
-  }
-
-  private void handleQueuesAction(SolrQueryRequest req, SolrQueryResponse rsp) {
-    NamedList hosts = new NamedList();
-
-    for (CdcrReplicatorState state : replicatorManager.getReplicatorStates()) {
-      NamedList queueStats = new NamedList();
-
-      CdcrUpdateLog.CdcrLogReader logReader = state.getLogReader();
-      if (logReader == null) {
-        String collectionName = req.getCore().getCoreDescriptor().getCloudDescriptor().getCollectionName();
-        String shard = req.getCore().getCoreDescriptor().getCloudDescriptor().getShardId();
-        log.warn("The log reader for target collection {} is not initialised @ {}:{}",
-            state.getTargetCollection(), collectionName, shard);
-        queueStats.add(CdcrParams.QUEUE_SIZE, -1L);
-      } else {
-        queueStats.add(CdcrParams.QUEUE_SIZE, logReader.getNumberOfRemainingRecords());
-      }
-      queueStats.add(CdcrParams.LAST_TIMESTAMP, state.getTimestampOfLastProcessedOperation());
-
-      if (hosts.get(state.getZkHost()) == null) {
-        hosts.add(state.getZkHost(), new NamedList());
-      }
-      ((NamedList) hosts.get(state.getZkHost())).add(state.getTargetCollection(), queueStats);
-    }
-
-    rsp.add(CdcrParams.QUEUES, hosts);
-    UpdateLog updateLog = core.getUpdateHandler().getUpdateLog();
-    rsp.add(CdcrParams.TLOG_TOTAL_SIZE, updateLog.getTotalLogsSize());
-    rsp.add(CdcrParams.TLOG_TOTAL_COUNT, updateLog.getTotalLogsNumber());
-    rsp.add(CdcrParams.UPDATE_LOG_SYNCHRONIZER,
-        updateLogSynchronizer.isStarted() ? CdcrParams.ProcessState.STARTED.toLower() : CdcrParams.ProcessState.STOPPED.toLower());
-  }
-
-  private void handleOpsAction(SolrQueryRequest req, SolrQueryResponse rsp) {
-    NamedList hosts = new NamedList();
-
-    for (CdcrReplicatorState state : replicatorManager.getReplicatorStates()) {
-      NamedList ops = new NamedList();
-      ops.add(CdcrParams.COUNTER_ALL, state.getBenchmarkTimer().getOperationsPerSecond());
-      ops.add(CdcrParams.COUNTER_ADDS, state.getBenchmarkTimer().getAddsPerSecond());
-      ops.add(CdcrParams.COUNTER_DELETES, state.getBenchmarkTimer().getDeletesPerSecond());
-
-      if (hosts.get(state.getZkHost()) == null) {
-        hosts.add(state.getZkHost(), new NamedList());
-      }
-      ((NamedList) hosts.get(state.getZkHost())).add(state.getTargetCollection(), ops);
-    }
-
-    rsp.add(CdcrParams.OPERATIONS_PER_SECOND, hosts);
-  }
-
-  private void handleErrorsAction(SolrQueryRequest req, SolrQueryResponse rsp) {
-    NamedList hosts = new NamedList();
-
-    for (CdcrReplicatorState state : replicatorManager.getReplicatorStates()) {
-      NamedList errors = new NamedList();
-
-      errors.add(CdcrParams.CONSECUTIVE_ERRORS, state.getConsecutiveErrors());
-      errors.add(CdcrReplicatorState.ErrorType.BAD_REQUEST.toLower(), state.getErrorCount(CdcrReplicatorState.ErrorType.BAD_REQUEST));
-      errors.add(CdcrReplicatorState.ErrorType.INTERNAL.toLower(), state.getErrorCount(CdcrReplicatorState.ErrorType.INTERNAL));
-
-      NamedList lastErrors = new NamedList();
-      for (String[] lastError : state.getLastErrors()) {
-        lastErrors.add(lastError[0], lastError[1]);
-      }
-      errors.add(CdcrParams.LAST, lastErrors);
-
-      if (hosts.get(state.getZkHost()) == null) {
-        hosts.add(state.getZkHost(), new NamedList());
-      }
-      ((NamedList) hosts.get(state.getZkHost())).add(state.getTargetCollection(), errors);
-    }
-
-    rsp.add(CdcrParams.ERRORS, hosts);
-  }
-
-  private void handleBootstrapAction(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException, InterruptedException, SolrServerException {
-    String collectionName = core.getCoreDescriptor().getCloudDescriptor().getCollectionName();
-    String shard = core.getCoreDescriptor().getCloudDescriptor().getShardId();
-    if (!leaderStateManager.amILeader()) {
-      log.warn("Action {} sent to non-leader replica @ {}:{}", CdcrParams.CdcrAction.BOOTSTRAP, collectionName, shard);
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Action " + CdcrParams.CdcrAction.BOOTSTRAP +
-          " sent to non-leader replica");
-    }
-    CountDownLatch latch = new CountDownLatch(1); // latch to make sure BOOTSTRAP_STATUS gives correct response
-
-    Runnable runnable = () -> {
-      Lock recoveryLock = req.getCore().getSolrCoreState().getRecoveryLock();
-      boolean locked = recoveryLock.tryLock();
-      SolrCoreState coreState = core.getSolrCoreState();
-      try {
-        if (!locked)  {
-          handleCancelBootstrap(req, rsp);
-        } else if (leaderStateManager.amILeader())  {
-          coreState.setCdcrBootstrapRunning(true);
-          latch.countDown(); // release the latch now that this bootstrap is marked running
-          String masterUrl = req.getParams().get(ReplicationHandler.MASTER_URL);
-          BootstrapCallable bootstrapCallable = new BootstrapCallable(masterUrl, core);
-          coreState.setCdcrBootstrapCallable(bootstrapCallable);
-          Future<Boolean> bootstrapFuture = core.getCoreContainer().getUpdateShardHandler().getRecoveryExecutor()
-              .submit(bootstrapCallable);
-          coreState.setCdcrBootstrapFuture(bootstrapFuture);
-          try {
-            bootstrapFuture.get();
-          } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-            log.warn("Bootstrap was interrupted", e);
-          } catch (ExecutionException e) {
-            log.error("Bootstrap operation failed", e);
-          }
-        } else  {
-          log.error("Action {} sent to non-leader replica @ {}:{}. Aborting bootstrap.", CdcrParams.CdcrAction.BOOTSTRAP, collectionName, shard);
-        }
-      } finally {
-        if (locked) {
-          coreState.setCdcrBootstrapRunning(false);
-          recoveryLock.unlock();
-        } else {
-          latch.countDown(); // release the latch: the lock was not acquired, so no new bootstrap was started
-        }
-      }
-    };
-
-    try {
-      core.getCoreContainer().getUpdateShardHandler().getUpdateExecutor().submit(runnable);
-      rsp.add(RESPONSE_STATUS, "submitted");
-      latch.await(10000, TimeUnit.MILLISECONDS); // wait (up to 10s) for the bootstrap to mark itself running
-    } catch (RejectedExecutionException ree)  {
-      // no problem, we're probably shutting down
-      rsp.add(RESPONSE_STATUS, "failed");
-    }
-  }
-
-  private void handleCancelBootstrap(SolrQueryRequest req, SolrQueryResponse rsp) {
-    BootstrapCallable callable = (BootstrapCallable)core.getSolrCoreState().getCdcrBootstrapCallable();
-    IOUtils.closeQuietly(callable);
-    rsp.add(RESPONSE_STATUS, "cancelled");
-  }
-
-  private void handleBootstrapStatus(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException, SolrServerException {
-    SolrCoreState coreState = core.getSolrCoreState();
-    if (coreState.getCdcrBootstrapRunning()) {
-      rsp.add(RESPONSE_STATUS, RUNNING);
-      return;
-    }
-
-    Future<Boolean> future = coreState.getCdcrBootstrapFuture();
-    BootstrapCallable callable = (BootstrapCallable)coreState.getCdcrBootstrapCallable();
-    if (future == null) {
-      rsp.add(RESPONSE_STATUS, "notfound");
-      rsp.add(RESPONSE_MESSAGE, "No bootstrap found in running, completed or failed states");
-    } else if (future.isCancelled() || callable.isClosed()) {
-      rsp.add(RESPONSE_STATUS, "cancelled");
-    } else if (future.isDone()) {
-      // could be a normal termination or an exception
-      try {
-        Boolean result = future.get();
-        if (result) {
-          rsp.add(RESPONSE_STATUS, COMPLETED);
-        } else {
-          rsp.add(RESPONSE_STATUS, FAILED);
-        }
-      } catch (InterruptedException e) {
-        // restore the interrupt status; the response is left without a status entry
-        Thread.currentThread().interrupt();
-      } catch (ExecutionException e) {
-        rsp.add(RESPONSE_STATUS, FAILED);
-        rsp.add(RESPONSE, e);
-      } catch (CancellationException ce) {
-        rsp.add(RESPONSE_STATUS, FAILED);
-        rsp.add(RESPONSE_MESSAGE, "Bootstrap was cancelled");
-      }
-    } else {
-      rsp.add(RESPONSE_STATUS, RUNNING);
-    }
-  }
-
-  static class BootstrapCallable implements Callable<Boolean>, Closeable {
-    private final String masterUrl;
-    private final SolrCore core;
-    private volatile boolean closed = false;
-
-    BootstrapCallable(String masterUrl, SolrCore core) {
-      this.masterUrl = masterUrl;
-      this.core = core;
-    }
-
-    @Override
-    public void close() throws IOException {
-      closed = true;
-      SolrRequestHandler handler = core.getRequestHandler(ReplicationHandler.PATH);
-      ReplicationHandler replicationHandler = (ReplicationHandler) handler;
-      replicationHandler.abortFetch();
-    }
-
-    public boolean isClosed() {
-      return closed;
-    }
-
-    @Override
-    public Boolean call() throws Exception {
-      boolean success = false;
-      UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
-      // we start buffering updates as a safeguard; however, we do not expect
-      // to receive any updates from the source during bootstrap
-      ulog.bufferUpdates();
-      try {
-        commitOnLeader(masterUrl);
-        // use rep handler directly, so we can do this sync rather than async
-        SolrRequestHandler handler = core.getRequestHandler(ReplicationHandler.PATH);
-        ReplicationHandler replicationHandler = (ReplicationHandler) handler;
-
-        if (replicationHandler == null) {
-          throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE,
-              "Skipping recovery, no " + ReplicationHandler.PATH + " handler found");
-        }
-
-        ModifiableSolrParams solrParams = new ModifiableSolrParams();
-        solrParams.set(ReplicationHandler.MASTER_URL, masterUrl);
-        // we do not want the raw tlog files from the source
-        solrParams.set(ReplicationHandler.TLOG_FILES, false);
-
-        success = replicationHandler.doFetch(solrParams, false).getSuccessful();
-
-        // this is required because this callable can race with HttpSolrCall#destroy
-        // which clears the request info.
-        // Applying buffered updates fails without the following line because LogReplayer
-        // also tries to set request info and fails with AssertionError
-        SolrRequestInfo.clearRequestInfo();
-
-        Future<UpdateLog.RecoveryInfo> future = ulog.applyBufferedUpdates();
-        if (future == null) {
-          // no replay needed
-          log.info("No replay needed.");
-        } else {
-          log.info("Replaying buffered documents.");
-          // wait for replay
-          UpdateLog.RecoveryInfo report = future.get();
-          if (report.failed) {
-            SolrException.log(log, "Replay failed");
-            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Replay failed");
-          }
-        }
-        return success;
-      } finally {
-        if (closed || !success) {
-          // we cannot apply the buffer in this case because it will introduce newer versions in the
-          // update log and then the source cluster will get those versions via collectioncheckpoint
-          // causing the versions in between to be completely missed
-          boolean dropped = ulog.dropBufferedUpdates();
-          assert dropped;
-        }
-      }
-    }
-
-    private void commitOnLeader(String leaderUrl) throws SolrServerException,
-        IOException {
-      try (HttpSolrClient client = new HttpSolrClient.Builder(leaderUrl)
-          .withConnectionTimeout(30000)
-          .build()) {
-        UpdateRequest ureq = new UpdateRequest();
-        ureq.setParams(new ModifiableSolrParams());
-        ureq.getParams().set(DistributedUpdateProcessor.COMMIT_END_POINT, true);
-        ureq.getParams().set(UpdateParams.OPEN_SEARCHER, false);
-        ureq.setAction(AbstractUpdateRequest.ACTION.COMMIT, false, true).process(
-            client);
-      }
-    }
-  }
-
-  @Override
-  public String getDescription() {
-    return "Manage Cross Data Center Replication";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.REPLICATION;
-  }
-
-  /**
-   * A {@link java.util.concurrent.Callable} for executing a single
-   * {@link org.apache.solr.handler.CdcrParams.CdcrAction#SHARDCHECKPOINT} action.
-   */
-  private static final class SliceCheckpointCallable implements Callable<Long> {
-
-    final String baseUrl;
-    final String cdcrPath;
-
-    SliceCheckpointCallable(final String baseUrl, final String cdcrPath) {
-      this.baseUrl = baseUrl;
-      this.cdcrPath = cdcrPath;
-    }
-
-    @Override
-    public Long call() throws Exception {
-      try (HttpSolrClient server = new HttpSolrClient.Builder(baseUrl)
-          .withConnectionTimeout(15000)
-          .withSocketTimeout(60000)
-          .build()) {
-
-        ModifiableSolrParams params = new ModifiableSolrParams();
-        params.set(CommonParams.ACTION, CdcrParams.CdcrAction.SHARDCHECKPOINT.toString());
-
-        SolrRequest request = new QueryRequest(params);
-        request.setPath(cdcrPath);
-
-        NamedList response = server.request(request);
-        return (Long) response.get(CdcrParams.CHECKPOINT);
-      }
-    }
-
-  }
-
-}
-
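
For context, the bootstrap handlers above are driven through a core's /cdcr endpoint. Below is a minimal SolrJ sketch of that exchange, assuming the "/cdcr" registration path, a "status" response key, and "submitted"/"running"/"failed" status values as suggested by the handler code above; the host URLs are placeholders, not real endpoints.

import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;

public class CdcrBootstrapExample {
  public static void main(String[] args) throws Exception {
    // assumed URL of a leader core in the target cluster
    try (HttpSolrClient client = new HttpSolrClient.Builder("http://target:8983/solr/collection1").build()) {
      // submit the bootstrap; handleBootstrapAction() responds "submitted" or "failed"
      ModifiableSolrParams submit = new ModifiableSolrParams();
      submit.set(CommonParams.ACTION, "BOOTSTRAP");
      submit.set("masterUrl", "http://source:8983/solr/collection1"); // ReplicationHandler.MASTER_URL
      QueryRequest submitReq = new QueryRequest(submit);
      submitReq.setPath("/cdcr"); // assumed registration path of CdcrRequestHandler
      System.out.println("submit: " + client.request(submitReq).get("status"));

      // poll handleBootstrapStatus() until it stops reporting "running"
      ModifiableSolrParams status = new ModifiableSolrParams();
      status.set(CommonParams.ACTION, "BOOTSTRAP_STATUS");
      QueryRequest statusReq = new QueryRequest(status);
      statusReq.setPath("/cdcr");
      Object state;
      do {
        Thread.sleep(1000L);
        state = client.request(statusReq).get("status");
      } while ("running".equals(state));
      System.out.println("bootstrap finished: " + state);
    }
  }
}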

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/CdcrStateManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrStateManager.java b/solr/core/src/java/org/apache/solr/handler/CdcrStateManager.java
deleted file mode 100644
index 151615e..0000000
--- a/solr/core/src/java/org/apache/solr/handler/CdcrStateManager.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * A state manager which implements an observer pattern to notify observers
- * of a state change.
- */
-abstract class CdcrStateManager {
-
-  private List<CdcrStateObserver> observers = new ArrayList<>();
-
-  void register(CdcrStateObserver observer) {
-    this.observers.add(observer);
-  }
-
-  void callback() {
-    for (CdcrStateObserver observer : observers) {
-      observer.stateUpdate();
-    }
-  }
-
-  interface CdcrStateObserver {
-
-    void stateUpdate();
-
-  }
-
-}
-
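
The managers built on this contract (leader, process, and buffer state) notify their observers via callback(). A minimal self-contained sketch of that wiring follows; the names are simplified, and only register()/callback()/stateUpdate() mirror the class above.

import java.util.ArrayList;
import java.util.List;

abstract class StateManager {
  private final List<StateObserver> observers = new ArrayList<>();

  void register(StateObserver observer) {
    observers.add(observer);
  }

  // concrete managers invoke this whenever their state changes
  void callback() {
    for (StateObserver observer : observers) {
      observer.stateUpdate();
    }
  }

  interface StateObserver {
    void stateUpdate();
  }
}

class LeaderStateManager extends StateManager {
  private volatile boolean leader;

  void setAmILeader(boolean leader) {
    this.leader = leader;
    callback(); // fan the leadership change out to every observer
  }

  boolean amILeader() {
    return leader;
  }
}

class ObserverDemo {
  public static void main(String[] args) {
    LeaderStateManager manager = new LeaderStateManager();
    // an observer reacting to leadership flips, much as
    // CdcrUpdateLogSynchronizer.stateUpdate() starts or stops its scheduler
    manager.register(() -> System.out.println("leader now? " + manager.amILeader()));
    manager.setAmILeader(true);   // prints: leader now? true
    manager.setAmILeader(false);  // prints: leader now? false
  }
}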

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/CdcrUpdateLogSynchronizer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrUpdateLogSynchronizer.java b/solr/core/src/java/org/apache/solr/handler/CdcrUpdateLogSynchronizer.java
deleted file mode 100644
index 80f27ce..0000000
--- a/solr/core/src/java/org/apache/solr/handler/CdcrUpdateLogSynchronizer.java
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.update.CdcrUpdateLog;
-import org.apache.solr.util.DefaultSolrThreadFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * <p>
- * Synchronize periodically the update log of non-leader nodes with their leaders.
- * </p>
- * <p>
- * Non-leader nodes must always buffer updates in case of leader failure. They therefore periodically
- * synchronize their update logs with their leader in order to remove old transaction logs that will
- * never be needed again. This is performed by a background thread scheduled with a fixed delay. The
- * background thread sends the action {@link org.apache.solr.handler.CdcrParams.CdcrAction#LASTPROCESSEDVERSION}
- * to the leader to retrieve the lowest last processed version number, which is then used to advance
- * the buffer log reader.
- * </p>
- */
-class CdcrUpdateLogSynchronizer implements CdcrStateManager.CdcrStateObserver {
-
-  private CdcrLeaderStateManager leaderStateManager;
-  private ScheduledExecutorService scheduler;
-
-  private final SolrCore core;
-  private final String collection;
-  private final String shardId;
-  private final String path;
-
-  private int timeSchedule = DEFAULT_TIME_SCHEDULE;
-
-  private static final int DEFAULT_TIME_SCHEDULE = 60000;  // by default, every minute
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  CdcrUpdateLogSynchronizer(SolrCore core, String path, SolrParams updateLogSynchronizerConfiguration) {
-    this.core = core;
-    this.path = path;
-    this.collection = core.getCoreDescriptor().getCloudDescriptor().getCollectionName();
-    this.shardId = core.getCoreDescriptor().getCloudDescriptor().getShardId();
-    if (updateLogSynchronizerConfiguration != null) {
-      this.timeSchedule = updateLogSynchronizerConfiguration.getInt(CdcrParams.SCHEDULE_PARAM, DEFAULT_TIME_SCHEDULE);
-    }
-  }
-
-  void setLeaderStateManager(final CdcrLeaderStateManager leaderStateManager) {
-    this.leaderStateManager = leaderStateManager;
-    this.leaderStateManager.register(this);
-  }
-
-  @Override
-  public void stateUpdate() {
-    // If I am not the leader, I need to periodically synchronise my update log with my leader.
-    if (!leaderStateManager.amILeader()) {
-      scheduler = Executors.newSingleThreadScheduledExecutor(new DefaultSolrThreadFactory("cdcr-update-log-synchronizer"));
-      scheduler.scheduleWithFixedDelay(new UpdateLogSynchronisation(), 0, timeSchedule, TimeUnit.MILLISECONDS);
-      return;
-    }
-
-    this.shutdown();
-  }
-
-  boolean isStarted() {
-    return scheduler != null;
-  }
-
-  void shutdown() {
-    if (scheduler != null) {
-      // interrupts are often dangerous in Lucene / Solr code, but without
-      // shutdownNow() the tests for this class will leak threads
-      scheduler.shutdownNow();
-      scheduler = null;
-    }
-  }
-
-  private class UpdateLogSynchronisation implements Runnable {
-
-    private String getLeaderUrl() {
-      ZkController zkController = core.getCoreContainer().getZkController();
-      ClusterState cstate = zkController.getClusterState();
-      DocCollection docCollection = cstate.getCollection(collection);
-      ZkNodeProps leaderProps = docCollection.getLeader(shardId);
-      if (leaderProps == null) { // we might not have a leader yet; return null
-        return null;
-      }
-      ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
-      return nodeProps.getCoreUrl();
-    }
-
-    @Override
-    public void run() {
-      try {
-        String leaderUrl = getLeaderUrl();
-        if (leaderUrl == null) { // we might not have a leader yet, stop and try again later
-          return;
-        }
-
-        HttpSolrClient server = new HttpSolrClient.Builder(leaderUrl)
-            .withConnectionTimeout(15000)
-            .withSocketTimeout(60000)
-            .build();
-
-        ModifiableSolrParams params = new ModifiableSolrParams();
-        params.set(CommonParams.ACTION, CdcrParams.CdcrAction.LASTPROCESSEDVERSION.toString());
-
-        SolrRequest request = new QueryRequest(params);
-        request.setPath(path);
-
-        long lastVersion;
-        try {
-          NamedList response = server.request(request);
-          lastVersion = (Long) response.get(CdcrParams.LAST_PROCESSED_VERSION);
-          log.debug("My leader {} says its last processed _version_ number is: {}. I am {}", leaderUrl, lastVersion,
-              core.getCoreDescriptor().getCloudDescriptor().getCoreNodeName());
-        } catch (IOException | SolrServerException e) {
-          log.warn("Couldn't get last processed version from leader {}: {}", leaderUrl, e.getMessage());
-          return;
-        } finally {
-          try {
-            server.close();
-          } catch (IOException ioe) {
-            log.warn("Caught exception trying to close server: ", leaderUrl, ioe.getMessage());
-          }
-        }
-
-        // if we received -1, it means that the log reader on the leader has not yet started to read log entries
-        // do nothing
-        if (lastVersion == -1) {
-          return;
-        }
-
-        try {
-          CdcrUpdateLog ulog = (CdcrUpdateLog) core.getUpdateHandler().getUpdateLog();
-          if (ulog.isBuffering()) {
-            log.debug("Advancing replica buffering tlog reader to {} @ {}:{}", lastVersion, collection, shardId);
-            ulog.getBufferToggle().seek(lastVersion);
-          }
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-          log.warn("Couldn't advance replica buffering tlog reader to {} (to remove old tlogs): {}", lastVersion, e.getMessage());
-        } catch (IOException e) {
-          log.warn("Couldn't advance replica buffering tlog reader to {} (to remove old tlogs): {}", lastVersion, e.getMessage());
-        }
-      } catch (Throwable e) {
-        log.warn("Caught unexpected exception", e);
-        throw e;
-      }
-    }
-  }
-
-}
-
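
Tying the pieces together: handleLastProcessedVersionAction in CdcrRequestHandler (first file above) reports the minimum last version across all of the leader's log readers, and this synchronizer seeks the replica's buffering tlog reader to that version so older tlog files can be purged. A small worked sketch of that minimum, using illustrative version numbers only:

public class LowestVersionSketch {
  public static void main(String[] args) {
    // in the real code these come from CdcrLogReader.getLastVersion() on the leader
    long[] targetReaderVersions = {112L, 98L, 105L}; // one reader per target cluster
    long bufferReaderVersion = 101L;                 // the buffer log reader, if buffering

    long lowest = Long.MAX_VALUE;
    for (long version : targetReaderVersions) {
      lowest = Math.min(lowest, version);
    }
    lowest = Math.min(lowest, bufferReaderVersion);

    // lowest == 98: every consumer has processed versions up to 98, so a replica
    // seeking its buffering tlog reader to 98 can safely drop older tlog files
    System.out.println(lowest);
  }
}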


[51/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
Add gradle support for Solr


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/0ae21ad0
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/0ae21ad0
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/0ae21ad0

Branch: refs/heads/jira/gradle
Commit: 0ae21ad0b8b0923d30e3289041cc3dd97e9bca43
Parents: 527c6f3
Author: Cao Manh Dat <da...@apache.org>
Authored: Mon Oct 22 18:04:11 2018 +0700
Committer: Cao Manh Dat <da...@apache.org>
Committed: Mon Oct 22 18:04:11 2018 +0700

----------------------------------------------------------------------
 build.gradle                                    |   40 +-
 lucene/analysis/icu/build.gradle                |    2 +-
 lucene/analysis/kuromoji/build.gradle           |   21 +-
 lucene/analysis/morfologik/build.gradle         |   12 +-
 lucene/analysis/morfologik/ivy.xml              |    2 +-
 lucene/analysis/nori/build.gradle               |    8 +-
 lucene/analysis/opennlp/build.gradle            |    9 +-
 lucene/analysis/phonetic/build.gradle           |    9 +-
 lucene/analysis/smartcn/build.gradle            |    2 +
 lucene/backward-codecs/build.gradle             |   13 +
 lucene/demo/build.gradle                        |    2 +
 lucene/facet/build.gradle                       |    2 +
 lucene/queryparser/build.gradle                 |   13 +
 lucene/replicator/build.gradle                  |   17 +-
 lucene/spatial-extras/build.gradle              |   14 +-
 lucene/spatial/build.gradle                     |    2 +-
 lucene/spatial3d/build.gradle                   |   13 +
 settings.gradle                                 |    6 +-
 solr/core/build.gradle                          |  140 +
 .../solr/analysis/ReversedWildcardFilter.java   |  155 -
 .../analysis/ReversedWildcardFilterFactory.java |  138 -
 .../org/apache/solr/analysis/SolrAnalyzer.java  |   42 -
 .../apache/solr/analysis/TokenizerChain.java    |  139 -
 .../org/apache/solr/analysis/package-info.java  |   26 -
 solr/core/src/java/org/apache/solr/api/Api.java |   68 -
 .../src/java/org/apache/solr/api/ApiBag.java    |  360 --
 .../java/org/apache/solr/api/ApiSupport.java    |   46 -
 .../java/org/apache/solr/api/V2HttpCall.java    |  384 --
 .../java/org/apache/solr/api/package-info.java  |   21 -
 .../solrj/embedded/EmbeddedSolrServer.java      |  322 --
 .../solr/client/solrj/embedded/JettyConfig.java |  131 -
 .../client/solrj/embedded/JettySolrRunner.java  |  586 --
 .../solr/client/solrj/embedded/SSLConfig.java   |  166 -
 .../client/solrj/embedded/package-info.java     |   25 -
 .../org/apache/solr/cloud/ActionThrottle.java   |   95 -
 .../apache/solr/cloud/ActiveReplicaWatcher.java |  170 -
 .../solr/cloud/CloudConfigSetService.java       |   64 -
 .../org/apache/solr/cloud/CloudDescriptor.java  |  179 -
 .../java/org/apache/solr/cloud/CloudUtil.java   |  145 -
 .../cloud/CurrentCoreDescriptorProvider.java    |   28 -
 .../org/apache/solr/cloud/DistributedMap.java   |  127 -
 .../org/apache/solr/cloud/ElectionContext.java  |  764 ---
 .../solr/cloud/ExclusiveSliceProperty.java      |  346 --
 .../org/apache/solr/cloud/LeaderElector.java    |  396 --
 .../java/org/apache/solr/cloud/LockTree.java    |  182 -
 .../java/org/apache/solr/cloud/Overseer.java    |  840 ---
 .../OverseerCollectionConfigSetProcessor.java   |  107 -
 .../cloud/OverseerConfigSetMessageHandler.java  |  377 --
 .../solr/cloud/OverseerMessageHandler.java      |   63 -
 .../solr/cloud/OverseerNodePrioritizer.java     |  113 -
 .../apache/solr/cloud/OverseerSolrResponse.java |   52 -
 .../solr/cloud/OverseerTaskProcessor.java       |  628 ---
 .../apache/solr/cloud/OverseerTaskQueue.java    |  339 --
 .../solr/cloud/RecoveringCoreTermWatcher.java   |   85 -
 .../org/apache/solr/cloud/RecoveryStrategy.java |  873 ---
 .../apache/solr/cloud/ReplicateFromLeader.java  |  136 -
 .../solr/cloud/SizeLimitedDistributedMap.java   |   88 -
 .../org/apache/solr/cloud/SolrZkServer.java     |  334 --
 .../src/java/org/apache/solr/cloud/Stats.java   |  147 -
 .../org/apache/solr/cloud/SyncStrategy.java     |  320 --
 .../src/java/org/apache/solr/cloud/ZkCLI.java   |  373 --
 .../apache/solr/cloud/ZkCollectionTerms.java    |   65 -
 .../org/apache/solr/cloud/ZkController.java     | 2590 ---------
 .../apache/solr/cloud/ZkDistributedQueue.java   |  587 --
 .../solr/cloud/ZkDistributedQueueFactory.java   |   43 -
 .../org/apache/solr/cloud/ZkShardTerms.java     |  627 ---
 .../apache/solr/cloud/ZkSolrResourceLoader.java |  188 -
 .../cloud/api/collections/AddReplicaCmd.java    |  409 --
 .../solr/cloud/api/collections/Assign.java      |  663 ---
 .../solr/cloud/api/collections/BackupCmd.java   |  226 -
 .../cloud/api/collections/CreateAliasCmd.java   |  164 -
 .../api/collections/CreateCollectionCmd.java    |  620 ---
 .../cloud/api/collections/CreateShardCmd.java   |  121 -
 .../api/collections/CreateSnapshotCmd.java      |  179 -
 .../cloud/api/collections/DeleteAliasCmd.java   |   43 -
 .../api/collections/DeleteCollectionCmd.java    |  207 -
 .../cloud/api/collections/DeleteNodeCmd.java    |  133 -
 .../cloud/api/collections/DeleteReplicaCmd.java |  281 -
 .../cloud/api/collections/DeleteShardCmd.java   |  178 -
 .../api/collections/DeleteSnapshotCmd.java      |  160 -
 .../api/collections/LeaderRecoveryWatcher.java  |   88 -
 .../api/collections/MaintainRoutedAliasCmd.java |  305 --
 .../solr/cloud/api/collections/MigrateCmd.java  |  340 --
 .../cloud/api/collections/MoveReplicaCmd.java   |  328 --
 .../OverseerCollectionMessageHandler.java       | 1003 ----
 .../cloud/api/collections/OverseerRoleCmd.java  |  102 -
 .../api/collections/OverseerStatusCmd.java      |  113 -
 .../cloud/api/collections/ReplaceNodeCmd.java   |  253 -
 .../solr/cloud/api/collections/RestoreCmd.java  |  395 --
 .../cloud/api/collections/SetAliasPropCmd.java  |   84 -
 .../cloud/api/collections/SplitShardCmd.java    |  778 ---
 .../cloud/api/collections/TimeRoutedAlias.java  |  254 -
 .../cloud/api/collections/UtilizeNodeCmd.java   |  133 -
 .../cloud/api/collections/package-info.java     |   23 -
 .../solr/cloud/autoscaling/ActionContext.java   |   68 -
 .../autoscaling/AutoAddReplicasPlanAction.java  |   63 -
 .../solr/cloud/autoscaling/AutoScaling.java     |  240 -
 .../cloud/autoscaling/AutoScalingHandler.java   |  698 ---
 .../cloud/autoscaling/ComputePlanAction.java    |  302 --
 .../cloud/autoscaling/ExecutePlanAction.java    |  182 -
 .../cloud/autoscaling/HttpTriggerListener.java  |  164 -
 .../autoscaling/InactiveShardPlanAction.java    |  152 -
 .../cloud/autoscaling/IndexSizeTrigger.java     |  479 --
 .../solr/cloud/autoscaling/LoggingListener.java |   38 -
 .../solr/cloud/autoscaling/MetricTrigger.java   |  219 -
 .../cloud/autoscaling/NodeAddedTrigger.java     |  231 -
 .../solr/cloud/autoscaling/NodeLostTrigger.java |  228 -
 .../autoscaling/OverseerTriggerThread.java      |  405 --
 .../cloud/autoscaling/ScheduledTrigger.java     |  222 -
 .../cloud/autoscaling/ScheduledTriggers.java    |  802 ---
 .../cloud/autoscaling/SearchRateTrigger.java    |  797 ---
 .../cloud/autoscaling/SystemLogListener.java    |  212 -
 .../solr/cloud/autoscaling/TriggerAction.java   |   51 -
 .../cloud/autoscaling/TriggerActionBase.java    |   87 -
 .../autoscaling/TriggerActionException.java     |   33 -
 .../solr/cloud/autoscaling/TriggerBase.java     |  267 -
 .../solr/cloud/autoscaling/TriggerEvent.java    |  309 --
 .../cloud/autoscaling/TriggerEventQueue.java    |  114 -
 .../solr/cloud/autoscaling/TriggerListener.java |   65 -
 .../cloud/autoscaling/TriggerListenerBase.java  |   97 -
 .../solr/cloud/autoscaling/TriggerUtils.java    |   87 -
 .../autoscaling/TriggerValidationException.java |   74 -
 .../solr/cloud/autoscaling/package-info.java    |   21 -
 .../cloud/overseer/ClusterStateMutator.java     |  204 -
 .../solr/cloud/overseer/CollectionMutator.java  |  165 -
 .../apache/solr/cloud/overseer/NodeMutator.java |   86 -
 .../solr/cloud/overseer/OverseerAction.java     |   55 -
 .../solr/cloud/overseer/ReplicaMutator.java     |  497 --
 .../solr/cloud/overseer/SliceMutator.java       |  273 -
 .../solr/cloud/overseer/ZkStateWriter.java      |  259 -
 .../solr/cloud/overseer/ZkWriteCommand.java     |   50 -
 .../solr/cloud/overseer/package-info.java       |   23 -
 .../org/apache/solr/cloud/package-info.java     |   23 -
 .../apache/solr/cloud/rule/ImplicitSnitch.java  |   65 -
 .../apache/solr/cloud/rule/ReplicaAssigner.java |  447 --
 .../java/org/apache/solr/cloud/rule/Rule.java   |  386 --
 .../solr/cloud/rule/ServerSnitchContext.java    |   58 -
 .../apache/solr/cloud/rule/package-info.java    |   23 -
 .../solr/core/AbstractSolrEventListener.java    |   80 -
 .../org/apache/solr/core/BlobRepository.java    |  291 -
 .../solr/core/CachingDirectoryFactory.java      |  524 --
 .../java/org/apache/solr/core/CloseHook.java    |   53 -
 .../java/org/apache/solr/core/CloudConfig.java  |  215 -
 .../java/org/apache/solr/core/CodecFactory.java |   32 -
 .../src/java/org/apache/solr/core/Config.java   |  493 --
 .../org/apache/solr/core/ConfigOverlay.java     |  269 -
 .../java/org/apache/solr/core/ConfigSet.java    |   65 -
 .../apache/solr/core/ConfigSetProperties.java   |   82 -
 .../org/apache/solr/core/ConfigSetService.java  |  243 -
 .../org/apache/solr/core/CoreContainer.java     | 1874 -------
 .../org/apache/solr/core/CoreDescriptor.java    |  396 --
 .../apache/solr/core/CorePropertiesLocator.java |  210 -
 .../java/org/apache/solr/core/CoreSorter.java   |  185 -
 .../java/org/apache/solr/core/CoresLocator.java |   71 -
 .../java/org/apache/solr/core/Diagnostics.java  |   54 -
 .../org/apache/solr/core/DirectoryFactory.java  |  434 --
 .../solr/core/EphemeralDirectoryFactory.java    |   75 -
 .../apache/solr/core/HdfsDirectoryFactory.java  |  610 ---
 .../solr/core/IndexDeletionPolicyWrapper.java   |  271 -
 .../apache/solr/core/IndexReaderFactory.java    |   74 -
 .../java/org/apache/solr/core/InitParams.java   |  144 -
 .../apache/solr/core/MMapDirectoryFactory.java  |   78 -
 .../org/apache/solr/core/MemClassLoader.java    |  181 -
 .../org/apache/solr/core/MetricsConfig.java     |  134 -
 .../apache/solr/core/NIOFSDirectoryFactory.java |   43 -
 .../solr/core/NRTCachingDirectoryFactory.java   |   63 -
 .../java/org/apache/solr/core/NodeConfig.java   |  402 --
 .../java/org/apache/solr/core/PluginBag.java    |  602 ---
 .../java/org/apache/solr/core/PluginInfo.java   |  190 -
 .../apache/solr/core/QuerySenderListener.java   |  105 -
 .../apache/solr/core/RAMDirectoryFactory.java   |   47 -
 .../org/apache/solr/core/RequestHandlers.java   |  170 -
 .../org/apache/solr/core/RequestParams.java     |  269 -
 .../apache/solr/core/SchemaCodecFactory.java    |  125 -
 .../solr/core/ShutdownAwareDirectory.java       |   30 -
 .../solr/core/SimpleFSDirectoryFactory.java     |   42 -
 .../solr/core/SimpleTextCodecFactory.java       |   38 -
 .../java/org/apache/solr/core/SolrConfig.java   |  963 ----
 .../src/java/org/apache/solr/core/SolrCore.java | 3154 -----------
 .../core/SolrCoreInitializationException.java   |   32 -
 .../java/org/apache/solr/core/SolrCores.java    |  568 --
 .../apache/solr/core/SolrDeletionPolicy.java    |  237 -
 .../org/apache/solr/core/SolrEventListener.java |   59 -
 .../java/org/apache/solr/core/SolrInfoBean.java |   96 -
 .../apache/solr/core/SolrResourceLoader.java    |  918 ----
 .../core/SolrResourceNotFoundException.java     |   38 -
 .../org/apache/solr/core/SolrXmlConfig.java     |  553 --
 .../solr/core/StandardDirectoryFactory.java     |  165 -
 .../solr/core/StandardIndexReaderFactory.java   |   41 -
 .../solr/core/TransientSolrCoreCache.java       |  127 -
 .../core/TransientSolrCoreCacheDefault.java     |  198 -
 .../core/TransientSolrCoreCacheFactory.java     |   85 -
 .../TransientSolrCoreCacheFactoryDefault.java   |   31 -
 .../java/org/apache/solr/core/ZkContainer.java  |  247 -
 .../apache/solr/core/backup/BackupManager.java  |  292 -
 .../apache/solr/core/backup/package-info.java   |   22 -
 .../backup/repository/BackupRepository.java     |  184 -
 .../repository/BackupRepositoryFactory.java     |   88 -
 .../backup/repository/HdfsBackupRepository.java |  189 -
 .../repository/LocalFileSystemRepository.java   |  158 -
 .../core/backup/repository/package-info.java    |   23 -
 .../java/org/apache/solr/core/package-info.java |   23 -
 .../snapshots/CollectionSnapshotMetaData.java   |  242 -
 .../core/snapshots/SolrSnapshotManager.java     |  300 --
 .../snapshots/SolrSnapshotMetaDataManager.java  |  416 --
 .../solr/core/snapshots/SolrSnapshotsTool.java  |  467 --
 .../solr/core/snapshots/package-info.java       |   22 -
 .../handler/AnalysisRequestHandlerBase.java     |  537 --
 .../apache/solr/handler/AnalyzeEvaluator.java   |  111 -
 .../org/apache/solr/handler/BlobHandler.java    |  316 --
 .../apache/solr/handler/CalciteJDBCStream.java  |   76 -
 .../apache/solr/handler/CdcrBufferManager.java  |   71 -
 .../solr/handler/CdcrBufferStateManager.java    |  174 -
 .../solr/handler/CdcrLeaderStateManager.java    |  160 -
 .../org/apache/solr/handler/CdcrParams.java     |  256 -
 .../solr/handler/CdcrProcessStateManager.java   |  174 -
 .../org/apache/solr/handler/CdcrReplicator.java |  251 -
 .../solr/handler/CdcrReplicatorManager.java     |  453 --
 .../solr/handler/CdcrReplicatorScheduler.java   |  114 -
 .../solr/handler/CdcrReplicatorState.java       |  299 --
 .../apache/solr/handler/CdcrRequestHandler.java |  861 ---
 .../apache/solr/handler/CdcrStateManager.java   |   47 -
 .../solr/handler/CdcrUpdateLogSynchronizer.java |  188 -
 .../org/apache/solr/handler/ClassifyStream.java |  229 -
 .../solr/handler/ContentStreamHandlerBase.java  |   86 -
 .../solr/handler/ContentStreamLoader.java       |   49 -
 .../handler/DocumentAnalysisRequestHandler.java |  346 --
 .../apache/solr/handler/DumpRequestHandler.java |  126 -
 .../org/apache/solr/handler/ExportHandler.java  |   49 -
 .../handler/FieldAnalysisRequestHandler.java    |  233 -
 .../org/apache/solr/handler/GraphHandler.java   |  233 -
 .../solr/handler/HaversineMetersEvaluator.java  |   59 -
 .../org/apache/solr/handler/IndexFetcher.java   | 1900 -------
 .../solr/handler/MoreLikeThisHandler.java       |  519 --
 .../solr/handler/NestedRequestHandler.java      |   28 -
 .../solr/handler/NotFoundRequestHandler.java    |   38 -
 .../apache/solr/handler/OldBackupDirectory.java |   69 -
 .../apache/solr/handler/PingRequestHandler.java |  343 --
 .../apache/solr/handler/RealTimeGetHandler.java |   72 -
 .../apache/solr/handler/ReplicationHandler.java | 1826 -------
 .../apache/solr/handler/RequestHandlerBase.java |  329 --
 .../solr/handler/RequestHandlerUtils.java       |  135 -
 .../org/apache/solr/handler/RestoreCore.java    |  162 -
 .../org/apache/solr/handler/SQLHandler.java     |  201 -
 .../org/apache/solr/handler/SchemaHandler.java  |  257 -
 .../org/apache/solr/handler/SnapShooter.java    |  308 --
 .../apache/solr/handler/SolrConfigHandler.java  |  898 ----
 .../solr/handler/SolrDefaultStreamFactory.java  |   54 -
 .../solr/handler/StandardRequestHandler.java    |   37 -
 .../org/apache/solr/handler/StreamHandler.java  |  449 --
 .../solr/handler/UpdateRequestHandler.java      |  187 -
 .../solr/handler/UpdateRequestHandlerApi.java   |   73 -
 .../solr/handler/admin/AdminHandlersProxy.java  |  128 -
 .../admin/AutoscalingHistoryHandler.java        |  165 -
 .../apache/solr/handler/admin/BackupCoreOp.java |   74 -
 .../handler/admin/BaseHandlerApiSupport.java    |  196 -
 .../solr/handler/admin/ClusterStatus.java       |  246 -
 .../handler/admin/CollectionHandlerApi.java     |  130 -
 .../solr/handler/admin/CollectionsHandler.java  | 1383 -----
 .../solr/handler/admin/ConfigSetsHandler.java   |  333 --
 .../handler/admin/ConfigSetsHandlerApi.java     |   89 -
 .../solr/handler/admin/CoreAdminHandler.java    |  427 --
 .../solr/handler/admin/CoreAdminHandlerApi.java |   85 -
 .../solr/handler/admin/CoreAdminOperation.java  |  368 --
 .../solr/handler/admin/CreateSnapshotOp.java    |   58 -
 .../solr/handler/admin/DeleteSnapshotOp.java    |   51 -
 .../solr/handler/admin/HealthCheckHandler.java  |  110 -
 .../apache/solr/handler/admin/InfoHandler.java  |  157 -
 .../org/apache/solr/handler/admin/InvokeOp.java |   58 -
 .../solr/handler/admin/LoggingHandler.java      |  165 -
 .../solr/handler/admin/LukeRequestHandler.java  |  802 ---
 .../solr/handler/admin/MergeIndexesOp.java      |  142 -
 .../handler/admin/MetricsCollectorHandler.java  |  235 -
 .../solr/handler/admin/MetricsHandler.java      |  350 --
 .../handler/admin/MetricsHistoryHandler.java    |  964 ----
 .../solr/handler/admin/PluginInfoHandler.java   |   85 -
 .../solr/handler/admin/PrepRecoveryOp.java      |  191 -
 .../handler/admin/PropertiesRequestHandler.java |   78 -
 .../solr/handler/admin/RebalanceLeaders.java    |  328 --
 .../handler/admin/RequestApplyUpdatesOp.java    |   71 -
 .../solr/handler/admin/RequestSyncShardOp.java  |   98 -
 .../solr/handler/admin/RestoreCoreOp.java       |   77 -
 .../solr/handler/admin/SecurityConfHandler.java |  318 --
 .../handler/admin/SecurityConfHandlerLocal.java |  104 -
 .../handler/admin/SecurityConfHandlerZk.java    |   92 -
 .../admin/SegmentsInfoRequestHandler.java       |  131 -
 .../handler/admin/ShowFileRequestHandler.java   |  371 --
 .../handler/admin/SolrInfoMBeanHandler.java     |  296 -
 .../org/apache/solr/handler/admin/SplitOp.java  |  169 -
 .../org/apache/solr/handler/admin/StatusOp.java |   58 -
 .../solr/handler/admin/SystemInfoHandler.java   |  416 --
 .../solr/handler/admin/ThreadDumpHandler.java   |  139 -
 .../handler/admin/ZookeeperInfoHandler.java     |  857 ---
 .../handler/admin/ZookeeperStatusHandler.java   |  222 -
 .../apache/solr/handler/admin/package-info.java |   23 -
 .../solr/handler/component/DebugComponent.java  |  394 --
 .../solr/handler/component/ExpandComponent.java |  828 ---
 .../solr/handler/component/FacetComponent.java  | 1570 ------
 .../solr/handler/component/FieldFacetStats.java |  203 -
 .../handler/component/HighlightComponent.java   |  299 --
 .../handler/component/HttpShardHandler.java     |  512 --
 .../component/HttpShardHandlerFactory.java      |  484 --
 .../component/IterativeMergeStrategy.java       |  137 -
 .../solr/handler/component/MergeStrategy.java   |   75 -
 .../component/MoreLikeThisComponent.java        |  428 --
 .../PhrasesIdentificationComponent.java         | 1129 ----
 .../solr/handler/component/PivotFacet.java      |  163 -
 .../solr/handler/component/PivotFacetField.java |  397 --
 .../PivotFacetFieldValueCollection.java         |  341 --
 .../handler/component/PivotFacetHelper.java     |  189 -
 .../handler/component/PivotFacetProcessor.java  |  441 --
 .../solr/handler/component/PivotFacetValue.java |  263 -
 .../solr/handler/component/QueryComponent.java  | 1481 -----
 .../component/QueryElevationComponent.java      | 1134 ----
 .../handler/component/RangeFacetProcessor.java  |  276 -
 .../handler/component/RangeFacetRequest.java    |  863 ---
 .../handler/component/RealTimeGetComponent.java | 1268 -----
 .../component/ReplicaListTransformer.java       |   35 -
 .../solr/handler/component/ResponseBuilder.java |  495 --
 .../handler/component/ResponseLogComponent.java |  118 -
 .../solr/handler/component/SearchComponent.java |  147 -
 .../solr/handler/component/SearchHandler.java   |  496 --
 .../apache/solr/handler/component/ShardDoc.java |   84 -
 .../component/ShardFieldSortedHitQueue.java     |  165 -
 .../solr/handler/component/ShardHandler.java    |   27 -
 .../handler/component/ShardHandlerFactory.java  |   61 -
 .../solr/handler/component/ShardRequest.java    |   74 -
 .../solr/handler/component/ShardResponse.java   |   99 -
 .../ShufflingReplicaListTransformer.java        |   39 -
 .../component/SortedDateStatsValues.java        |   89 -
 .../component/SortedNumericStatsValues.java     |  106 -
 .../handler/component/SpatialHeatmapFacets.java |  157 -
 .../handler/component/SpellCheckComponent.java  |  870 ---
 .../handler/component/SpellCheckMergeData.java  |   52 -
 .../solr/handler/component/StatsComponent.java  |  255 -
 .../solr/handler/component/StatsField.java      |  754 ---
 .../solr/handler/component/StatsValues.java     |   81 -
 .../handler/component/StatsValuesFactory.java   |  865 ---
 .../handler/component/SuggestComponent.java     |  555 --
 .../handler/component/TermVectorComponent.java  |  487 --
 .../solr/handler/component/TermsComponent.java  |  690 ---
 .../solr/handler/component/package-info.java    |   24 -
 .../solr/handler/export/BoolFieldWriter.java    |   63 -
 .../solr/handler/export/DateFieldWriter.java    |   56 -
 .../apache/solr/handler/export/DoubleCmp.java   |   43 -
 .../solr/handler/export/DoubleFieldWriter.java  |   56 -
 .../apache/solr/handler/export/DoubleValue.java |  101 -
 .../solr/handler/export/DoubleValueSortDoc.java |  102 -
 .../solr/handler/export/ExportWriter.java       |  459 --
 .../apache/solr/handler/export/FieldWriter.java |   27 -
 .../apache/solr/handler/export/FloatCmp.java    |   44 -
 .../solr/handler/export/FloatFieldWriter.java   |   56 -
 .../apache/solr/handler/export/FloatValue.java  |   98 -
 .../org/apache/solr/handler/export/IntComp.java |   45 -
 .../solr/handler/export/IntFieldWriter.java     |   55 -
 .../apache/solr/handler/export/IntValue.java    |   98 -
 .../org/apache/solr/handler/export/LongCmp.java |   45 -
 .../solr/handler/export/LongFieldWriter.java    |   55 -
 .../apache/solr/handler/export/LongValue.java   |   98 -
 .../solr/handler/export/MultiFieldWriter.java   |  104 -
 .../solr/handler/export/PriorityQueue.java      |  218 -
 .../solr/handler/export/QuadValueSortDoc.java   |  139 -
 .../solr/handler/export/SingleValueSortDoc.java |   89 -
 .../org/apache/solr/handler/export/SortDoc.java |  127 -
 .../apache/solr/handler/export/SortQueue.java   |   52 -
 .../apache/solr/handler/export/SortValue.java   |   38 -
 .../solr/handler/export/StringFieldWriter.java  |   62 -
 .../apache/solr/handler/export/StringValue.java |  119 -
 .../solr/handler/export/TripleValueSortDoc.java |  121 -
 .../solr/handler/export/package-info.java       |   23 -
 .../apache/solr/handler/loader/CSVLoader.java   |   45 -
 .../solr/handler/loader/CSVLoaderBase.java      |  393 --
 .../handler/loader/ContentStreamLoader.java     |   55 -
 .../solr/handler/loader/JavabinLoader.java      |  220 -
 .../apache/solr/handler/loader/JsonLoader.java  |  716 ---
 .../apache/solr/handler/loader/XMLLoader.java   |  531 --
 .../solr/handler/loader/package-info.java       |   23 -
 .../org/apache/solr/handler/package-info.java   |   23 -
 .../solr/handler/sql/CalciteSolrDriver.java     |   69 -
 .../apache/solr/handler/sql/LimitStream.java    |   89 -
 .../apache/solr/handler/sql/SolrAggregate.java  |  112 -
 .../apache/solr/handler/sql/SolrEnumerator.java |  147 -
 .../org/apache/solr/handler/sql/SolrFilter.java |  382 --
 .../org/apache/solr/handler/sql/SolrMethod.java |   44 -
 .../apache/solr/handler/sql/SolrProject.java    |   64 -
 .../org/apache/solr/handler/sql/SolrRel.java    |  106 -
 .../org/apache/solr/handler/sql/SolrRules.java  |  248 -
 .../org/apache/solr/handler/sql/SolrSchema.java |  141 -
 .../org/apache/solr/handler/sql/SolrSort.java   |   79 -
 .../org/apache/solr/handler/sql/SolrTable.java  |  885 ---
 .../apache/solr/handler/sql/SolrTableScan.java  |   85 -
 .../handler/sql/SolrToEnumerableConverter.java  |  136 -
 .../sql/SolrToEnumerableConverterRule.java      |   39 -
 .../apache/solr/handler/sql/package-info.java   |   21 -
 .../solr/handler/tagger/OffsetCorrector.java    |  178 -
 .../solr/handler/tagger/TagClusterReducer.java  |  103 -
 .../org/apache/solr/handler/tagger/TagLL.java   |  176 -
 .../org/apache/solr/handler/tagger/Tagger.java  |  230 -
 .../handler/tagger/TaggerRequestHandler.java    |  397 --
 .../solr/handler/tagger/TaggingAttribute.java   |   65 -
 .../handler/tagger/TaggingAttributeImpl.java    |   79 -
 .../solr/handler/tagger/TermPrefixCursor.java   |  189 -
 .../solr/handler/tagger/XmlOffsetCorrector.java |  113 -
 .../solr/handler/tagger/package-info.java       |   27 -
 .../highlight/BreakIteratorBoundaryScanner.java |   76 -
 .../apache/solr/highlight/DefaultEncoder.java   |   42 -
 .../solr/highlight/DefaultSolrHighlighter.java  |  991 ----
 .../apache/solr/highlight/GapFragmenter.java    |   99 -
 .../solr/highlight/HighlightingPluginBase.java  |   87 -
 .../org/apache/solr/highlight/HtmlEncoder.java  |   42 -
 .../apache/solr/highlight/HtmlFormatter.java    |   48 -
 .../solr/highlight/LuceneRegexFragmenter.java   |  217 -
 .../solr/highlight/PostingsSolrHighlighter.java |   71 -
 .../apache/solr/highlight/RegexFragmenter.java  |   90 -
 .../highlight/ScoreOrderFragmentsBuilder.java   |   42 -
 .../solr/highlight/SimpleBoundaryScanner.java   |   45 -
 .../solr/highlight/SimpleFragListBuilder.java   |   44 -
 .../solr/highlight/SimpleFragmentsBuilder.java  |   42 -
 .../solr/highlight/SingleFragListBuilder.java   |   44 -
 .../solr/highlight/SolrBoundaryScanner.java     |   35 -
 .../org/apache/solr/highlight/SolrEncoder.java  |   43 -
 .../apache/solr/highlight/SolrFormatter.java    |   44 -
 .../solr/highlight/SolrFragListBuilder.java     |   42 -
 .../apache/solr/highlight/SolrFragmenter.java   |   44 -
 .../solr/highlight/SolrFragmentsBuilder.java    |   80 -
 .../apache/solr/highlight/SolrHighlighter.java  |  123 -
 .../solr/highlight/UnifiedSolrHighlighter.java  |  419 --
 .../solr/highlight/WeightedFragListBuilder.java |   44 -
 .../org/apache/solr/highlight/package-info.java |   25 -
 .../solr/index/DefaultMergePolicyFactory.java   |   45 -
 .../index/LogByteSizeMergePolicyFactory.java    |   38 -
 .../solr/index/LogDocMergePolicyFactory.java    |   38 -
 .../apache/solr/index/MergePolicyFactory.java   |   40 -
 .../solr/index/MergePolicyFactoryArgs.java      |   66 -
 .../apache/solr/index/NoMergePolicyFactory.java |   34 -
 .../solr/index/SimpleMergePolicyFactory.java    |   42 -
 .../solr/index/SlowCompositeReaderWrapper.java  |  298 -
 .../apache/solr/index/SortingMergePolicy.java   |   44 -
 .../solr/index/SortingMergePolicyFactory.java   |   49 -
 .../solr/index/TieredMergePolicyFactory.java    |   38 -
 .../UninvertDocValuesMergePolicyFactory.java    |  219 -
 .../index/UpgradeIndexMergePolicyFactory.java   |   39 -
 .../solr/index/WrapperMergePolicyFactory.java   |  121 -
 .../apache/solr/index/hdfs/CheckHdfsIndex.java  |   79 -
 .../apache/solr/index/hdfs/package-info.java    |   22 -
 .../org/apache/solr/index/package-info.java     |   22 -
 .../org/apache/solr/internal/csv/CSVParser.java |  561 --
 .../apache/solr/internal/csv/CSVPrinter.java    |  305 --
 .../apache/solr/internal/csv/CSVStrategy.java   |  245 -
 .../org/apache/solr/internal/csv/CSVUtils.java  |  121 -
 .../apache/solr/internal/csv/CharBuffer.java    |  209 -
 .../internal/csv/ExtendedBufferedReader.java    |  315 --
 .../apache/solr/internal/csv/package-info.java  |   23 -
 .../solr/internal/csv/writer/CSVConfig.java     |  283 -
 .../internal/csv/writer/CSVConfigGuesser.java   |  185 -
 .../solr/internal/csv/writer/CSVField.java      |  108 -
 .../solr/internal/csv/writer/CSVWriter.java     |  132 -
 .../solr/internal/csv/writer/package-info.java  |   23 -
 .../org/apache/solr/legacy/BBoxStrategy.java    |  705 ---
 .../org/apache/solr/legacy/BBoxValueSource.java |   98 -
 .../apache/solr/legacy/DistanceValueSource.java |  120 -
 .../apache/solr/legacy/LegacyDoubleField.java   |  174 -
 .../org/apache/solr/legacy/LegacyField.java     |   90 -
 .../org/apache/solr/legacy/LegacyFieldType.java |  149 -
 .../apache/solr/legacy/LegacyFloatField.java    |  174 -
 .../org/apache/solr/legacy/LegacyIntField.java  |  175 -
 .../org/apache/solr/legacy/LegacyLongField.java |  184 -
 .../solr/legacy/LegacyNumericRangeQuery.java    |  537 --
 .../solr/legacy/LegacyNumericTokenStream.java   |  357 --
 .../apache/solr/legacy/LegacyNumericType.java   |   34 -
 .../apache/solr/legacy/LegacyNumericUtils.java  |  510 --
 .../apache/solr/legacy/PointVectorStrategy.java |  289 -
 .../solr/legacy/doc-files/nrq-formula-1.png     |  Bin 3171 -> 0 bytes
 .../solr/legacy/doc-files/nrq-formula-2.png     |  Bin 3694 -> 0 bytes
 .../org/apache/solr/legacy/package-info.java    |   21 -
 .../org/apache/solr/logging/CircularList.java   |  153 -
 .../org/apache/solr/logging/ListenerConfig.java |   35 -
 .../org/apache/solr/logging/LogWatcher.java     |  194 -
 .../apache/solr/logging/LogWatcherConfig.java   |   73 -
 .../org/apache/solr/logging/LoggerInfo.java     |   69 -
 .../apache/solr/logging/MDCLoggingContext.java  |  160 -
 .../org/apache/solr/logging/jul/JulInfo.java    |   72 -
 .../org/apache/solr/logging/jul/JulWatcher.java |  169 -
 .../apache/solr/logging/jul/RecordHandler.java  |   49 -
 .../apache/solr/logging/jul/package-info.java   |   22 -
 .../solr/logging/log4j2/Log4j2Watcher.java      |  293 -
 .../solr/logging/log4j2/package-info.java       |   22 -
 .../org/apache/solr/logging/package-info.java   |   22 -
 .../apache/solr/metrics/AggregateMetric.java    |  200 -
 .../solr/metrics/AltBufferPoolMetricSet.java    |   47 -
 .../metrics/FilteringSolrMetricReporter.java    |   59 -
 .../apache/solr/metrics/MetricSuppliers.java    |  363 --
 .../org/apache/solr/metrics/MetricsMap.java     |  197 -
 .../solr/metrics/OperatingSystemMetricSet.java  |   46 -
 .../solr/metrics/SolrCoreContainerReporter.java |   47 -
 .../solr/metrics/SolrCoreMetricManager.java     |  232 -
 .../apache/solr/metrics/SolrCoreReporter.java   |   47 -
 .../org/apache/solr/metrics/SolrMetricInfo.java |  111 -
 .../apache/solr/metrics/SolrMetricManager.java  | 1180 ----
 .../apache/solr/metrics/SolrMetricProducer.java |   35 -
 .../apache/solr/metrics/SolrMetricReporter.java |  126 -
 .../org/apache/solr/metrics/package-info.java   |   23 -
 .../metrics/reporters/ReporterClientCache.java  |   84 -
 .../metrics/reporters/SolrGangliaReporter.java  |  135 -
 .../metrics/reporters/SolrGraphiteReporter.java |  118 -
 .../solr/metrics/reporters/SolrJmxReporter.java |  243 -
 .../metrics/reporters/SolrSlf4jReporter.java    |  172 -
 .../reporters/jmx/JmxMetricsReporter.java       |  754 ---
 .../reporters/jmx/JmxObjectNameFactory.java     |  174 -
 .../metrics/reporters/jmx/package-info.java     |   21 -
 .../solr/metrics/reporters/package-info.java    |   22 -
 .../reporters/solr/SolrClusterReporter.java     |  295 -
 .../metrics/reporters/solr/SolrReporter.java    |  407 --
 .../reporters/solr/SolrShardReporter.java       |  189 -
 .../metrics/reporters/solr/package-info.java    |   22 -
 .../apache/solr/metrics/rrd/SolrRrdBackend.java |  138 -
 .../solr/metrics/rrd/SolrRrdBackendFactory.java |  451 --
 .../apache/solr/metrics/rrd/package-info.java   |   22 -
 .../src/java/org/apache/solr/package-info.java  |   22 -
 .../java/org/apache/solr/parser/CharStream.java |   99 -
 .../org/apache/solr/parser/FastCharStream.java  |  129 -
 .../org/apache/solr/parser/ParseException.java  |  187 -
 .../org/apache/solr/parser/QueryParser.java     |  933 ----
 .../java/org/apache/solr/parser/QueryParser.jj  |  352 --
 .../solr/parser/QueryParserConstants.java       |  135 -
 .../solr/parser/QueryParserTokenManager.java    | 1619 ------
 .../apache/solr/parser/SolrQueryParserBase.java | 1257 -----
 .../src/java/org/apache/solr/parser/Token.java  |  131 -
 .../org/apache/solr/parser/TokenMgrError.java   |  147 -
 .../org/apache/solr/parser/package-info.java    |   23 -
 .../java/org/apache/solr/query/FilterQuery.java |   96 -
 .../org/apache/solr/query/SolrRangeQuery.java   |  510 --
 .../src/java/org/apache/solr/query/package.html |   27 -
 .../apache/solr/request/DocValuesFacets.java    |  396 --
 .../org/apache/solr/request/DocValuesStats.java |  233 -
 .../org/apache/solr/request/IntervalFacets.java |  934 ----
 .../solr/request/LocalSolrQueryRequest.java     |   71 -
 .../org/apache/solr/request/NumericFacets.java  |  523 --
 .../request/PerSegmentSingleValuedFaceting.java |  427 --
 .../solr/request/RegexBytesRefFilter.java       |   46 -
 .../org/apache/solr/request/SimpleFacets.java   | 1202 -----
 .../apache/solr/request/SolrQueryRequest.java   |  139 -
 .../solr/request/SolrQueryRequestBase.java      |  217 -
 .../apache/solr/request/SolrRequestHandler.java |   65 -
 .../apache/solr/request/SolrRequestInfo.java    |  161 -
 .../solr/request/SubstringBytesRefFilter.java   |   52 -
 .../org/apache/solr/request/json/JSONUtil.java  |   79 -
 .../solr/request/json/JsonQueryConverter.java   |  141 -
 .../apache/solr/request/json/ObjectUtil.java    |  113 -
 .../apache/solr/request/json/RequestUtil.java   |  330 --
 .../org/apache/solr/request/json/package.html   |   27 -
 .../solr/request/macro/MacroExpander.java       |  195 -
 .../org/apache/solr/request/macro/package.html  |   27 -
 .../org/apache/solr/request/package-info.java   |   23 -
 .../solr/response/BasicResultContext.java       |   75 -
 .../response/BinaryQueryResponseWriter.java     |   37 -
 .../solr/response/BinaryResponseWriter.java     |  179 -
 .../apache/solr/response/CSVResponseWriter.java |  505 --
 .../org/apache/solr/response/DocsStreamer.java  |  228 -
 .../solr/response/GeoJSONResponseWriter.java    |  336 --
 .../solr/response/GraphMLResponseWriter.java    |  162 -
 .../solr/response/JSONResponseWriter.java       |  277 -
 .../org/apache/solr/response/JSONWriter.java    |  181 -
 .../apache/solr/response/PHPResponseWriter.java |  122 -
 .../response/PHPSerializedResponseWriter.java   |  278 -
 .../solr/response/PythonResponseWriter.java     |  151 -
 .../solr/response/QueryResponseWriter.java      |   87 -
 .../solr/response/QueryResponseWriterUtil.java  |   76 -
 .../apache/solr/response/RawResponseWriter.java |  108 -
 .../org/apache/solr/response/ResultContext.java |   59 -
 .../solr/response/RubyResponseWriter.java       |   94 -
 .../solr/response/SchemaXmlResponseWriter.java  |   49 -
 .../apache/solr/response/SchemaXmlWriter.java   |  477 --
 .../solr/response/SmileResponseWriter.java      |  204 -
 .../apache/solr/response/SolrQueryResponse.java |  363 --
 .../solr/response/TextResponseWriter.java       |  189 -
 .../apache/solr/response/XMLResponseWriter.java |   49 -
 .../org/apache/solr/response/XMLWriter.java     |  370 --
 .../solr/response/XSLTResponseWriter.java       |  137 -
 .../org/apache/solr/response/package-info.java  |   23 -
 .../transform/BaseEditorialTransformer.java     |   81 -
 .../response/transform/ChildDocTransformer.java |  253 -
 .../transform/ChildDocTransformerFactory.java   |  169 -
 .../transform/DocIdAugmenterFactory.java        |   60 -
 .../solr/response/transform/DocTransformer.java |  140 -
 .../response/transform/DocTransformers.java     |   98 -
 .../transform/ElevatedMarkerFactory.java        |   54 -
 .../transform/ExcludedMarkerFactory.java        |   57 -
 .../transform/ExplainAugmenterFactory.java      |  133 -
 .../transform/GeoTransformerFactory.java        |  231 -
 .../transform/RawValueTransformerFactory.java   |  165 -
 .../transform/RenameFieldTransformer.java       |   53 -
 .../solr/response/transform/ScoreAugmenter.java |   52 -
 .../transform/ShardAugmenterFactory.java        |   44 -
 .../transform/SubQueryAugmenterFactory.java     |  376 --
 .../response/transform/TransformerFactory.java  |   55 -
 .../transform/ValueAugmenterFactory.java        |  103 -
 .../transform/ValueSourceAugmenter.java         |  105 -
 .../response/transform/WriteableGeoJSON.java    |   55 -
 .../solr/response/transform/package-info.java   |   23 -
 .../org/apache/solr/rest/BaseSolrResource.java  |  216 -
 .../java/org/apache/solr/rest/DELETEable.java   |   26 -
 .../src/java/org/apache/solr/rest/GETable.java  |   26 -
 .../org/apache/solr/rest/ManagedResource.java   |  434 --
 .../solr/rest/ManagedResourceObserver.java      |   37 -
 .../solr/rest/ManagedResourceStorage.java       |  534 --
 .../src/java/org/apache/solr/rest/POSTable.java |   26 -
 .../src/java/org/apache/solr/rest/PUTable.java  |   26 -
 .../java/org/apache/solr/rest/RestManager.java  |  797 ---
 .../org/apache/solr/rest/SolrSchemaRestApi.java |   78 -
 .../java/org/apache/solr/rest/package-info.java |   23 -
 .../solr/rest/schema/FieldTypeXmlAdapter.java   |  186 -
 .../analysis/BaseManagedTokenFilterFactory.java |   82 -
 .../analysis/ManagedStopFilterFactory.java      |   95 -
 .../analysis/ManagedSynonymFilterFactory.java   |  443 --
 .../ManagedSynonymGraphFilterFactory.java       |  438 --
 .../schema/analysis/ManagedWordSetResource.java |  200 -
 .../solr/rest/schema/analysis/package-info.java |   23 -
 .../apache/solr/rest/schema/package-info.java   |   22 -
 .../apache/solr/schema/AbstractEnumField.java   |  313 --
 .../solr/schema/AbstractSpatialFieldType.java   |  459 --
 .../AbstractSpatialPrefixTreeFieldType.java     |  116 -
 .../solr/schema/AbstractSubTypeFieldType.java   |  138 -
 .../java/org/apache/solr/schema/BBoxField.java  |  196 -
 .../org/apache/solr/schema/BinaryField.java     |  108 -
 .../java/org/apache/solr/schema/BoolField.java  |  299 --
 .../solr/schema/ClassicIndexSchemaFactory.java  |   38 -
 .../org/apache/solr/schema/CollationField.java  |  281 -
 .../apache/solr/schema/CoordinateFieldType.java |   49 -
 .../java/org/apache/solr/schema/CopyField.java  |   81 -
 .../org/apache/solr/schema/CurrencyField.java   |  111 -
 .../apache/solr/schema/CurrencyFieldType.java   |  672 ---
 .../org/apache/solr/schema/CurrencyValue.java   |  231 -
 .../org/apache/solr/schema/DatePointField.java  |  254 -
 .../org/apache/solr/schema/DateRangeField.java  |  180 -
 .../apache/solr/schema/DateValueFieldType.java  |   23 -
 .../apache/solr/schema/DoublePointField.java    |  164 -
 .../solr/schema/DoubleValueFieldType.java       |   23 -
 .../java/org/apache/solr/schema/EnumField.java  |  202 -
 .../org/apache/solr/schema/EnumFieldType.java   |  224 -
 .../solr/schema/ExchangeRateProvider.java       |   66 -
 .../apache/solr/schema/ExternalFileField.java   |  131 -
 .../solr/schema/ExternalFileFieldReloader.java  |   92 -
 .../org/apache/solr/schema/FieldProperties.java |  127 -
 .../java/org/apache/solr/schema/FieldType.java  | 1295 -----
 .../solr/schema/FieldTypePluginLoader.java      |  432 --
 .../solr/schema/FileExchangeRateProvider.java   |  230 -
 .../org/apache/solr/schema/FloatPointField.java |  164 -
 .../apache/solr/schema/FloatValueFieldType.java |   23 -
 .../org/apache/solr/schema/GeoHashField.java    |  103 -
 .../solr/schema/HasImplicitIndexAnalyzer.java   |   25 -
 .../org/apache/solr/schema/IndexSchema.java     | 1944 -------
 .../apache/solr/schema/IndexSchemaFactory.java  |  100 -
 .../org/apache/solr/schema/IntPointField.java   |  163 -
 .../apache/solr/schema/IntValueFieldType.java   |   24 -
 .../solr/schema/JsonPreAnalyzedParser.java      |  278 -
 .../solr/schema/LatLonPointSpatialField.java    |  306 --
 .../java/org/apache/solr/schema/LatLonType.java |  598 ---
 .../org/apache/solr/schema/LongPointField.java  |  162 -
 .../apache/solr/schema/LongValueFieldType.java  |   23 -
 .../apache/solr/schema/ManagedIndexSchema.java  | 1393 -----
 .../solr/schema/ManagedIndexSchemaFactory.java  |  413 --
 .../java/org/apache/solr/schema/NumberType.java |   49 -
 .../apache/solr/schema/NumericFieldType.java    |  305 --
 .../solr/schema/NumericValueFieldType.java      |   23 -
 .../schema/OpenExchangeRatesOrgProvider.java    |  294 -
 .../java/org/apache/solr/schema/PointField.java |  303 --
 .../java/org/apache/solr/schema/PointType.java  |  310 --
 .../apache/solr/schema/PreAnalyzedField.java    |  386 --
 .../apache/solr/schema/PrimitiveFieldType.java  |   45 -
 .../org/apache/solr/schema/RandomSortField.java |  203 -
 .../schema/RptWithGeometrySpatialField.java     |  238 -
 .../org/apache/solr/schema/SchemaAware.java     |   37 -
 .../org/apache/solr/schema/SchemaField.java     |  431 --
 .../org/apache/solr/schema/SchemaManager.java   |  435 --
 .../apache/solr/schema/SimilarityFactory.java   |   87 -
 .../solr/schema/SimplePreAnalyzedParser.java    |  574 --
 .../apache/solr/schema/SortableTextField.java   |  215 -
 .../schema/SpatialPointVectorFieldType.java     |  102 -
 .../apache/solr/schema/SpatialQueryable.java    |   34 -
 .../SpatialRecursivePrefixTreeFieldType.java    |   56 -
 .../SpatialTermQueryPrefixTreeFieldType.java    |   33 -
 .../java/org/apache/solr/schema/StrField.java   |  137 -
 .../org/apache/solr/schema/StrFieldSource.java  |   80 -
 .../java/org/apache/solr/schema/TextField.java  |  212 -
 .../org/apache/solr/schema/TrieDateField.java   |  104 -
 .../org/apache/solr/schema/TrieDoubleField.java |  136 -
 .../java/org/apache/solr/schema/TrieField.java  |  683 ---
 .../org/apache/solr/schema/TrieFloatField.java  |  136 -
 .../org/apache/solr/schema/TrieIntField.java    |  134 -
 .../org/apache/solr/schema/TrieLongField.java   |  134 -
 .../java/org/apache/solr/schema/UUIDField.java  |  104 -
 .../apache/solr/schema/ZkIndexSchemaReader.java |  223 -
 .../org/apache/solr/schema/package-info.java    |   24 -
 .../apache/solr/search/AbstractReRankQuery.java |   86 -
 .../org/apache/solr/search/AnalyticsQuery.java  |   80 -
 .../java/org/apache/solr/search/BitDocSet.java  |  388 --
 .../solr/search/BitsFilteredDocIdSet.java       |   62 -
 .../solr/search/BitsFilteredPostingsEnum.java   |   57 -
 .../apache/solr/search/BoolQParserPlugin.java   |   58 -
 .../apache/solr/search/BoostQParserPlugin.java  |   87 -
 .../org/apache/solr/search/CacheConfig.java     |  161 -
 .../apache/solr/search/CacheRegenerator.java    |   42 -
 .../solr/search/CollapsingQParserPlugin.java    | 2843 ----------
 .../solr/search/ComplexPhraseQParserPlugin.java |  175 -
 .../java/org/apache/solr/search/CursorMark.java |  296 -
 .../apache/solr/search/DelegatingCollector.java |   89 -
 .../org/apache/solr/search/DisMaxQParser.java   |  294 -
 .../apache/solr/search/DisMaxQParserPlugin.java |  118 -
 .../org/apache/solr/search/DocIterator.java     |   52 -
 .../java/org/apache/solr/search/DocList.java    |  141 -
 .../org/apache/solr/search/DocListAndSet.java   |   36 -
 .../src/java/org/apache/solr/search/DocSet.java |  135 -
 .../java/org/apache/solr/search/DocSetBase.java |  266 -
 .../org/apache/solr/search/DocSetBuilder.java   |  215 -
 .../org/apache/solr/search/DocSetCollector.java |  176 -
 .../org/apache/solr/search/DocSetProducer.java  |   24 -
 .../java/org/apache/solr/search/DocSetUtil.java |  288 -
 .../java/org/apache/solr/search/DocSlice.java   |  182 -
 .../solr/search/EarlyTerminatingCollector.java  |   78 -
 .../EarlyTerminatingCollectorException.java     |   62 -
 .../EarlyTerminatingSortingCollector.java       |  132 -
 .../apache/solr/search/ExportQParserPlugin.java |  193 -
 .../solr/search/ExtendedDismaxQParser.java      | 1763 ------
 .../search/ExtendedDismaxQParserPlugin.java     |   33 -
 .../org/apache/solr/search/ExtendedQuery.java   |   39 -
 .../apache/solr/search/ExtendedQueryBase.java   |   81 -
 .../org/apache/solr/search/FastLRUCache.java    |  306 --
 .../org/apache/solr/search/FieldParams.java     |   45 -
 .../apache/solr/search/FieldQParserPlugin.java  |   49 -
 .../src/java/org/apache/solr/search/Filter.java |  145 -
 .../apache/solr/search/FilteredDocIdSet.java    |  114 -
 .../solr/search/FloatPayloadValueSource.java    |  225 -
 .../org/apache/solr/search/FunctionQParser.java |  459 --
 .../solr/search/FunctionQParserPlugin.java      |   34 -
 .../solr/search/FunctionRangeQParserPlugin.java |   72 -
 .../apache/solr/search/FunctionRangeQuery.java  |   76 -
 .../solr/search/GraphTermsQParserPlugin.java    |  785 ---
 .../java/org/apache/solr/search/Grouping.java   | 1037 ----
 .../java/org/apache/solr/search/HashDocSet.java |  310 --
 .../apache/solr/search/HashQParserPlugin.java   |  376 --
 .../solr/search/IGainTermsQParserPlugin.java    |  246 -
 .../java/org/apache/solr/search/Insanity.java   |  132 -
 .../apache/solr/search/JoinQParserPlugin.java   |  584 --
 .../java/org/apache/solr/search/LFUCache.java   |  318 --
 .../java/org/apache/solr/search/LRUCache.java   |  402 --
 .../search/LegacyNumericRangeQueryBuilder.java  |  136 -
 .../org/apache/solr/search/LuceneQParser.java   |   58 -
 .../apache/solr/search/LuceneQParserPlugin.java |   40 -
 .../apache/solr/search/MaxScoreCollector.java   |   55 -
 .../org/apache/solr/search/MaxScoreQParser.java |   97 -
 .../solr/search/MaxScoreQParserPlugin.java      |   36 -
 .../apache/solr/search/NestedQParserPlugin.java |   73 -
 .../org/apache/solr/search/NoOpRegenerator.java |   37 -
 .../solr/search/PayloadCheckQParserPlugin.java  |  109 -
 .../solr/search/PayloadScoreQParserPlugin.java  |   92 -
 .../org/apache/solr/search/PointMerger.java     |  456 --
 .../java/org/apache/solr/search/PostFilter.java |   46 -
 .../apache/solr/search/PrefixQParserPlugin.java |   45 -
 .../java/org/apache/solr/search/QParser.java    |  375 --
 .../org/apache/solr/search/QParserPlugin.java   |  122 -
 .../org/apache/solr/search/QueryCommand.java    |  222 -
 .../org/apache/solr/search/QueryContext.java    |  109 -
 .../org/apache/solr/search/QueryParsing.java    |  381 --
 .../org/apache/solr/search/QueryResult.java     |   85 -
 .../org/apache/solr/search/QueryResultKey.java  |  156 -
 .../java/org/apache/solr/search/QueryUtils.java |  143 -
 .../java/org/apache/solr/search/RankQuery.java  |   36 -
 .../apache/solr/search/RawQParserPlugin.java    |   47 -
 .../org/apache/solr/search/ReRankCollector.java |  188 -
 .../apache/solr/search/ReRankQParserPlugin.java |  134 -
 .../org/apache/solr/search/ReRankWeight.java    |   48 -
 .../org/apache/solr/search/ReturnFields.java    |   89 -
 .../org/apache/solr/search/ScoreFilter.java     |   21 -
 .../search/SignificantTermsQParserPlugin.java   |  269 -
 .../apache/solr/search/SimpleQParserPlugin.java |  245 -
 .../java/org/apache/solr/search/SolrCache.java  |  129 -
 .../org/apache/solr/search/SolrCacheBase.java   |  137 -
 .../solr/search/SolrConstantScoreQuery.java     |  139 -
 .../org/apache/solr/search/SolrCoreParser.java  |  108 -
 .../apache/solr/search/SolrDocumentFetcher.java |  787 ---
 .../apache/solr/search/SolrFieldCacheBean.java  |   77 -
 .../java/org/apache/solr/search/SolrFilter.java |   45 -
 .../apache/solr/search/SolrIndexSearcher.java   | 2462 ---------
 .../apache/solr/search/SolrQueryBuilder.java    |   34 -
 .../org/apache/solr/search/SolrQueryParser.java |   31 -
 .../solr/search/SolrQueryTimeoutImpl.java       |   90 -
 .../apache/solr/search/SolrReturnFields.java    |  509 --
 .../solr/search/SolrSpanQueryBuilder.java       |   33 -
 .../java/org/apache/solr/search/SortSpec.java   |  112 -
 .../org/apache/solr/search/SortSpecParsing.java |  225 -
 .../org/apache/solr/search/SortedIntDocSet.java |  807 ---
 .../java/org/apache/solr/search/Sorting.java    |   79 -
 .../solr/search/SpatialBoxQParserPlugin.java    |   32 -
 .../solr/search/SpatialFilterQParser.java       |  100 -
 .../solr/search/SpatialFilterQParserPlugin.java |   58 -
 .../org/apache/solr/search/SpatialOptions.java  |   47 -
 .../java/org/apache/solr/search/StrParser.java  |  336 --
 .../solr/search/SurroundQParserPlugin.java      |  103 -
 .../apache/solr/search/SwitchQParserPlugin.java |  194 -
 .../org/apache/solr/search/SyntaxError.java     |   30 -
 .../apache/solr/search/TermQParserPlugin.java   |   69 -
 .../apache/solr/search/TermsQParserPlugin.java  |  143 -
 .../TextLogisticRegressionQParserPlugin.java    |  287 -
 .../apache/solr/search/ValueSourceParser.java   | 1574 ------
 .../org/apache/solr/search/WrappedQuery.java    |   72 -
 .../apache/solr/search/XmlQParserPlugin.java    |  109 -
 .../solr/search/facet/AggValueSource.java       |   60 -
 .../org/apache/solr/search/facet/AvgAgg.java    |   58 -
 .../org/apache/solr/search/facet/BlockJoin.java |   75 -
 .../org/apache/solr/search/facet/CountAgg.java  |   35 -
 .../apache/solr/search/facet/FacetBucket.java   |  189 -
 .../solr/search/facet/FacetDebugInfo.java       |   93 -
 .../apache/solr/search/facet/FacetField.java    |  190 -
 .../solr/search/facet/FacetFieldMerger.java     |  234 -
 .../solr/search/facet/FacetFieldProcessor.java  |  736 ---
 .../facet/FacetFieldProcessorByArray.java       |  146 -
 .../facet/FacetFieldProcessorByArrayDV.java     |  338 --
 .../facet/FacetFieldProcessorByArrayUIF.java    |   71 -
 .../FacetFieldProcessorByEnumTermsStream.java   |  369 --
 .../facet/FacetFieldProcessorByHashDV.java      |  481 --
 .../apache/solr/search/facet/FacetHeatmap.java  |  520 --
 .../apache/solr/search/facet/FacetMerger.java   |  152 -
 .../apache/solr/search/facet/FacetModule.java   |  498 --
 .../solr/search/facet/FacetProcessor.java       |  495 --
 .../apache/solr/search/facet/FacetQuery.java    |   69 -
 .../apache/solr/search/facet/FacetRange.java    |  887 ---
 .../solr/search/facet/FacetRangeMerger.java     |  167 -
 .../apache/solr/search/facet/FacetRequest.java  | 1034 ----
 .../search/facet/FacetRequestSortedMerger.java  |  315 --
 .../org/apache/solr/search/facet/FieldUtil.java |  208 -
 .../org/apache/solr/search/facet/HLLAgg.java    |  250 -
 .../apache/solr/search/facet/LegacyFacet.java   |  318 --
 .../org/apache/solr/search/facet/MinMaxAgg.java |  350 --
 .../apache/solr/search/facet/PercentileAgg.java |  224 -
 .../solr/search/facet/RelatednessAgg.java       |  504 --
 .../solr/search/facet/SimpleAggValueSource.java |   62 -
 .../org/apache/solr/search/facet/SlotAcc.java   |  679 ---
 .../org/apache/solr/search/facet/StddevAgg.java |   66 -
 .../solr/search/facet/StrAggValueSource.java    |   50 -
 .../org/apache/solr/search/facet/SumAgg.java    |   52 -
 .../org/apache/solr/search/facet/SumsqAgg.java  |   37 -
 .../solr/search/facet/UnInvertedField.java      |  641 ---
 .../org/apache/solr/search/facet/UniqueAgg.java |  265 -
 .../solr/search/facet/UniqueBlockAgg.java       |   91 -
 .../solr/search/facet/UniqueMultiDvSlotAcc.java |   93 -
 .../search/facet/UniqueMultivaluedSlotAcc.java  |   71 -
 .../search/facet/UniqueSinglevaluedSlotAcc.java |   97 -
 .../apache/solr/search/facet/UniqueSlotAcc.java |  152 -
 .../apache/solr/search/facet/VarianceAgg.java   |   65 -
 .../org/apache/solr/search/facet/package.html   |   28 -
 .../search/function/CollapseScoreFunction.java  |   73 -
 .../search/function/ConcatStringFunction.java   |   53 -
 .../solr/search/function/EqualFunction.java     |   61 -
 .../search/function/FieldNameValueSource.java   |   60 -
 .../solr/search/function/FileFloatSource.java   |  368 --
 .../search/function/MultiStringFunction.java    |  146 -
 .../solr/search/function/OrdFieldSource.java    |  174 -
 .../search/function/ReverseOrdFieldSource.java  |  134 -
 .../function/SolrComparisonBoolFunction.java    |   60 -
 .../search/function/ValueSourceRangeFilter.java |  147 -
 .../distance/GeoDistValueSourceParser.java      |  212 -
 .../function/distance/GeohashFunction.java      |   99 -
 .../distance/GeohashHaversineFunction.java      |  133 -
 .../distance/HaversineConstFunction.java        |  118 -
 .../function/distance/HaversineFunction.java    |  152 -
 .../distance/SquaredEuclideanFunction.java      |   74 -
 .../distance/StringDistanceFunction.java        |  109 -
 .../distance/VectorDistanceFunction.java        |  217 -
 .../search/function/distance/package-info.java  |   24 -
 .../solr/search/function/package-info.java      |   24 -
 .../apache/solr/search/grouping/Command.java    |   72 -
 .../solr/search/grouping/CommandHandler.java    |  257 -
 .../search/grouping/GroupingSpecification.java  |  177 -
 .../grouping/collector/FilterCollector.java     |   69 -
 .../search/grouping/collector/package-info.java |   22 -
 .../distributed/ShardRequestFactory.java        |   38 -
 .../distributed/ShardResponseProcessor.java     |   38 -
 .../distributed/command/GroupConverter.java     |  160 -
 .../distributed/command/QueryCommand.java       |  179 -
 .../distributed/command/QueryCommandResult.java |   47 -
 .../command/SearchGroupsFieldCommand.java       |  158 -
 .../command/SearchGroupsFieldCommandResult.java |   44 -
 .../command/TopGroupsFieldCommand.java          |  203 -
 .../distributed/command/package-info.java       |   22 -
 .../grouping/distributed/package-info.java      |   23 -
 .../SearchGroupsRequestFactory.java             |   82 -
 .../StoredFieldsShardRequestFactory.java        |   99 -
 .../TopGroupsShardRequestFactory.java           |  141 -
 .../requestfactory/package-info.java            |   23 -
 .../SearchGroupShardResponseProcessor.java      |  156 -
 .../StoredFieldsShardResponseProcessor.java     |   53 -
 .../TopGroupsShardResponseProcessor.java        |  218 -
 .../responseprocessor/package-info.java         |   23 -
 .../SearchGroupsResultTransformer.java          |  129 -
 .../ShardResultTransformer.java                 |   53 -
 .../ShardResultTransformerUtils.java            |   51 -
 .../TopGroupsResultTransformer.java             |  289 -
 .../shardresultserializer/package-info.java     |   23 -
 .../EndResultTransformer.java                   |   51 -
 .../GroupedEndResultTransformer.java            |  113 -
 .../MainEndResultTransformer.java               |   59 -
 .../SimpleEndResultTransformer.java             |   71 -
 .../endresulttransformer/package-info.java      |   23 -
 .../solr/search/grouping/package-info.java      |   25 -
 .../apache/solr/search/join/BitSetSlice.java    |   45 -
 .../solr/search/join/BlockJoinChildQParser.java |   56 -
 .../join/BlockJoinChildQParserPlugin.java       |   35 -
 .../join/BlockJoinDocSetFacetComponent.java     |  195 -
 .../search/join/BlockJoinFacetAccsHolder.java   |   83 -
 .../search/join/BlockJoinFacetComponent.java    |   23 -
 .../join/BlockJoinFacetComponentSupport.java    |  158 -
 .../solr/search/join/BlockJoinFacetFilter.java  |   90 -
 .../join/BlockJoinFieldFacetAccumulator.java    |  235 -
 .../search/join/BlockJoinParentQParser.java     |  166 -
 .../join/BlockJoinParentQParserPlugin.java      |   43 -
 .../join/ChildFieldValueSourceParser.java       |  198 -
 .../apache/solr/search/join/FiltersQParser.java |  154 -
 .../solr/search/join/FiltersQParserPlugin.java  |   33 -
 .../apache/solr/search/join/FrontierQuery.java  |   48 -
 .../solr/search/join/GraphPointsCollector.java  |  123 -
 .../solr/search/join/GraphQParserPlugin.java    |   38 -
 .../org/apache/solr/search/join/GraphQuery.java |  445 --
 .../solr/search/join/GraphQueryParser.java      |   99 -
 .../solr/search/join/GraphTermsCollector.java   |  202 -
 .../search/join/ScoreJoinQParserPlugin.java     |  334 --
 .../solr/search/join/ScoreModeParser.java       |   53 -
 .../apache/solr/search/join/package-info.java   |   23 -
 .../apache/solr/search/mlt/CloudMLTQParser.java |  209 -
 .../solr/search/mlt/MLTQParserPlugin.java       |   38 -
 .../solr/search/mlt/SimpleMLTQParser.java       |  167 -
 .../apache/solr/search/mlt/package-info.java    |   23 -
 .../org/apache/solr/search/package-info.java    |   23 -
 .../similarities/BM25SimilarityFactory.java     |   61 -
 .../similarities/ClassicSimilarityFactory.java  |   64 -
 .../similarities/DFISimilarityFactory.java      |   75 -
 .../similarities/DFRSimilarityFactory.java      |  179 -
 .../similarities/IBSimilarityFactory.java       |  110 -
 .../LMDirichletSimilarityFactory.java           |   59 -
 .../LMJelinekMercerSimilarityFactory.java       |   58 -
 .../similarities/SchemaSimilarityFactory.java   |  157 -
 .../SweetSpotSimilarityFactory.java             |  185 -
 .../solr/search/similarities/package-info.java  |   25 -
 .../solr/search/stats/CachedSearcherStats.java  |   22 -
 .../solr/search/stats/CollectionStats.java      |   70 -
 .../search/stats/ExactSharedStatsCache.java     |   87 -
 .../solr/search/stats/ExactStatsCache.java      |  351 --
 .../apache/solr/search/stats/LRUStatsCache.java |  158 -
 .../solr/search/stats/LocalStatsCache.java      |   81 -
 .../solr/search/stats/LocalStatsSource.java     |   47 -
 .../apache/solr/search/stats/StatsCache.java    |  118 -
 .../apache/solr/search/stats/StatsSource.java   |   42 -
 .../org/apache/solr/search/stats/StatsUtil.java |  224 -
 .../org/apache/solr/search/stats/TermStats.java |   73 -
 .../apache/solr/search/stats/package-info.java  |   23 -
 .../security/AttributeOnlyServletContext.java   |  291 -
 .../solr/security/AuthenticationPlugin.java     |   62 -
 .../solr/security/AuthorizationContext.java     |   62 -
 .../solr/security/AuthorizationPlugin.java      |   30 -
 .../solr/security/AuthorizationResponse.java    |   40 -
 .../security/AutorizationEditOperation.java     |  173 -
 .../apache/solr/security/BasicAuthPlugin.java   |  187 -
 .../solr/security/ConfigEditablePlugin.java     |   38 -
 .../ConfigurableInternodeAuthHadoopPlugin.java  |   68 -
 .../security/DelegationTokenKerberosFilter.java |  252 -
 .../apache/solr/security/HadoopAuthFilter.java  |  230 -
 .../apache/solr/security/HadoopAuthPlugin.java  |  279 -
 .../solr/security/HttpClientBuilderPlugin.java  |   37 -
 .../apache/solr/security/KerberosFilter.java    |   62 -
 .../apache/solr/security/KerberosPlugin.java    |  267 -
 .../solr/security/PKIAuthenticationPlugin.java  |  303 --
 .../org/apache/solr/security/Permission.java    |  159 -
 .../solr/security/PermissionNameProvider.java   |   79 -
 .../solr/security/PrintWriterWrapper.java       |  215 -
 .../apache/solr/security/PublicKeyHandler.java  |   47 -
 ...tContinuesRecorderAuthenticationHandler.java |   71 -
 .../security/RuleBasedAuthorizationPlugin.java  |  244 -
 .../solr/security/SecurityPluginHolder.java     |   33 -
 .../security/Sha256AuthenticationProvider.java  |  168 -
 .../org/apache/solr/security/package-info.java  |   22 -
 .../org/apache/solr/servlet/BaseSolrFilter.java |   33 -
 .../apache/solr/servlet/BaseSolrServlet.java    |   34 -
 .../solr/servlet/CheckLoggingConfiguration.java |   38 -
 .../solr/servlet/DirectSolrConnection.java      |  153 -
 .../org/apache/solr/servlet/HttpSolrCall.java   | 1116 ----
 .../apache/solr/servlet/LoadAdminUiServlet.java |   89 -
 .../apache/solr/servlet/RedirectServlet.java    |   63 -
 .../org/apache/solr/servlet/ResponseUtils.java  |   84 -
 .../solr/servlet/ServletInputStreamWrapper.java |  105 -
 .../servlet/ServletOutputStreamWrapper.java     |  140 -
 .../apache/solr/servlet/SolrDispatchFilter.java |  597 ---
 .../apache/solr/servlet/SolrRequestParsers.java |  879 ---
 .../solr/servlet/cache/HttpCacheHeaderUtil.java |  343 --
 .../org/apache/solr/servlet/cache/Method.java   |   31 -
 .../apache/solr/servlet/cache/package-info.java |   23 -
 .../org/apache/solr/servlet/package-info.java   |   23 -
 .../spelling/AbstractLuceneSpellChecker.java    |  278 -
 .../spelling/ConjunctionSolrSpellChecker.java   |  225 -
 .../solr/spelling/DirectSolrSpellChecker.java   |  229 -
 .../solr/spelling/FileBasedSpellChecker.java    |  138 -
 .../solr/spelling/IndexBasedSpellChecker.java   |  115 -
 .../solr/spelling/PossibilityIterator.java      |  427 --
 .../apache/solr/spelling/QueryConverter.java    |   99 -
 .../org/apache/solr/spelling/ResultEntry.java   |   54 -
 .../apache/solr/spelling/SolrSpellChecker.java  |  198 -
 .../solr/spelling/SpellCheckCollation.java      |   68 -
 .../solr/spelling/SpellCheckCollator.java       |  279 -
 .../solr/spelling/SpellCheckCorrection.java     |   57 -
 .../apache/solr/spelling/SpellingOptions.java   |  107 -
 .../solr/spelling/SpellingQueryConverter.java   |  201 -
 .../apache/solr/spelling/SpellingResult.java    |  145 -
 .../solr/spelling/SuggestQueryConverter.java    |   43 -
 .../java/org/apache/solr/spelling/Token.java    |  175 -
 .../spelling/WordBreakSolrSpellChecker.java     |  360 --
 .../org/apache/solr/spelling/package-info.java  |   25 -
 .../spelling/suggest/DictionaryFactory.java     |   47 -
 .../suggest/DocumentDictionaryFactory.java      |   55 -
 .../DocumentExpressionDictionaryFactory.java    |  101 -
 .../spelling/suggest/FileDictionaryFactory.java |   61 -
 .../suggest/HighFrequencyDictionaryFactory.java |   52 -
 .../solr/spelling/suggest/LookupFactory.java    |   66 -
 .../solr/spelling/suggest/SolrSuggester.java    |  300 --
 .../apache/solr/spelling/suggest/Suggester.java |  227 -
 .../solr/spelling/suggest/SuggesterOptions.java |   49 -
 .../solr/spelling/suggest/SuggesterParams.java  |   85 -
 .../solr/spelling/suggest/SuggesterResult.java  |   80 -
 .../fst/AnalyzingInfixLookupFactory.java        |  150 -
 .../suggest/fst/AnalyzingLookupFactory.java     |  130 -
 .../suggest/fst/BlendedInfixLookupFactory.java  |  156 -
 .../spelling/suggest/fst/FSTLookupFactory.java  |   69 -
 .../suggest/fst/FreeTextLookupFactory.java      |   85 -
 .../suggest/fst/FuzzyLookupFactory.java         |  146 -
 .../spelling/suggest/fst/WFSTLookupFactory.java |   55 -
 .../solr/spelling/suggest/fst/package-info.java |   23 -
 .../suggest/jaspell/JaspellLookupFactory.java   |   47 -
 .../spelling/suggest/jaspell/package-info.java  |   23 -
 .../solr/spelling/suggest/package-info.java     |   25 -
 .../spelling/suggest/tst/TSTLookupFactory.java  |   40 -
 .../solr/spelling/suggest/tst/package-info.java |   23 -
 .../solr/store/blockcache/BlockCache.java       |  262 -
 .../solr/store/blockcache/BlockCacheKey.java    |   84 -
 .../store/blockcache/BlockCacheLocation.java    |   78 -
 .../solr/store/blockcache/BlockDirectory.java   |  371 --
 .../store/blockcache/BlockDirectoryCache.java   |  133 -
 .../solr/store/blockcache/BlockLocks.java       |   98 -
 .../solr/store/blockcache/BufferStore.java      |  133 -
 .../org/apache/solr/store/blockcache/Cache.java |   69 -
 .../store/blockcache/CachedIndexOutput.java     |   86 -
 .../blockcache/CustomBufferedIndexInput.java    |  283 -
 .../apache/solr/store/blockcache/Metrics.java   |  140 -
 .../blockcache/ReusedBufferedIndexOutput.java   |  164 -
 .../org/apache/solr/store/blockcache/Store.java |   28 -
 .../solr/store/blockcache/package-info.java     |   23 -
 .../apache/solr/store/hdfs/HdfsDirectory.java   |  277 -
 .../apache/solr/store/hdfs/HdfsFileWriter.java  |   56 -
 .../solr/store/hdfs/HdfsLocalityReporter.java   |  198 -
 .../apache/solr/store/hdfs/HdfsLockFactory.java |  130 -
 .../apache/solr/store/hdfs/package-info.java    |   22 -
 .../apache/solr/uninverting/DocTermOrds.java    |  898 ----
 .../org/apache/solr/uninverting/FieldCache.java |  447 --
 .../apache/solr/uninverting/FieldCacheImpl.java | 1254 -----
 .../solr/uninverting/UninvertingReader.java     |  466 --
 .../apache/solr/uninverting/package-info.java   |   21 -
 .../apache/solr/update/AddUpdateCommand.java    |  282 -
 .../apache/solr/update/CdcrTransactionLog.java  |  399 --
 .../org/apache/solr/update/CdcrUpdateLog.java   |  792 ---
 .../org/apache/solr/update/CommitTracker.java   |  329 --
 .../apache/solr/update/CommitUpdateCommand.java |   59 -
 .../solr/update/DefaultSolrCoreState.java       |  456 --
 .../solr/update/DeleteByQueryWrapper.java       |  119 -
 .../apache/solr/update/DeleteUpdateCommand.java |  114 -
 .../solr/update/DirectUpdateHandler2.java       | 1012 ----
 .../org/apache/solr/update/DocumentBuilder.java |  319 --
 .../apache/solr/update/HdfsTransactionLog.java  |  666 ---
 .../org/apache/solr/update/HdfsUpdateLog.java   |  426 --
 .../apache/solr/update/IndexFingerprint.java    |  221 -
 .../apache/solr/update/LoggingInfoStream.java   |   45 -
 .../org/apache/solr/update/MemOutputStream.java |   52 -
 .../apache/solr/update/MergeIndexesCommand.java |   51 -
 .../java/org/apache/solr/update/PeerSync.java   |  878 ---
 .../apache/solr/update/PeerSyncWithLeader.java  |  372 --
 .../solr/update/RollbackUpdateCommand.java      |   40 -
 .../apache/solr/update/SolrCmdDistributor.java  |  678 ---
 .../org/apache/solr/update/SolrCoreState.java   |  206 -
 .../org/apache/solr/update/SolrIndexConfig.java |  315 --
 .../apache/solr/update/SolrIndexSplitter.java   |  694 ---
 .../org/apache/solr/update/SolrIndexWriter.java |  349 --
 .../apache/solr/update/SplitIndexCommand.java   |   77 -
 .../solr/update/StreamingSolrClients.java       |  178 -
 .../org/apache/solr/update/TransactionLog.java  |  916 ----
 .../org/apache/solr/update/UpdateCommand.java   |  101 -
 .../org/apache/solr/update/UpdateHandler.java   |  240 -
 .../java/org/apache/solr/update/UpdateLog.java  | 2213 --------
 .../apache/solr/update/UpdateShardHandler.java  |  226 -
 .../solr/update/UpdateShardHandlerConfig.java   |   76 -
 .../org/apache/solr/update/VersionBucket.java   |   32 -
 .../org/apache/solr/update/VersionInfo.java     |  304 --
 .../org/apache/solr/update/package-info.java    |   23 -
 ...tractDefaultValueUpdateProcessorFactory.java |  100 -
 .../AddSchemaFieldsUpdateProcessorFactory.java  |  562 --
 ...aluesOrNoneFieldMutatingUpdateProcessor.java |  118 -
 .../processor/AtomicUpdateDocumentMerger.java   |  446 --
 .../processor/AtomicUpdateProcessorFactory.java |  194 -
 .../update/processor/CdcrUpdateProcessor.java   |  130 -
 .../processor/CdcrUpdateProcessorFactory.java   |   46 -
 .../ClassificationUpdateProcessor.java          |  117 -
 .../ClassificationUpdateProcessorFactory.java   |  167 -
 .../ClassificationUpdateProcessorParams.java    |  112 -
 .../CloneFieldUpdateProcessorFactory.java       |  473 --
 .../ConcatFieldUpdateProcessorFactory.java      |  116 -
 .../CountFieldValuesUpdateProcessorFactory.java |   81 -
 .../DefaultValueUpdateProcessorFactory.java     |   83 -
 .../processor/DistributedUpdateProcessor.java   | 2156 --------
 .../DistributedUpdateProcessorFactory.java      |   57 -
 .../DistributingUpdateProcessorFactory.java     |   37 -
 .../DocBasedVersionConstraintsProcessor.java    |  515 --
 ...BasedVersionConstraintsProcessorFactory.java |  196 -
 .../DocExpirationUpdateProcessorFactory.java    |  512 --
 .../FieldLengthUpdateProcessorFactory.java      |   79 -
 .../processor/FieldMutatingUpdateProcessor.java |  301 --
 .../FieldMutatingUpdateProcessorFactory.java    |  244 -
 ...FieldNameMutatingUpdateProcessorFactory.java |   99 -
 .../FieldValueMutatingUpdateProcessor.java      |   98 -
 .../FieldValueSubsetUpdateProcessorFactory.java |   56 -
 .../FirstFieldValueUpdateProcessorFactory.java  |   65 -
 .../HTMLStripFieldUpdateProcessorFactory.java   |   84 -
 ...oreCommitOptimizeUpdateProcessorFactory.java |  148 -
 .../IgnoreFieldUpdateProcessorFactory.java      |   76 -
 .../IgnoreLargeDocumentProcessorFactory.java    |  175 -
 .../LastFieldValueUpdateProcessorFactory.java   |   81 -
 .../processor/LogUpdateProcessorFactory.java    |  219 -
 .../solr/update/processor/Lookup3Signature.java |   36 -
 .../solr/update/processor/MD5Signature.java     |   49 -
 .../MaxFieldValueUpdateProcessorFactory.java    |   79 -
 .../MinFieldValueUpdateProcessorFactory.java    |   79 -
 .../processor/NestedUpdateProcessorFactory.java |  137 -
 .../NoOpDistributingUpdateProcessorFactory.java |   45 -
 ...ParseBooleanFieldUpdateProcessorFactory.java |  153 -
 .../ParseDateFieldUpdateProcessorFactory.java   |  273 -
 .../ParseDoubleFieldUpdateProcessorFactory.java |  122 -
 .../ParseFloatFieldUpdateProcessorFactory.java  |  123 -
 .../ParseIntFieldUpdateProcessorFactory.java    |  124 -
 .../ParseLongFieldUpdateProcessorFactory.java   |  118 -
 ...ParseNumericFieldUpdateProcessorFactory.java |   79 -
 .../PreAnalyzedUpdateProcessorFactory.java      |  173 -
 .../processor/RegexReplaceProcessorFactory.java |  142 -
 .../update/processor/RegexpBoostProcessor.java  |  211 -
 .../processor/RegexpBoostProcessorFactory.java  |   53 -
 .../RemoveBlankFieldUpdateProcessorFactory.java |   70 -
 .../processor/RunUpdateProcessorFactory.java    |  122 -
 .../processor/ScriptEngineCustomizer.java       |   28 -
 .../apache/solr/update/processor/Signature.java |   27 -
 .../SignatureUpdateProcessorFactory.java        |  204 -
 .../processor/SimpleUpdateProcessorFactory.java |  100 -
 .../SkipExistingDocumentsProcessorFactory.java  |  259 -
 .../StatelessScriptUpdateProcessorFactory.java  |  500 --
 .../TemplateUpdateProcessorFactory.java         |  128 -
 .../update/processor/TextProfileSignature.java  |  161 -
 .../TimeRoutedAliasUpdateProcessor.java         |  516 --
 .../TimestampUpdateProcessorFactory.java        |   66 -
 .../processor/TolerantUpdateProcessor.java      |  409 --
 .../TolerantUpdateProcessorFactory.java         |  142 -
 .../TrimFieldUpdateProcessorFactory.java        |   67 -
 .../TruncateFieldUpdateProcessorFactory.java    |  101 -
 .../update/processor/URLClassifyProcessor.java  |  229 -
 .../processor/URLClassifyProcessorFactory.java  |   45 -
 .../processor/UUIDUpdateProcessorFactory.java   |  113 -
 .../UniqFieldsUpdateProcessorFactory.java       |   73 -
 .../processor/UpdateRequestProcessor.java       |  103 -
 .../processor/UpdateRequestProcessorChain.java  |  369 --
 .../UpdateRequestProcessorFactory.java          |   50 -
 .../solr/update/processor/package-info.java     |   24 -
 .../apache/solr/util/AdjustableSemaphore.java   |   78 -
 .../org/apache/solr/util/BoundedTreeSet.java    |   68 -
 .../apache/solr/util/ConcurrentLFUCache.java    |  483 --
 .../apache/solr/util/ConcurrentLRUCache.java    |  754 ---
 .../java/org/apache/solr/util/CryptoKeys.java   |  355 --
 .../src/java/org/apache/solr/util/DOMUtil.java  |  414 --
 .../org/apache/solr/util/DateMathParser.java    |  433 --
 .../solr/util/DefaultSolrThreadFactory.java     |   49 -
 .../org/apache/solr/util/DistanceUnits.java     |  128 -
 .../java/org/apache/solr/util/FSHDFSUtils.java  |  208 -
 .../java/org/apache/solr/util/FileUtils.java    |  119 -
 .../src/java/org/apache/solr/util/HdfsUtil.java |   58 -
 .../java/org/apache/solr/util/IOFunction.java   |   29 -
 .../src/java/org/apache/solr/util/IdUtils.java  |   56 -
 .../src/java/org/apache/solr/util/JmxUtil.java  |   75 -
 .../java/org/apache/solr/util/LongIterator.java |   34 -
 .../org/apache/solr/util/LongPriorityQueue.java |  234 -
 .../src/java/org/apache/solr/util/LongSet.java  |  137 -
 .../java/org/apache/solr/util/MapListener.java  |   58 -
 .../java/org/apache/solr/util/NumberUtils.java  |  217 -
 .../org/apache/solr/util/OrderedExecutor.java   |  116 -
 .../java/org/apache/solr/util/PayloadUtils.java |  143 -
 .../org/apache/solr/util/PivotListEntry.java    |   86 -
 .../java/org/apache/solr/util/PrimUtils.java    |  122 -
 .../apache/solr/util/PropertiesInputStream.java |   50 -
 .../solr/util/PropertiesOutputStream.java       |   43 -
 .../org/apache/solr/util/PropertiesUtil.java    |  151 -
 .../src/java/org/apache/solr/util/RTimer.java   |   99 -
 .../java/org/apache/solr/util/RTimerTree.java   |   91 -
 .../apache/solr/util/RecordingJSONParser.java   |  110 -
 .../org/apache/solr/util/RedactionUtils.java    |   51 -
 .../java/org/apache/solr/util/RefCounted.java   |   61 -
 .../org/apache/solr/util/RegexFileFilter.java   |   44 -
 .../org/apache/solr/util/SafeXMLParsing.java    |  120 -
 .../org/apache/solr/util/SimplePostTool.java    | 1267 -----
 .../src/java/org/apache/solr/util/SolrCLI.java  | 4437 ---------------
 .../solr/util/SolrFileCleaningTracker.java      |  147 -
 .../org/apache/solr/util/SolrLogLayout.java     |  381 --
 .../org/apache/solr/util/SolrPluginUtils.java   | 1121 ----
 .../java/org/apache/solr/util/SpatialUtils.java |  164 -
 .../apache/solr/util/StartupLoggingUtils.java   |  142 -
 .../org/apache/solr/util/SystemIdResolver.java  |  175 -
 .../org/apache/solr/util/TestInjection.java     |  510 --
 .../src/java/org/apache/solr/util/TimeOut.java  |   68 -
 .../org/apache/solr/util/TimeZoneUtils.java     |  103 -
 .../org/apache/solr/util/VersionedFile.java     |  116 -
 .../util/configuration/SSLConfigurations.java   |  124 -
 .../configuration/SSLConfigurationsFactory.java |   49 -
 .../configuration/SSLCredentialProvider.java    |   36 -
 .../SSLCredentialProviderFactory.java           |   93 -
 .../solr/util/configuration/package-info.java   |   23 -
 .../AbstractSSLCredentialProvider.java          |   56 -
 .../providers/EnvSSLCredentialProvider.java     |   72 -
 .../providers/HadoopSSLCredentialProvider.java  |   66 -
 .../providers/SysPropSSLCredentialProvider.java |   38 -
 .../configuration/providers/package-info.java   |   23 -
 .../solr/util/doc-files/min-should-match.html   |  116 -
 .../hll/BigEndianAscendingWordDeserializer.java |  172 -
 .../hll/BigEndianAscendingWordSerializer.java   |  173 -
 .../java/org/apache/solr/util/hll/BitUtil.java  |   70 -
 .../org/apache/solr/util/hll/BitVector.java     |  260 -
 .../src/java/org/apache/solr/util/hll/HLL.java  | 1072 ----
 .../org/apache/solr/util/hll/HLLMetadata.java   |  135 -
 .../java/org/apache/solr/util/hll/HLLType.java  |   28 -
 .../java/org/apache/solr/util/hll/HLLUtil.java  |  198 -
 .../org/apache/solr/util/hll/IHLLMetadata.java  |   70 -
 .../apache/solr/util/hll/ISchemaVersion.java    |   84 -
 .../apache/solr/util/hll/IWordDeserializer.java |   40 -
 .../apache/solr/util/hll/IWordSerializer.java   |   38 -
 .../org/apache/solr/util/hll/NumberUtil.java    |  171 -
 .../apache/solr/util/hll/SchemaVersionOne.java  |  153 -
 .../apache/solr/util/hll/SerializationUtil.java |  276 -
 .../org/apache/solr/util/hll/package-info.java  |   24 -
 .../java/org/apache/solr/util/package-info.java |   23 -
 .../solr/util/plugin/AbstractPluginLoader.java  |  278 -
 .../solr/util/plugin/MapInitializedPlugin.java  |   29 -
 .../solr/util/plugin/MapPluginLoader.java       |   53 -
 .../util/plugin/NamedListInitializedPlugin.java |   29 -
 .../solr/util/plugin/NamedListPluginLoader.java |   49 -
 .../solr/util/plugin/PluginInfoInitialized.java |   31 -
 .../apache/solr/util/plugin/SolrCoreAware.java  |   27 -
 .../apache/solr/util/plugin/package-info.java   |   25 -
 .../stats/HttpClientMetricNameStrategy.java     |   28 -
 .../stats/InstrumentedHttpRequestExecutor.java  |  139 -
 ...entedPoolingHttpClientConnectionManager.java |   53 -
 .../org/apache/solr/util/stats/MetricUtils.java |  640 ---
 .../apache/solr/util/stats/package-info.java    |   23 -
 .../solr/util/xslt/TransformerProvider.java     |  128 -
 .../org/apache/solr/util/xslt/package-info.java |   23 -
 solr/core/src/java/overview.html                |   21 -
 .../solr/analysis/ReversedWildcardFilter.java   |  155 +
 .../analysis/ReversedWildcardFilterFactory.java |  138 +
 .../org/apache/solr/analysis/SolrAnalyzer.java  |   42 +
 .../apache/solr/analysis/TokenizerChain.java    |  139 +
 .../org/apache/solr/analysis/package-info.java  |   26 +
 .../src/main/java/org/apache/solr/api/Api.java  |   68 +
 .../main/java/org/apache/solr/api/ApiBag.java   |  360 ++
 .../java/org/apache/solr/api/ApiSupport.java    |   46 +
 .../java/org/apache/solr/api/V2HttpCall.java    |  384 ++
 .../java/org/apache/solr/api/package-info.java  |   21 +
 .../solrj/embedded/EmbeddedSolrServer.java      |  322 ++
 .../solr/client/solrj/embedded/JettyConfig.java |  131 +
 .../client/solrj/embedded/JettySolrRunner.java  |  586 ++
 .../solr/client/solrj/embedded/SSLConfig.java   |  166 +
 .../client/solrj/embedded/package-info.java     |   25 +
 .../org/apache/solr/cloud/ActionThrottle.java   |   95 +
 .../apache/solr/cloud/ActiveReplicaWatcher.java |  170 +
 .../solr/cloud/CloudConfigSetService.java       |   64 +
 .../org/apache/solr/cloud/CloudDescriptor.java  |  179 +
 .../java/org/apache/solr/cloud/CloudUtil.java   |  145 +
 .../cloud/CurrentCoreDescriptorProvider.java    |   28 +
 .../org/apache/solr/cloud/DistributedMap.java   |  127 +
 .../org/apache/solr/cloud/ElectionContext.java  |  764 +++
 .../solr/cloud/ExclusiveSliceProperty.java      |  346 ++
 .../org/apache/solr/cloud/LeaderElector.java    |  396 ++
 .../java/org/apache/solr/cloud/LockTree.java    |  182 +
 .../java/org/apache/solr/cloud/Overseer.java    |  840 +++
 .../OverseerCollectionConfigSetProcessor.java   |  107 +
 .../cloud/OverseerConfigSetMessageHandler.java  |  377 ++
 .../solr/cloud/OverseerMessageHandler.java      |   63 +
 .../solr/cloud/OverseerNodePrioritizer.java     |  113 +
 .../apache/solr/cloud/OverseerSolrResponse.java |   52 +
 .../solr/cloud/OverseerTaskProcessor.java       |  628 +++
 .../apache/solr/cloud/OverseerTaskQueue.java    |  339 ++
 .../solr/cloud/RecoveringCoreTermWatcher.java   |   85 +
 .../org/apache/solr/cloud/RecoveryStrategy.java |  873 +++
 .../apache/solr/cloud/ReplicateFromLeader.java  |  136 +
 .../solr/cloud/SizeLimitedDistributedMap.java   |   88 +
 .../org/apache/solr/cloud/SolrZkServer.java     |  334 ++
 .../main/java/org/apache/solr/cloud/Stats.java  |  147 +
 .../org/apache/solr/cloud/SyncStrategy.java     |  320 ++
 .../main/java/org/apache/solr/cloud/ZkCLI.java  |  373 ++
 .../apache/solr/cloud/ZkCollectionTerms.java    |   65 +
 .../org/apache/solr/cloud/ZkController.java     | 2590 +++++++++
 .../apache/solr/cloud/ZkDistributedQueue.java   |  587 ++
 .../solr/cloud/ZkDistributedQueueFactory.java   |   43 +
 .../org/apache/solr/cloud/ZkShardTerms.java     |  627 +++
 .../apache/solr/cloud/ZkSolrResourceLoader.java |  188 +
 .../cloud/api/collections/AddReplicaCmd.java    |  409 ++
 .../solr/cloud/api/collections/Assign.java      |  663 +++
 .../solr/cloud/api/collections/BackupCmd.java   |  226 +
 .../cloud/api/collections/CreateAliasCmd.java   |  164 +
 .../api/collections/CreateCollectionCmd.java    |  620 +++
 .../cloud/api/collections/CreateShardCmd.java   |  121 +
 .../api/collections/CreateSnapshotCmd.java      |  179 +
 .../cloud/api/collections/DeleteAliasCmd.java   |   43 +
 .../api/collections/DeleteCollectionCmd.java    |  207 +
 .../cloud/api/collections/DeleteNodeCmd.java    |  133 +
 .../cloud/api/collections/DeleteReplicaCmd.java |  281 +
 .../cloud/api/collections/DeleteShardCmd.java   |  178 +
 .../api/collections/DeleteSnapshotCmd.java      |  160 +
 .../api/collections/LeaderRecoveryWatcher.java  |   88 +
 .../api/collections/MaintainRoutedAliasCmd.java |  305 ++
 .../solr/cloud/api/collections/MigrateCmd.java  |  340 ++
 .../cloud/api/collections/MoveReplicaCmd.java   |  328 ++
 .../OverseerCollectionMessageHandler.java       | 1003 ++++
 .../cloud/api/collections/OverseerRoleCmd.java  |  102 +
 .../api/collections/OverseerStatusCmd.java      |  113 +
 .../cloud/api/collections/ReplaceNodeCmd.java   |  253 +
 .../solr/cloud/api/collections/RestoreCmd.java  |  395 ++
 .../cloud/api/collections/SetAliasPropCmd.java  |   84 +
 .../cloud/api/collections/SplitShardCmd.java    |  778 +++
 .../cloud/api/collections/TimeRoutedAlias.java  |  254 +
 .../cloud/api/collections/UtilizeNodeCmd.java   |  133 +
 .../cloud/api/collections/package-info.java     |   23 +
 .../solr/cloud/autoscaling/ActionContext.java   |   68 +
 .../autoscaling/AutoAddReplicasPlanAction.java  |   63 +
 .../solr/cloud/autoscaling/AutoScaling.java     |  240 +
 .../cloud/autoscaling/AutoScalingHandler.java   |  698 +++
 .../cloud/autoscaling/ComputePlanAction.java    |  302 ++
 .../cloud/autoscaling/ExecutePlanAction.java    |  182 +
 .../cloud/autoscaling/HttpTriggerListener.java  |  164 +
 .../autoscaling/InactiveShardPlanAction.java    |  152 +
 .../cloud/autoscaling/IndexSizeTrigger.java     |  479 ++
 .../solr/cloud/autoscaling/LoggingListener.java |   38 +
 .../solr/cloud/autoscaling/MetricTrigger.java   |  219 +
 .../cloud/autoscaling/NodeAddedTrigger.java     |  231 +
 .../solr/cloud/autoscaling/NodeLostTrigger.java |  228 +
 .../autoscaling/OverseerTriggerThread.java      |  405 ++
 .../cloud/autoscaling/ScheduledTrigger.java     |  222 +
 .../cloud/autoscaling/ScheduledTriggers.java    |  802 +++
 .../cloud/autoscaling/SearchRateTrigger.java    |  797 +++
 .../cloud/autoscaling/SystemLogListener.java    |  212 +
 .../solr/cloud/autoscaling/TriggerAction.java   |   51 +
 .../cloud/autoscaling/TriggerActionBase.java    |   87 +
 .../autoscaling/TriggerActionException.java     |   33 +
 .../solr/cloud/autoscaling/TriggerBase.java     |  267 +
 .../solr/cloud/autoscaling/TriggerEvent.java    |  309 ++
 .../cloud/autoscaling/TriggerEventQueue.java    |  114 +
 .../solr/cloud/autoscaling/TriggerListener.java |   65 +
 .../cloud/autoscaling/TriggerListenerBase.java  |   97 +
 .../solr/cloud/autoscaling/TriggerUtils.java    |   87 +
 .../autoscaling/TriggerValidationException.java |   74 +
 .../solr/cloud/autoscaling/package-info.java    |   21 +
 .../cloud/overseer/ClusterStateMutator.java     |  204 +
 .../solr/cloud/overseer/CollectionMutator.java  |  165 +
 .../apache/solr/cloud/overseer/NodeMutator.java |   86 +
 .../solr/cloud/overseer/OverseerAction.java     |   55 +
 .../solr/cloud/overseer/ReplicaMutator.java     |  497 ++
 .../solr/cloud/overseer/SliceMutator.java       |  273 +
 .../solr/cloud/overseer/ZkStateWriter.java      |  259 +
 .../solr/cloud/overseer/ZkWriteCommand.java     |   50 +
 .../solr/cloud/overseer/package-info.java       |   23 +
 .../org/apache/solr/cloud/package-info.java     |   23 +
 .../apache/solr/cloud/rule/ImplicitSnitch.java  |   65 +
 .../apache/solr/cloud/rule/ReplicaAssigner.java |  447 ++
 .../java/org/apache/solr/cloud/rule/Rule.java   |  386 ++
 .../solr/cloud/rule/ServerSnitchContext.java    |   58 +
 .../apache/solr/cloud/rule/package-info.java    |   23 +
 .../solr/core/AbstractSolrEventListener.java    |   80 +
 .../org/apache/solr/core/BlobRepository.java    |  291 +
 .../solr/core/CachingDirectoryFactory.java      |  524 ++
 .../java/org/apache/solr/core/CloseHook.java    |   53 +
 .../java/org/apache/solr/core/CloudConfig.java  |  215 +
 .../java/org/apache/solr/core/CodecFactory.java |   32 +
 .../main/java/org/apache/solr/core/Config.java  |  493 ++
 .../org/apache/solr/core/ConfigOverlay.java     |  269 +
 .../java/org/apache/solr/core/ConfigSet.java    |   65 +
 .../apache/solr/core/ConfigSetProperties.java   |   82 +
 .../org/apache/solr/core/ConfigSetService.java  |  243 +
 .../org/apache/solr/core/CoreContainer.java     | 1874 +++++++
 .../org/apache/solr/core/CoreDescriptor.java    |  396 ++
 .../apache/solr/core/CorePropertiesLocator.java |  210 +
 .../java/org/apache/solr/core/CoreSorter.java   |  185 +
 .../java/org/apache/solr/core/CoresLocator.java |   71 +
 .../java/org/apache/solr/core/Diagnostics.java  |   54 +
 .../org/apache/solr/core/DirectoryFactory.java  |  434 ++
 .../solr/core/EphemeralDirectoryFactory.java    |   75 +
 .../apache/solr/core/HdfsDirectoryFactory.java  |  610 +++
 .../solr/core/IndexDeletionPolicyWrapper.java   |  271 +
 .../apache/solr/core/IndexReaderFactory.java    |   74 +
 .../java/org/apache/solr/core/InitParams.java   |  144 +
 .../apache/solr/core/MMapDirectoryFactory.java  |   78 +
 .../org/apache/solr/core/MemClassLoader.java    |  181 +
 .../org/apache/solr/core/MetricsConfig.java     |  134 +
 .../apache/solr/core/NIOFSDirectoryFactory.java |   43 +
 .../solr/core/NRTCachingDirectoryFactory.java   |   63 +
 .../java/org/apache/solr/core/NodeConfig.java   |  402 ++
 .../java/org/apache/solr/core/PluginBag.java    |  602 +++
 .../java/org/apache/solr/core/PluginInfo.java   |  190 +
 .../apache/solr/core/QuerySenderListener.java   |  105 +
 .../apache/solr/core/RAMDirectoryFactory.java   |   47 +
 .../org/apache/solr/core/RequestHandlers.java   |  170 +
 .../org/apache/solr/core/RequestParams.java     |  269 +
 .../apache/solr/core/SchemaCodecFactory.java    |  125 +
 .../solr/core/ShutdownAwareDirectory.java       |   30 +
 .../solr/core/SimpleFSDirectoryFactory.java     |   42 +
 .../solr/core/SimpleTextCodecFactory.java       |   38 +
 .../java/org/apache/solr/core/SolrConfig.java   |  963 ++++
 .../java/org/apache/solr/core/SolrCore.java     | 3154 +++++++++++
 .../core/SolrCoreInitializationException.java   |   32 +
 .../java/org/apache/solr/core/SolrCores.java    |  568 ++
 .../apache/solr/core/SolrDeletionPolicy.java    |  237 +
 .../org/apache/solr/core/SolrEventListener.java |   59 +
 .../java/org/apache/solr/core/SolrInfoBean.java |   96 +
 .../apache/solr/core/SolrResourceLoader.java    |  918 ++++
 .../core/SolrResourceNotFoundException.java     |   38 +
 .../org/apache/solr/core/SolrXmlConfig.java     |  553 ++
 .../solr/core/StandardDirectoryFactory.java     |  165 +
 .../solr/core/StandardIndexReaderFactory.java   |   41 +
 .../solr/core/TransientSolrCoreCache.java       |  127 +
 .../core/TransientSolrCoreCacheDefault.java     |  198 +
 .../core/TransientSolrCoreCacheFactory.java     |   85 +
 .../TransientSolrCoreCacheFactoryDefault.java   |   31 +
 .../java/org/apache/solr/core/ZkContainer.java  |  247 +
 .../apache/solr/core/backup/BackupManager.java  |  292 +
 .../apache/solr/core/backup/package-info.java   |   22 +
 .../backup/repository/BackupRepository.java     |  184 +
 .../repository/BackupRepositoryFactory.java     |   88 +
 .../backup/repository/HdfsBackupRepository.java |  189 +
 .../repository/LocalFileSystemRepository.java   |  158 +
 .../core/backup/repository/package-info.java    |   23 +
 .../java/org/apache/solr/core/package-info.java |   23 +
 .../snapshots/CollectionSnapshotMetaData.java   |  242 +
 .../core/snapshots/SolrSnapshotManager.java     |  300 ++
 .../snapshots/SolrSnapshotMetaDataManager.java  |  416 ++
 .../solr/core/snapshots/SolrSnapshotsTool.java  |  467 ++
 .../solr/core/snapshots/package-info.java       |   22 +
 .../handler/AnalysisRequestHandlerBase.java     |  537 ++
 .../apache/solr/handler/AnalyzeEvaluator.java   |  111 +
 .../org/apache/solr/handler/BlobHandler.java    |  316 ++
 .../apache/solr/handler/CalciteJDBCStream.java  |   76 +
 .../apache/solr/handler/CdcrBufferManager.java  |   71 +
 .../solr/handler/CdcrBufferStateManager.java    |  174 +
 .../solr/handler/CdcrLeaderStateManager.java    |  160 +
 .../org/apache/solr/handler/CdcrParams.java     |  256 +
 .../solr/handler/CdcrProcessStateManager.java   |  174 +
 .../org/apache/solr/handler/CdcrReplicator.java |  251 +
 .../solr/handler/CdcrReplicatorManager.java     |  453 ++
 .../solr/handler/CdcrReplicatorScheduler.java   |  114 +
 .../solr/handler/CdcrReplicatorState.java       |  299 ++
 .../apache/solr/handler/CdcrRequestHandler.java |  861 +++
 .../apache/solr/handler/CdcrStateManager.java   |   47 +
 .../solr/handler/CdcrUpdateLogSynchronizer.java |  188 +
 .../org/apache/solr/handler/ClassifyStream.java |  229 +
 .../solr/handler/ContentStreamHandlerBase.java  |   86 +
 .../solr/handler/ContentStreamLoader.java       |   49 +
 .../handler/DocumentAnalysisRequestHandler.java |  346 ++
 .../apache/solr/handler/DumpRequestHandler.java |  126 +
 .../org/apache/solr/handler/ExportHandler.java  |   49 +
 .../handler/FieldAnalysisRequestHandler.java    |  233 +
 .../org/apache/solr/handler/GraphHandler.java   |  233 +
 .../solr/handler/HaversineMetersEvaluator.java  |   59 +
 .../org/apache/solr/handler/IndexFetcher.java   | 1900 +++++++
 .../solr/handler/MoreLikeThisHandler.java       |  519 ++
 .../solr/handler/NestedRequestHandler.java      |   28 +
 .../solr/handler/NotFoundRequestHandler.java    |   38 +
 .../apache/solr/handler/OldBackupDirectory.java |   69 +
 .../apache/solr/handler/PingRequestHandler.java |  343 ++
 .../apache/solr/handler/RealTimeGetHandler.java |   72 +
 .../apache/solr/handler/ReplicationHandler.java | 1826 +++++++
 .../apache/solr/handler/RequestHandlerBase.java |  329 ++
 .../solr/handler/RequestHandlerUtils.java       |  135 +
 .../org/apache/solr/handler/RestoreCore.java    |  162 +
 .../org/apache/solr/handler/SQLHandler.java     |  201 +
 .../org/apache/solr/handler/SchemaHandler.java  |  257 +
 .../org/apache/solr/handler/SnapShooter.java    |  308 ++
 .../apache/solr/handler/SolrConfigHandler.java  |  898 ++++
 .../solr/handler/SolrDefaultStreamFactory.java  |   54 +
 .../solr/handler/StandardRequestHandler.java    |   37 +
 .../org/apache/solr/handler/StreamHandler.java  |  449 ++
 .../solr/handler/UpdateRequestHandler.java      |  187 +
 .../solr/handler/UpdateRequestHandlerApi.java   |   73 +
 .../solr/handler/admin/AdminHandlersProxy.java  |  128 +
 .../admin/AutoscalingHistoryHandler.java        |  165 +
 .../apache/solr/handler/admin/BackupCoreOp.java |   74 +
 .../handler/admin/BaseHandlerApiSupport.java    |  196 +
 .../solr/handler/admin/ClusterStatus.java       |  246 +
 .../handler/admin/CollectionHandlerApi.java     |  130 +
 .../solr/handler/admin/CollectionsHandler.java  | 1383 +++++
 .../solr/handler/admin/ConfigSetsHandler.java   |  333 ++
 .../handler/admin/ConfigSetsHandlerApi.java     |   89 +
 .../solr/handler/admin/CoreAdminHandler.java    |  427 ++
 .../solr/handler/admin/CoreAdminHandlerApi.java |   85 +
 .../solr/handler/admin/CoreAdminOperation.java  |  368 ++
 .../solr/handler/admin/CreateSnapshotOp.java    |   58 +
 .../solr/handler/admin/DeleteSnapshotOp.java    |   51 +
 .../solr/handler/admin/HealthCheckHandler.java  |  110 +
 .../apache/solr/handler/admin/InfoHandler.java  |  157 +
 .../org/apache/solr/handler/admin/InvokeOp.java |   58 +
 .../solr/handler/admin/LoggingHandler.java      |  165 +
 .../solr/handler/admin/LukeRequestHandler.java  |  802 +++
 .../solr/handler/admin/MergeIndexesOp.java      |  142 +
 .../handler/admin/MetricsCollectorHandler.java  |  235 +
 .../solr/handler/admin/MetricsHandler.java      |  350 ++
 .../handler/admin/MetricsHistoryHandler.java    |  964 ++++
 .../solr/handler/admin/PluginInfoHandler.java   |   85 +
 .../solr/handler/admin/PrepRecoveryOp.java      |  191 +
 .../handler/admin/PropertiesRequestHandler.java |   78 +
 .../solr/handler/admin/RebalanceLeaders.java    |  328 ++
 .../handler/admin/RequestApplyUpdatesOp.java    |   71 +
 .../solr/handler/admin/RequestSyncShardOp.java  |   98 +
 .../solr/handler/admin/RestoreCoreOp.java       |   77 +
 .../solr/handler/admin/SecurityConfHandler.java |  318 ++
 .../handler/admin/SecurityConfHandlerLocal.java |  104 +
 .../handler/admin/SecurityConfHandlerZk.java    |   92 +
 .../admin/SegmentsInfoRequestHandler.java       |  131 +
 .../handler/admin/ShowFileRequestHandler.java   |  371 ++
 .../handler/admin/SolrInfoMBeanHandler.java     |  296 +
 .../org/apache/solr/handler/admin/SplitOp.java  |  169 +
 .../org/apache/solr/handler/admin/StatusOp.java |   58 +
 .../solr/handler/admin/SystemInfoHandler.java   |  416 ++
 .../solr/handler/admin/ThreadDumpHandler.java   |  139 +
 .../handler/admin/ZookeeperInfoHandler.java     |  857 +++
 .../handler/admin/ZookeeperStatusHandler.java   |  222 +
 .../apache/solr/handler/admin/package-info.java |   23 +
 .../solr/handler/component/DebugComponent.java  |  394 ++
 .../solr/handler/component/ExpandComponent.java |  828 +++
 .../solr/handler/component/FacetComponent.java  | 1570 ++++++
 .../solr/handler/component/FieldFacetStats.java |  203 +
 .../handler/component/HighlightComponent.java   |  299 ++
 .../handler/component/HttpShardHandler.java     |  512 ++
 .../component/HttpShardHandlerFactory.java      |  484 ++
 .../component/IterativeMergeStrategy.java       |  137 +
 .../solr/handler/component/MergeStrategy.java   |   75 +
 .../component/MoreLikeThisComponent.java        |  428 ++
 .../PhrasesIdentificationComponent.java         | 1129 ++++
 .../solr/handler/component/PivotFacet.java      |  163 +
 .../solr/handler/component/PivotFacetField.java |  397 ++
 .../PivotFacetFieldValueCollection.java         |  341 ++
 .../handler/component/PivotFacetHelper.java     |  189 +
 .../handler/component/PivotFacetProcessor.java  |  441 ++
 .../solr/handler/component/PivotFacetValue.java |  263 +
 .../solr/handler/component/QueryComponent.java  | 1481 +++++
 .../component/QueryElevationComponent.java      | 1134 ++++
 .../handler/component/RangeFacetProcessor.java  |  276 +
 .../handler/component/RangeFacetRequest.java    |  863 +++
 .../handler/component/RealTimeGetComponent.java | 1268 +++++
 .../component/ReplicaListTransformer.java       |   35 +
 .../solr/handler/component/ResponseBuilder.java |  495 ++
 .../handler/component/ResponseLogComponent.java |  118 +
 .../solr/handler/component/SearchComponent.java |  147 +
 .../solr/handler/component/SearchHandler.java   |  496 ++
 .../apache/solr/handler/component/ShardDoc.java |   84 +
 .../component/ShardFieldSortedHitQueue.java     |  165 +
 .../solr/handler/component/ShardHandler.java    |   27 +
 .../handler/component/ShardHandlerFactory.java  |   61 +
 .../solr/handler/component/ShardRequest.java    |   74 +
 .../solr/handler/component/ShardResponse.java   |   99 +
 .../ShufflingReplicaListTransformer.java        |   39 +
 .../component/SortedDateStatsValues.java        |   89 +
 .../component/SortedNumericStatsValues.java     |  106 +
 .../handler/component/SpatialHeatmapFacets.java |  157 +
 .../handler/component/SpellCheckComponent.java  |  870 +++
 .../handler/component/SpellCheckMergeData.java  |   52 +
 .../solr/handler/component/StatsComponent.java  |  255 +
 .../solr/handler/component/StatsField.java      |  754 +++
 .../solr/handler/component/StatsValues.java     |   81 +
 .../handler/component/StatsValuesFactory.java   |  865 +++
 .../handler/component/SuggestComponent.java     |  555 ++
 .../handler/component/TermVectorComponent.java  |  487 ++
 .../solr/handler/component/TermsComponent.java  |  690 +++
 .../solr/handler/component/package-info.java    |   24 +
 .../solr/handler/export/BoolFieldWriter.java    |   63 +
 .../solr/handler/export/DateFieldWriter.java    |   56 +
 .../apache/solr/handler/export/DoubleCmp.java   |   43 +
 .../solr/handler/export/DoubleFieldWriter.java  |   56 +
 .../apache/solr/handler/export/DoubleValue.java |  101 +
 .../solr/handler/export/DoubleValueSortDoc.java |  102 +
 .../solr/handler/export/ExportWriter.java       |  459 ++
 .../apache/solr/handler/export/FieldWriter.java |   27 +
 .../apache/solr/handler/export/FloatCmp.java    |   44 +
 .../solr/handler/export/FloatFieldWriter.java   |   56 +
 .../apache/solr/handler/export/FloatValue.java  |   98 +
 .../org/apache/solr/handler/export/IntComp.java |   45 +
 .../solr/handler/export/IntFieldWriter.java     |   55 +
 .../apache/solr/handler/export/IntValue.java    |   98 +
 .../org/apache/solr/handler/export/LongCmp.java |   45 +
 .../solr/handler/export/LongFieldWriter.java    |   55 +
 .../apache/solr/handler/export/LongValue.java   |   98 +
 .../solr/handler/export/MultiFieldWriter.java   |  104 +
 .../solr/handler/export/PriorityQueue.java      |  218 +
 .../solr/handler/export/QuadValueSortDoc.java   |  139 +
 .../solr/handler/export/SingleValueSortDoc.java |   89 +
 .../org/apache/solr/handler/export/SortDoc.java |  127 +
 .../apache/solr/handler/export/SortQueue.java   |   52 +
 .../apache/solr/handler/export/SortValue.java   |   38 +
 .../solr/handler/export/StringFieldWriter.java  |   62 +
 .../apache/solr/handler/export/StringValue.java |  119 +
 .../solr/handler/export/TripleValueSortDoc.java |  121 +
 .../solr/handler/export/package-info.java       |   23 +
 .../apache/solr/handler/loader/CSVLoader.java   |   45 +
 .../solr/handler/loader/CSVLoaderBase.java      |  393 ++
 .../handler/loader/ContentStreamLoader.java     |   55 +
 .../solr/handler/loader/JavabinLoader.java      |  220 +
 .../apache/solr/handler/loader/JsonLoader.java  |  716 +++
 .../apache/solr/handler/loader/XMLLoader.java   |  531 ++
 .../solr/handler/loader/package-info.java       |   23 +
 .../org/apache/solr/handler/package-info.java   |   23 +
 .../solr/handler/sql/CalciteSolrDriver.java     |   69 +
 .../apache/solr/handler/sql/LimitStream.java    |   89 +
 .../apache/solr/handler/sql/SolrAggregate.java  |  112 +
 .../apache/solr/handler/sql/SolrEnumerator.java |  147 +
 .../org/apache/solr/handler/sql/SolrFilter.java |  382 ++
 .../org/apache/solr/handler/sql/SolrMethod.java |   44 +
 .../apache/solr/handler/sql/SolrProject.java    |   64 +
 .../org/apache/solr/handler/sql/SolrRel.java    |  106 +
 .../org/apache/solr/handler/sql/SolrRules.java  |  248 +
 .../org/apache/solr/handler/sql/SolrSchema.java |  141 +
 .../org/apache/solr/handler/sql/SolrSort.java   |   79 +
 .../org/apache/solr/handler/sql/SolrTable.java  |  885 +++
 .../apache/solr/handler/sql/SolrTableScan.java  |   85 +
 .../handler/sql/SolrToEnumerableConverter.java  |  136 +
 .../sql/SolrToEnumerableConverterRule.java      |   39 +
 .../apache/solr/handler/sql/package-info.java   |   21 +
 .../solr/handler/tagger/OffsetCorrector.java    |  178 +
 .../solr/handler/tagger/TagClusterReducer.java  |  103 +
 .../org/apache/solr/handler/tagger/TagLL.java   |  176 +
 .../org/apache/solr/handler/tagger/Tagger.java  |  230 +
 .../handler/tagger/TaggerRequestHandler.java    |  397 ++
 .../solr/handler/tagger/TaggingAttribute.java   |   65 +
 .../handler/tagger/TaggingAttributeImpl.java    |   79 +
 .../solr/handler/tagger/TermPrefixCursor.java   |  189 +
 .../solr/handler/tagger/XmlOffsetCorrector.java |  113 +
 .../solr/handler/tagger/package-info.java       |   27 +
 .../highlight/BreakIteratorBoundaryScanner.java |   76 +
 .../apache/solr/highlight/DefaultEncoder.java   |   42 +
 .../solr/highlight/DefaultSolrHighlighter.java  |  991 ++++
 .../apache/solr/highlight/GapFragmenter.java    |   99 +
 .../solr/highlight/HighlightingPluginBase.java  |   87 +
 .../org/apache/solr/highlight/HtmlEncoder.java  |   42 +
 .../apache/solr/highlight/HtmlFormatter.java    |   48 +
 .../solr/highlight/LuceneRegexFragmenter.java   |  217 +
 .../solr/highlight/PostingsSolrHighlighter.java |   71 +
 .../apache/solr/highlight/RegexFragmenter.java  |   90 +
 .../highlight/ScoreOrderFragmentsBuilder.java   |   42 +
 .../solr/highlight/SimpleBoundaryScanner.java   |   45 +
 .../solr/highlight/SimpleFragListBuilder.java   |   44 +
 .../solr/highlight/SimpleFragmentsBuilder.java  |   42 +
 .../solr/highlight/SingleFragListBuilder.java   |   44 +
 .../solr/highlight/SolrBoundaryScanner.java     |   35 +
 .../org/apache/solr/highlight/SolrEncoder.java  |   43 +
 .../apache/solr/highlight/SolrFormatter.java    |   44 +
 .../solr/highlight/SolrFragListBuilder.java     |   42 +
 .../apache/solr/highlight/SolrFragmenter.java   |   44 +
 .../solr/highlight/SolrFragmentsBuilder.java    |   80 +
 .../apache/solr/highlight/SolrHighlighter.java  |  123 +
 .../solr/highlight/UnifiedSolrHighlighter.java  |  419 ++
 .../solr/highlight/WeightedFragListBuilder.java |   44 +
 .../org/apache/solr/highlight/package-info.java |   25 +
 .../solr/index/DefaultMergePolicyFactory.java   |   45 +
 .../index/LogByteSizeMergePolicyFactory.java    |   38 +
 .../solr/index/LogDocMergePolicyFactory.java    |   38 +
 .../apache/solr/index/MergePolicyFactory.java   |   40 +
 .../solr/index/MergePolicyFactoryArgs.java      |   66 +
 .../apache/solr/index/NoMergePolicyFactory.java |   34 +
 .../solr/index/SimpleMergePolicyFactory.java    |   42 +
 .../solr/index/SlowCompositeReaderWrapper.java  |  298 +
 .../apache/solr/index/SortingMergePolicy.java   |   44 +
 .../solr/index/SortingMergePolicyFactory.java   |   49 +
 .../solr/index/TieredMergePolicyFactory.java    |   38 +
 .../UninvertDocValuesMergePolicyFactory.java    |  219 +
 .../index/UpgradeIndexMergePolicyFactory.java   |   39 +
 .../solr/index/WrapperMergePolicyFactory.java   |  121 +
 .../apache/solr/index/hdfs/CheckHdfsIndex.java  |   79 +
 .../apache/solr/index/hdfs/package-info.java    |   22 +
 .../org/apache/solr/index/package-info.java     |   22 +
 .../org/apache/solr/internal/csv/CSVParser.java |  561 ++
 .../apache/solr/internal/csv/CSVPrinter.java    |  305 ++
 .../apache/solr/internal/csv/CSVStrategy.java   |  245 +
 .../org/apache/solr/internal/csv/CSVUtils.java  |  121 +
 .../apache/solr/internal/csv/CharBuffer.java    |  209 +
 .../internal/csv/ExtendedBufferedReader.java    |  315 ++
 .../apache/solr/internal/csv/package-info.java  |   23 +
 .../solr/internal/csv/writer/CSVConfig.java     |  283 +
 .../internal/csv/writer/CSVConfigGuesser.java   |  185 +
 .../solr/internal/csv/writer/CSVField.java      |  108 +
 .../solr/internal/csv/writer/CSVWriter.java     |  132 +
 .../solr/internal/csv/writer/package-info.java  |   23 +
 .../org/apache/solr/legacy/BBoxStrategy.java    |  705 +++
 .../org/apache/solr/legacy/BBoxValueSource.java |   98 +
 .../apache/solr/legacy/DistanceValueSource.java |  120 +
 .../apache/solr/legacy/LegacyDoubleField.java   |  174 +
 .../org/apache/solr/legacy/LegacyField.java     |   90 +
 .../org/apache/solr/legacy/LegacyFieldType.java |  149 +
 .../apache/solr/legacy/LegacyFloatField.java    |  174 +
 .../org/apache/solr/legacy/LegacyIntField.java  |  175 +
 .../org/apache/solr/legacy/LegacyLongField.java |  184 +
 .../solr/legacy/LegacyNumericRangeQuery.java    |  537 ++
 .../solr/legacy/LegacyNumericTokenStream.java   |  357 ++
 .../apache/solr/legacy/LegacyNumericType.java   |   34 +
 .../apache/solr/legacy/LegacyNumericUtils.java  |  510 ++
 .../apache/solr/legacy/PointVectorStrategy.java |  289 +
 .../solr/legacy/doc-files/nrq-formula-1.png     |  Bin 0 -> 3171 bytes
 .../solr/legacy/doc-files/nrq-formula-2.png     |  Bin 0 -> 3694 bytes
 .../org/apache/solr/legacy/package-info.java    |   21 +
 .../org/apache/solr/logging/CircularList.java   |  153 +
 .../org/apache/solr/logging/ListenerConfig.java |   35 +
 .../org/apache/solr/logging/LogWatcher.java     |  194 +
 .../apache/solr/logging/LogWatcherConfig.java   |   73 +
 .../org/apache/solr/logging/LoggerInfo.java     |   69 +
 .../apache/solr/logging/MDCLoggingContext.java  |  160 +
 .../org/apache/solr/logging/jul/JulInfo.java    |   72 +
 .../org/apache/solr/logging/jul/JulWatcher.java |  169 +
 .../apache/solr/logging/jul/RecordHandler.java  |   49 +
 .../apache/solr/logging/jul/package-info.java   |   22 +
 .../solr/logging/log4j2/Log4j2Watcher.java      |  293 +
 .../solr/logging/log4j2/package-info.java       |   22 +
 .../org/apache/solr/logging/package-info.java   |   22 +
 .../apache/solr/metrics/AggregateMetric.java    |  200 +
 .../solr/metrics/AltBufferPoolMetricSet.java    |   47 +
 .../metrics/FilteringSolrMetricReporter.java    |   59 +
 .../apache/solr/metrics/MetricSuppliers.java    |  363 ++
 .../org/apache/solr/metrics/MetricsMap.java     |  197 +
 .../solr/metrics/OperatingSystemMetricSet.java  |   46 +
 .../solr/metrics/SolrCoreContainerReporter.java |   47 +
 .../solr/metrics/SolrCoreMetricManager.java     |  232 +
 .../apache/solr/metrics/SolrCoreReporter.java   |   47 +
 .../org/apache/solr/metrics/SolrMetricInfo.java |  111 +
 .../apache/solr/metrics/SolrMetricManager.java  | 1180 ++++
 .../apache/solr/metrics/SolrMetricProducer.java |   35 +
 .../apache/solr/metrics/SolrMetricReporter.java |  126 +
 .../org/apache/solr/metrics/package-info.java   |   23 +
 .../metrics/reporters/ReporterClientCache.java  |   84 +
 .../metrics/reporters/SolrGangliaReporter.java  |  135 +
 .../metrics/reporters/SolrGraphiteReporter.java |  118 +
 .../solr/metrics/reporters/SolrJmxReporter.java |  243 +
 .../metrics/reporters/SolrSlf4jReporter.java    |  172 +
 .../reporters/jmx/JmxMetricsReporter.java       |  754 +++
 .../reporters/jmx/JmxObjectNameFactory.java     |  174 +
 .../metrics/reporters/jmx/package-info.java     |   21 +
 .../solr/metrics/reporters/package-info.java    |   22 +
 .../reporters/solr/SolrClusterReporter.java     |  295 +
 .../metrics/reporters/solr/SolrReporter.java    |  407 ++
 .../reporters/solr/SolrShardReporter.java       |  189 +
 .../metrics/reporters/solr/package-info.java    |   22 +
 .../apache/solr/metrics/rrd/SolrRrdBackend.java |  138 +
 .../solr/metrics/rrd/SolrRrdBackendFactory.java |  451 ++
 .../apache/solr/metrics/rrd/package-info.java   |   22 +
 .../main/java/org/apache/solr/package-info.java |   22 +
 .../java/org/apache/solr/parser/CharStream.java |   99 +
 .../org/apache/solr/parser/FastCharStream.java  |  129 +
 .../org/apache/solr/parser/ParseException.java  |  187 +
 .../org/apache/solr/parser/QueryParser.java     |  933 ++++
 .../java/org/apache/solr/parser/QueryParser.jj  |  352 ++
 .../solr/parser/QueryParserConstants.java       |  135 +
 .../solr/parser/QueryParserTokenManager.java    | 1619 ++++++
 .../apache/solr/parser/SolrQueryParserBase.java | 1257 +++++
 .../main/java/org/apache/solr/parser/Token.java |  131 +
 .../org/apache/solr/parser/TokenMgrError.java   |  147 +
 .../org/apache/solr/parser/package-info.java    |   23 +
 .../java/org/apache/solr/query/FilterQuery.java |   96 +
 .../org/apache/solr/query/SolrRangeQuery.java   |  510 ++
 .../java/org/apache/solr/query/package.html     |   27 +
 .../apache/solr/request/DocValuesFacets.java    |  396 ++
 .../org/apache/solr/request/DocValuesStats.java |  233 +
 .../org/apache/solr/request/IntervalFacets.java |  934 ++++
 .../solr/request/LocalSolrQueryRequest.java     |   71 +
 .../org/apache/solr/request/NumericFacets.java  |  523 ++
 .../request/PerSegmentSingleValuedFaceting.java |  427 ++
 .../solr/request/RegexBytesRefFilter.java       |   46 +
 .../org/apache/solr/request/SimpleFacets.java   | 1202 +++++
 .../apache/solr/request/SolrQueryRequest.java   |  139 +
 .../solr/request/SolrQueryRequestBase.java      |  217 +
 .../apache/solr/request/SolrRequestHandler.java |   65 +
 .../apache/solr/request/SolrRequestInfo.java    |  161 +
 .../solr/request/SubstringBytesRefFilter.java   |   52 +
 .../org/apache/solr/request/json/JSONUtil.java  |   79 +
 .../solr/request/json/JsonQueryConverter.java   |  141 +
 .../apache/solr/request/json/ObjectUtil.java    |  113 +
 .../apache/solr/request/json/RequestUtil.java   |  330 ++
 .../org/apache/solr/request/json/package.html   |   27 +
 .../solr/request/macro/MacroExpander.java       |  195 +
 .../org/apache/solr/request/macro/package.html  |   27 +
 .../org/apache/solr/request/package-info.java   |   23 +
 .../solr/response/BasicResultContext.java       |   75 +
 .../response/BinaryQueryResponseWriter.java     |   37 +
 .../solr/response/BinaryResponseWriter.java     |  179 +
 .../apache/solr/response/CSVResponseWriter.java |  505 ++
 .../org/apache/solr/response/DocsStreamer.java  |  228 +
 .../solr/response/GeoJSONResponseWriter.java    |  336 ++
 .../solr/response/GraphMLResponseWriter.java    |  162 +
 .../solr/response/JSONResponseWriter.java       |  277 +
 .../org/apache/solr/response/JSONWriter.java    |  181 +
 .../apache/solr/response/PHPResponseWriter.java |  122 +
 .../response/PHPSerializedResponseWriter.java   |  278 +
 .../solr/response/PythonResponseWriter.java     |  151 +
 .../solr/response/QueryResponseWriter.java      |   87 +
 .../solr/response/QueryResponseWriterUtil.java  |   76 +
 .../apache/solr/response/RawResponseWriter.java |  108 +
 .../org/apache/solr/response/ResultContext.java |   59 +
 .../solr/response/RubyResponseWriter.java       |   94 +
 .../solr/response/SchemaXmlResponseWriter.java  |   49 +
 .../apache/solr/response/SchemaXmlWriter.java   |  477 ++
 .../solr/response/SmileResponseWriter.java      |  204 +
 .../apache/solr/response/SolrQueryResponse.java |  363 ++
 .../solr/response/TextResponseWriter.java       |  189 +
 .../apache/solr/response/XMLResponseWriter.java |   49 +
 .../org/apache/solr/response/XMLWriter.java     |  370 ++
 .../solr/response/XSLTResponseWriter.java       |  137 +
 .../org/apache/solr/response/package-info.java  |   23 +
 .../transform/BaseEditorialTransformer.java     |   81 +
 .../response/transform/ChildDocTransformer.java |  253 +
 .../transform/ChildDocTransformerFactory.java   |  169 +
 .../transform/DocIdAugmenterFactory.java        |   60 +
 .../solr/response/transform/DocTransformer.java |  140 +
 .../response/transform/DocTransformers.java     |   98 +
 .../transform/ElevatedMarkerFactory.java        |   54 +
 .../transform/ExcludedMarkerFactory.java        |   57 +
 .../transform/ExplainAugmenterFactory.java      |  133 +
 .../transform/GeoTransformerFactory.java        |  231 +
 .../transform/RawValueTransformerFactory.java   |  165 +
 .../transform/RenameFieldTransformer.java       |   53 +
 .../solr/response/transform/ScoreAugmenter.java |   52 +
 .../transform/ShardAugmenterFactory.java        |   44 +
 .../transform/SubQueryAugmenterFactory.java     |  376 ++
 .../response/transform/TransformerFactory.java  |   55 +
 .../transform/ValueAugmenterFactory.java        |  103 +
 .../transform/ValueSourceAugmenter.java         |  105 +
 .../response/transform/WriteableGeoJSON.java    |   55 +
 .../solr/response/transform/package-info.java   |   23 +
 .../org/apache/solr/rest/BaseSolrResource.java  |  216 +
 .../java/org/apache/solr/rest/DELETEable.java   |   26 +
 .../main/java/org/apache/solr/rest/GETable.java |   26 +
 .../org/apache/solr/rest/ManagedResource.java   |  434 ++
 .../solr/rest/ManagedResourceObserver.java      |   37 +
 .../solr/rest/ManagedResourceStorage.java       |  534 ++
 .../java/org/apache/solr/rest/POSTable.java     |   26 +
 .../main/java/org/apache/solr/rest/PUTable.java |   26 +
 .../java/org/apache/solr/rest/RestManager.java  |  797 +++
 .../org/apache/solr/rest/SolrSchemaRestApi.java |   78 +
 .../java/org/apache/solr/rest/package-info.java |   23 +
 .../solr/rest/schema/FieldTypeXmlAdapter.java   |  186 +
 .../analysis/BaseManagedTokenFilterFactory.java |   82 +
 .../analysis/ManagedStopFilterFactory.java      |   95 +
 .../analysis/ManagedSynonymFilterFactory.java   |  443 ++
 .../ManagedSynonymGraphFilterFactory.java       |  438 ++
 .../schema/analysis/ManagedWordSetResource.java |  200 +
 .../solr/rest/schema/analysis/package-info.java |   23 +
 .../apache/solr/rest/schema/package-info.java   |   22 +
 .../apache/solr/schema/AbstractEnumField.java   |  313 ++
 .../solr/schema/AbstractSpatialFieldType.java   |  459 ++
 .../AbstractSpatialPrefixTreeFieldType.java     |  116 +
 .../solr/schema/AbstractSubTypeFieldType.java   |  138 +
 .../java/org/apache/solr/schema/BBoxField.java  |  196 +
 .../org/apache/solr/schema/BinaryField.java     |  108 +
 .../java/org/apache/solr/schema/BoolField.java  |  299 ++
 .../solr/schema/ClassicIndexSchemaFactory.java  |   38 +
 .../org/apache/solr/schema/CollationField.java  |  281 +
 .../apache/solr/schema/CoordinateFieldType.java |   49 +
 .../java/org/apache/solr/schema/CopyField.java  |   81 +
 .../org/apache/solr/schema/CurrencyField.java   |  111 +
 .../apache/solr/schema/CurrencyFieldType.java   |  672 +++
 .../org/apache/solr/schema/CurrencyValue.java   |  231 +
 .../org/apache/solr/schema/DatePointField.java  |  254 +
 .../org/apache/solr/schema/DateRangeField.java  |  180 +
 .../apache/solr/schema/DateValueFieldType.java  |   23 +
 .../apache/solr/schema/DoublePointField.java    |  164 +
 .../solr/schema/DoubleValueFieldType.java       |   23 +
 .../java/org/apache/solr/schema/EnumField.java  |  202 +
 .../org/apache/solr/schema/EnumFieldType.java   |  224 +
 .../solr/schema/ExchangeRateProvider.java       |   66 +
 .../apache/solr/schema/ExternalFileField.java   |  131 +
 .../solr/schema/ExternalFileFieldReloader.java  |   92 +
 .../org/apache/solr/schema/FieldProperties.java |  127 +
 .../java/org/apache/solr/schema/FieldType.java  | 1295 +++++
 .../solr/schema/FieldTypePluginLoader.java      |  432 ++
 .../solr/schema/FileExchangeRateProvider.java   |  230 +
 .../org/apache/solr/schema/FloatPointField.java |  164 +
 .../apache/solr/schema/FloatValueFieldType.java |   23 +
 .../org/apache/solr/schema/GeoHashField.java    |  103 +
 .../solr/schema/HasImplicitIndexAnalyzer.java   |   25 +
 .../org/apache/solr/schema/IndexSchema.java     | 1944 +++++++
 .../apache/solr/schema/IndexSchemaFactory.java  |  100 +
 .../org/apache/solr/schema/IntPointField.java   |  163 +
 .../apache/solr/schema/IntValueFieldType.java   |   24 +
 .../solr/schema/JsonPreAnalyzedParser.java      |  278 +
 .../solr/schema/LatLonPointSpatialField.java    |  306 ++
 .../java/org/apache/solr/schema/LatLonType.java |  598 +++
 .../org/apache/solr/schema/LongPointField.java  |  162 +
 .../apache/solr/schema/LongValueFieldType.java  |   23 +
 .../apache/solr/schema/ManagedIndexSchema.java  | 1393 +++++
 .../solr/schema/ManagedIndexSchemaFactory.java  |  413 ++
 .../java/org/apache/solr/schema/NumberType.java |   49 +
 .../apache/solr/schema/NumericFieldType.java    |  305 ++
 .../solr/schema/NumericValueFieldType.java      |   23 +
 .../schema/OpenExchangeRatesOrgProvider.java    |  294 +
 .../java/org/apache/solr/schema/PointField.java |  303 ++
 .../java/org/apache/solr/schema/PointType.java  |  310 ++
 .../apache/solr/schema/PreAnalyzedField.java    |  386 ++
 .../apache/solr/schema/PrimitiveFieldType.java  |   45 +
 .../org/apache/solr/schema/RandomSortField.java |  203 +
 .../schema/RptWithGeometrySpatialField.java     |  238 +
 .../org/apache/solr/schema/SchemaAware.java     |   37 +
 .../org/apache/solr/schema/SchemaField.java     |  431 ++
 .../org/apache/solr/schema/SchemaManager.java   |  435 ++
 .../apache/solr/schema/SimilarityFactory.java   |   87 +
 .../solr/schema/SimplePreAnalyzedParser.java    |  574 ++
 .../apache/solr/schema/SortableTextField.java   |  215 +
 .../schema/SpatialPointVectorFieldType.java     |  102 +
 .../apache/solr/schema/SpatialQueryable.java    |   34 +
 .../SpatialRecursivePrefixTreeFieldType.java    |   56 +
 .../SpatialTermQueryPrefixTreeFieldType.java    |   33 +
 .../java/org/apache/solr/schema/StrField.java   |  137 +
 .../org/apache/solr/schema/StrFieldSource.java  |   80 +
 .../java/org/apache/solr/schema/TextField.java  |  212 +
 .../org/apache/solr/schema/TrieDateField.java   |  104 +
 .../org/apache/solr/schema/TrieDoubleField.java |  136 +
 .../java/org/apache/solr/schema/TrieField.java  |  683 +++
 .../org/apache/solr/schema/TrieFloatField.java  |  136 +
 .../org/apache/solr/schema/TrieIntField.java    |  134 +
 .../org/apache/solr/schema/TrieLongField.java   |  134 +
 .../java/org/apache/solr/schema/UUIDField.java  |  104 +
 .../apache/solr/schema/ZkIndexSchemaReader.java |  223 +
 .../org/apache/solr/schema/package-info.java    |   24 +
 .../apache/solr/search/AbstractReRankQuery.java |   86 +
 .../org/apache/solr/search/AnalyticsQuery.java  |   80 +
 .../java/org/apache/solr/search/BitDocSet.java  |  388 ++
 .../solr/search/BitsFilteredDocIdSet.java       |   62 +
 .../solr/search/BitsFilteredPostingsEnum.java   |   57 +
 .../apache/solr/search/BoolQParserPlugin.java   |   58 +
 .../apache/solr/search/BoostQParserPlugin.java  |   87 +
 .../org/apache/solr/search/CacheConfig.java     |  161 +
 .../apache/solr/search/CacheRegenerator.java    |   42 +
 .../solr/search/CollapsingQParserPlugin.java    | 2843 ++++++++++
 .../solr/search/ComplexPhraseQParserPlugin.java |  175 +
 .../java/org/apache/solr/search/CursorMark.java |  296 +
 .../apache/solr/search/DelegatingCollector.java |   89 +
 .../org/apache/solr/search/DisMaxQParser.java   |  294 +
 .../apache/solr/search/DisMaxQParserPlugin.java |  118 +
 .../org/apache/solr/search/DocIterator.java     |   52 +
 .../java/org/apache/solr/search/DocList.java    |  141 +
 .../org/apache/solr/search/DocListAndSet.java   |   36 +
 .../java/org/apache/solr/search/DocSet.java     |  135 +
 .../java/org/apache/solr/search/DocSetBase.java |  266 +
 .../org/apache/solr/search/DocSetBuilder.java   |  215 +
 .../org/apache/solr/search/DocSetCollector.java |  176 +
 .../org/apache/solr/search/DocSetProducer.java  |   24 +
 .../java/org/apache/solr/search/DocSetUtil.java |  288 +
 .../java/org/apache/solr/search/DocSlice.java   |  182 +
 .../solr/search/EarlyTerminatingCollector.java  |   78 +
 .../EarlyTerminatingCollectorException.java     |   62 +
 .../EarlyTerminatingSortingCollector.java       |  132 +
 .../apache/solr/search/ExportQParserPlugin.java |  193 +
 .../solr/search/ExtendedDismaxQParser.java      | 1763 ++++++
 .../search/ExtendedDismaxQParserPlugin.java     |   33 +
 .../org/apache/solr/search/ExtendedQuery.java   |   39 +
 .../apache/solr/search/ExtendedQueryBase.java   |   81 +
 .../org/apache/solr/search/FastLRUCache.java    |  306 ++
 .../org/apache/solr/search/FieldParams.java     |   45 +
 .../apache/solr/search/FieldQParserPlugin.java  |   49 +
 .../java/org/apache/solr/search/Filter.java     |  145 +
 .../apache/solr/search/FilteredDocIdSet.java    |  114 +
 .../solr/search/FloatPayloadValueSource.java    |  225 +
 .../org/apache/solr/search/FunctionQParser.java |  459 ++
 .../solr/search/FunctionQParserPlugin.java      |   34 +
 .../solr/search/FunctionRangeQParserPlugin.java |   72 +
 .../apache/solr/search/FunctionRangeQuery.java  |   76 +
 .../solr/search/GraphTermsQParserPlugin.java    |  785 +++
 .../java/org/apache/solr/search/Grouping.java   | 1037 ++++
 .../java/org/apache/solr/search/HashDocSet.java |  310 ++
 .../apache/solr/search/HashQParserPlugin.java   |  376 ++
 .../solr/search/IGainTermsQParserPlugin.java    |  246 +
 .../java/org/apache/solr/search/Insanity.java   |  132 +
 .../apache/solr/search/JoinQParserPlugin.java   |  584 ++
 .../java/org/apache/solr/search/LFUCache.java   |  318 ++
 .../java/org/apache/solr/search/LRUCache.java   |  402 ++
 .../search/LegacyNumericRangeQueryBuilder.java  |  136 +
 .../org/apache/solr/search/LuceneQParser.java   |   58 +
 .../apache/solr/search/LuceneQParserPlugin.java |   40 +
 .../apache/solr/search/MaxScoreCollector.java   |   55 +
 .../org/apache/solr/search/MaxScoreQParser.java |   97 +
 .../solr/search/MaxScoreQParserPlugin.java      |   36 +
 .../apache/solr/search/NestedQParserPlugin.java |   73 +
 .../org/apache/solr/search/NoOpRegenerator.java |   37 +
 .../solr/search/PayloadCheckQParserPlugin.java  |  109 +
 .../solr/search/PayloadScoreQParserPlugin.java  |   92 +
 .../org/apache/solr/search/PointMerger.java     |  456 ++
 .../java/org/apache/solr/search/PostFilter.java |   46 +
 .../apache/solr/search/PrefixQParserPlugin.java |   45 +
 .../java/org/apache/solr/search/QParser.java    |  375 ++
 .../org/apache/solr/search/QParserPlugin.java   |  122 +
 .../org/apache/solr/search/QueryCommand.java    |  222 +
 .../org/apache/solr/search/QueryContext.java    |  109 +
 .../org/apache/solr/search/QueryParsing.java    |  381 ++
 .../org/apache/solr/search/QueryResult.java     |   85 +
 .../org/apache/solr/search/QueryResultKey.java  |  156 +
 .../java/org/apache/solr/search/QueryUtils.java |  143 +
 .../java/org/apache/solr/search/RankQuery.java  |   36 +
 .../apache/solr/search/RawQParserPlugin.java    |   47 +
 .../org/apache/solr/search/ReRankCollector.java |  188 +
 .../apache/solr/search/ReRankQParserPlugin.java |  134 +
 .../org/apache/solr/search/ReRankWeight.java    |   48 +
 .../org/apache/solr/search/ReturnFields.java    |   89 +
 .../org/apache/solr/search/ScoreFilter.java     |   21 +
 .../search/SignificantTermsQParserPlugin.java   |  269 +
 .../apache/solr/search/SimpleQParserPlugin.java |  245 +
 .../java/org/apache/solr/search/SolrCache.java  |  129 +
 .../org/apache/solr/search/SolrCacheBase.java   |  137 +
 .../solr/search/SolrConstantScoreQuery.java     |  139 +
 .../org/apache/solr/search/SolrCoreParser.java  |  108 +
 .../apache/solr/search/SolrDocumentFetcher.java |  787 +++
 .../apache/solr/search/SolrFieldCacheBean.java  |   77 +
 .../java/org/apache/solr/search/SolrFilter.java |   45 +
 .../apache/solr/search/SolrIndexSearcher.java   | 2462 +++++++++
 .../apache/solr/search/SolrQueryBuilder.java    |   34 +
 .../org/apache/solr/search/SolrQueryParser.java |   31 +
 .../solr/search/SolrQueryTimeoutImpl.java       |   90 +
 .../apache/solr/search/SolrReturnFields.java    |  509 ++
 .../solr/search/SolrSpanQueryBuilder.java       |   33 +
 .../java/org/apache/solr/search/SortSpec.java   |  112 +
 .../org/apache/solr/search/SortSpecParsing.java |  225 +
 .../org/apache/solr/search/SortedIntDocSet.java |  807 +++
 .../java/org/apache/solr/search/Sorting.java    |   79 +
 .../solr/search/SpatialBoxQParserPlugin.java    |   32 +
 .../solr/search/SpatialFilterQParser.java       |  100 +
 .../solr/search/SpatialFilterQParserPlugin.java |   58 +
 .../org/apache/solr/search/SpatialOptions.java  |   47 +
 .../java/org/apache/solr/search/StrParser.java  |  336 ++
 .../solr/search/SurroundQParserPlugin.java      |  103 +
 .../apache/solr/search/SwitchQParserPlugin.java |  194 +
 .../org/apache/solr/search/SyntaxError.java     |   30 +
 .../apache/solr/search/TermQParserPlugin.java   |   69 +
 .../apache/solr/search/TermsQParserPlugin.java  |  143 +
 .../TextLogisticRegressionQParserPlugin.java    |  287 +
 .../apache/solr/search/ValueSourceParser.java   | 1574 ++++++
 .../org/apache/solr/search/WrappedQuery.java    |   72 +
 .../apache/solr/search/XmlQParserPlugin.java    |  109 +
 .../solr/search/facet/AggValueSource.java       |   60 +
 .../org/apache/solr/search/facet/AvgAgg.java    |   58 +
 .../org/apache/solr/search/facet/BlockJoin.java |   75 +
 .../org/apache/solr/search/facet/CountAgg.java  |   35 +
 .../apache/solr/search/facet/FacetBucket.java   |  189 +
 .../solr/search/facet/FacetDebugInfo.java       |   93 +
 .../apache/solr/search/facet/FacetField.java    |  190 +
 .../solr/search/facet/FacetFieldMerger.java     |  234 +
 .../solr/search/facet/FacetFieldProcessor.java  |  736 +++
 .../facet/FacetFieldProcessorByArray.java       |  146 +
 .../facet/FacetFieldProcessorByArrayDV.java     |  338 ++
 .../facet/FacetFieldProcessorByArrayUIF.java    |   71 +
 .../FacetFieldProcessorByEnumTermsStream.java   |  369 ++
 .../facet/FacetFieldProcessorByHashDV.java      |  481 ++
 .../apache/solr/search/facet/FacetHeatmap.java  |  520 ++
 .../apache/solr/search/facet/FacetMerger.java   |  152 +
 .../apache/solr/search/facet/FacetModule.java   |  498 ++
 .../solr/search/facet/FacetProcessor.java       |  495 ++
 .../apache/solr/search/facet/FacetQuery.java    |   69 +
 .../apache/solr/search/facet/FacetRange.java    |  887 +++
 .../solr/search/facet/FacetRangeMerger.java     |  167 +
 .../apache/solr/search/facet/FacetRequest.java  | 1034 ++++
 .../search/facet/FacetRequestSortedMerger.java  |  315 ++
 .../org/apache/solr/search/facet/FieldUtil.java |  208 +
 .../org/apache/solr/search/facet/HLLAgg.java    |  250 +
 .../apache/solr/search/facet/LegacyFacet.java   |  318 ++
 .../org/apache/solr/search/facet/MinMaxAgg.java |  350 ++
 .../apache/solr/search/facet/PercentileAgg.java |  224 +
 .../solr/search/facet/RelatednessAgg.java       |  504 ++
 .../solr/search/facet/SimpleAggValueSource.java |   62 +
 .../org/apache/solr/search/facet/SlotAcc.java   |  679 +++
 .../org/apache/solr/search/facet/StddevAgg.java |   66 +
 .../solr/search/facet/StrAggValueSource.java    |   50 +
 .../org/apache/solr/search/facet/SumAgg.java    |   52 +
 .../org/apache/solr/search/facet/SumsqAgg.java  |   37 +
 .../solr/search/facet/UnInvertedField.java      |  641 +++
 .../org/apache/solr/search/facet/UniqueAgg.java |  265 +
 .../solr/search/facet/UniqueBlockAgg.java       |   91 +
 .../solr/search/facet/UniqueMultiDvSlotAcc.java |   93 +
 .../search/facet/UniqueMultivaluedSlotAcc.java  |   71 +
 .../search/facet/UniqueSinglevaluedSlotAcc.java |   97 +
 .../apache/solr/search/facet/UniqueSlotAcc.java |  152 +
 .../apache/solr/search/facet/VarianceAgg.java   |   65 +
 .../org/apache/solr/search/facet/package.html   |   28 +
 .../search/function/CollapseScoreFunction.java  |   73 +
 .../search/function/ConcatStringFunction.java   |   53 +
 .../solr/search/function/EqualFunction.java     |   61 +
 .../search/function/FieldNameValueSource.java   |   60 +
 .../solr/search/function/FileFloatSource.java   |  368 ++
 .../search/function/MultiStringFunction.java    |  146 +
 .../solr/search/function/OrdFieldSource.java    |  174 +
 .../search/function/ReverseOrdFieldSource.java  |  134 +
 .../function/SolrComparisonBoolFunction.java    |   60 +
 .../search/function/ValueSourceRangeFilter.java |  147 +
 .../distance/GeoDistValueSourceParser.java      |  212 +
 .../function/distance/GeohashFunction.java      |   99 +
 .../distance/GeohashHaversineFunction.java      |  133 +
 .../distance/HaversineConstFunction.java        |  118 +
 .../function/distance/HaversineFunction.java    |  152 +
 .../distance/SquaredEuclideanFunction.java      |   74 +
 .../distance/StringDistanceFunction.java        |  109 +
 .../distance/VectorDistanceFunction.java        |  217 +
 .../search/function/distance/package-info.java  |   24 +
 .../solr/search/function/package-info.java      |   24 +
 .../apache/solr/search/grouping/Command.java    |   72 +
 .../solr/search/grouping/CommandHandler.java    |  257 +
 .../search/grouping/GroupingSpecification.java  |  177 +
 .../grouping/collector/FilterCollector.java     |   69 +
 .../search/grouping/collector/package-info.java |   22 +
 .../distributed/ShardRequestFactory.java        |   38 +
 .../distributed/ShardResponseProcessor.java     |   38 +
 .../distributed/command/GroupConverter.java     |  160 +
 .../distributed/command/QueryCommand.java       |  179 +
 .../distributed/command/QueryCommandResult.java |   47 +
 .../command/SearchGroupsFieldCommand.java       |  158 +
 .../command/SearchGroupsFieldCommandResult.java |   44 +
 .../command/TopGroupsFieldCommand.java          |  203 +
 .../distributed/command/package-info.java       |   22 +
 .../grouping/distributed/package-info.java      |   23 +
 .../SearchGroupsRequestFactory.java             |   82 +
 .../StoredFieldsShardRequestFactory.java        |   99 +
 .../TopGroupsShardRequestFactory.java           |  141 +
 .../requestfactory/package-info.java            |   23 +
 .../SearchGroupShardResponseProcessor.java      |  156 +
 .../StoredFieldsShardResponseProcessor.java     |   53 +
 .../TopGroupsShardResponseProcessor.java        |  218 +
 .../responseprocessor/package-info.java         |   23 +
 .../SearchGroupsResultTransformer.java          |  129 +
 .../ShardResultTransformer.java                 |   53 +
 .../ShardResultTransformerUtils.java            |   51 +
 .../TopGroupsResultTransformer.java             |  289 +
 .../shardresultserializer/package-info.java     |   23 +
 .../EndResultTransformer.java                   |   51 +
 .../GroupedEndResultTransformer.java            |  113 +
 .../MainEndResultTransformer.java               |   59 +
 .../SimpleEndResultTransformer.java             |   71 +
 .../endresulttransformer/package-info.java      |   23 +
 .../solr/search/grouping/package-info.java      |   25 +
 .../apache/solr/search/join/BitSetSlice.java    |   45 +
 .../solr/search/join/BlockJoinChildQParser.java |   56 +
 .../join/BlockJoinChildQParserPlugin.java       |   35 +
 .../join/BlockJoinDocSetFacetComponent.java     |  195 +
 .../search/join/BlockJoinFacetAccsHolder.java   |   83 +
 .../search/join/BlockJoinFacetComponent.java    |   23 +
 .../join/BlockJoinFacetComponentSupport.java    |  158 +
 .../solr/search/join/BlockJoinFacetFilter.java  |   90 +
 .../join/BlockJoinFieldFacetAccumulator.java    |  235 +
 .../search/join/BlockJoinParentQParser.java     |  166 +
 .../join/BlockJoinParentQParserPlugin.java      |   43 +
 .../join/ChildFieldValueSourceParser.java       |  198 +
 .../apache/solr/search/join/FiltersQParser.java |  154 +
 .../solr/search/join/FiltersQParserPlugin.java  |   33 +
 .../apache/solr/search/join/FrontierQuery.java  |   48 +
 .../solr/search/join/GraphPointsCollector.java  |  123 +
 .../solr/search/join/GraphQParserPlugin.java    |   38 +
 .../org/apache/solr/search/join/GraphQuery.java |  445 ++
 .../solr/search/join/GraphQueryParser.java      |   99 +
 .../solr/search/join/GraphTermsCollector.java   |  202 +
 .../search/join/ScoreJoinQParserPlugin.java     |  334 ++
 .../solr/search/join/ScoreModeParser.java       |   53 +
 .../apache/solr/search/join/package-info.java   |   23 +
 .../apache/solr/search/mlt/CloudMLTQParser.java |  209 +
 .../solr/search/mlt/MLTQParserPlugin.java       |   38 +
 .../solr/search/mlt/SimpleMLTQParser.java       |  167 +
 .../apache/solr/search/mlt/package-info.java    |   23 +
 .../org/apache/solr/search/package-info.java    |   23 +
 .../similarities/BM25SimilarityFactory.java     |   61 +
 .../similarities/ClassicSimilarityFactory.java  |   64 +
 .../similarities/DFISimilarityFactory.java      |   75 +
 .../similarities/DFRSimilarityFactory.java      |  179 +
 .../similarities/IBSimilarityFactory.java       |  110 +
 .../LMDirichletSimilarityFactory.java           |   59 +
 .../LMJelinekMercerSimilarityFactory.java       |   58 +
 .../similarities/SchemaSimilarityFactory.java   |  157 +
 .../SweetSpotSimilarityFactory.java             |  185 +
 .../solr/search/similarities/package-info.java  |   25 +
 .../solr/search/stats/CachedSearcherStats.java  |   22 +
 .../solr/search/stats/CollectionStats.java      |   70 +
 .../search/stats/ExactSharedStatsCache.java     |   87 +
 .../solr/search/stats/ExactStatsCache.java      |  351 ++
 .../apache/solr/search/stats/LRUStatsCache.java |  158 +
 .../solr/search/stats/LocalStatsCache.java      |   81 +
 .../solr/search/stats/LocalStatsSource.java     |   47 +
 .../apache/solr/search/stats/StatsCache.java    |  118 +
 .../apache/solr/search/stats/StatsSource.java   |   42 +
 .../org/apache/solr/search/stats/StatsUtil.java |  224 +
 .../org/apache/solr/search/stats/TermStats.java |   73 +
 .../apache/solr/search/stats/package-info.java  |   23 +
 .../security/AttributeOnlyServletContext.java   |  291 +
 .../solr/security/AuthenticationPlugin.java     |   62 +
 .../solr/security/AuthorizationContext.java     |   62 +
 .../solr/security/AuthorizationPlugin.java      |   30 +
 .../solr/security/AuthorizationResponse.java    |   40 +
 .../security/AutorizationEditOperation.java     |  173 +
 .../apache/solr/security/BasicAuthPlugin.java   |  187 +
 .../solr/security/ConfigEditablePlugin.java     |   38 +
 .../ConfigurableInternodeAuthHadoopPlugin.java  |   68 +
 .../security/DelegationTokenKerberosFilter.java |  252 +
 .../apache/solr/security/HadoopAuthFilter.java  |  230 +
 .../apache/solr/security/HadoopAuthPlugin.java  |  279 +
 .../solr/security/HttpClientBuilderPlugin.java  |   37 +
 .../apache/solr/security/KerberosFilter.java    |   62 +
 .../apache/solr/security/KerberosPlugin.java    |  267 +
 .../solr/security/PKIAuthenticationPlugin.java  |  303 ++
 .../org/apache/solr/security/Permission.java    |  159 +
 .../solr/security/PermissionNameProvider.java   |   79 +
 .../solr/security/PrintWriterWrapper.java       |  215 +
 .../apache/solr/security/PublicKeyHandler.java  |   47 +
 ...tContinuesRecorderAuthenticationHandler.java |   71 +
 .../security/RuleBasedAuthorizationPlugin.java  |  244 +
 .../solr/security/SecurityPluginHolder.java     |   33 +
 .../security/Sha256AuthenticationProvider.java  |  168 +
 .../org/apache/solr/security/package-info.java  |   22 +
 .../org/apache/solr/servlet/BaseSolrFilter.java |   33 +
 .../apache/solr/servlet/BaseSolrServlet.java    |   34 +
 .../solr/servlet/CheckLoggingConfiguration.java |   38 +
 .../solr/servlet/DirectSolrConnection.java      |  153 +
 .../org/apache/solr/servlet/HttpSolrCall.java   | 1116 ++++
 .../apache/solr/servlet/LoadAdminUiServlet.java |   89 +
 .../apache/solr/servlet/RedirectServlet.java    |   63 +
 .../org/apache/solr/servlet/ResponseUtils.java  |   84 +
 .../solr/servlet/ServletInputStreamWrapper.java |  105 +
 .../servlet/ServletOutputStreamWrapper.java     |  140 +
 .../apache/solr/servlet/SolrDispatchFilter.java |  597 +++
 .../apache/solr/servlet/SolrRequestParsers.java |  879 +++
 .../solr/servlet/cache/HttpCacheHeaderUtil.java |  343 ++
 .../org/apache/solr/servlet/cache/Method.java   |   31 +
 .../apache/solr/servlet/cache/package-info.java |   23 +
 .../org/apache/solr/servlet/package-info.java   |   23 +
 .../spelling/AbstractLuceneSpellChecker.java    |  278 +
 .../spelling/ConjunctionSolrSpellChecker.java   |  225 +
 .../solr/spelling/DirectSolrSpellChecker.java   |  229 +
 .../solr/spelling/FileBasedSpellChecker.java    |  138 +
 .../solr/spelling/IndexBasedSpellChecker.java   |  115 +
 .../solr/spelling/PossibilityIterator.java      |  427 ++
 .../apache/solr/spelling/QueryConverter.java    |   99 +
 .../org/apache/solr/spelling/ResultEntry.java   |   54 +
 .../apache/solr/spelling/SolrSpellChecker.java  |  198 +
 .../solr/spelling/SpellCheckCollation.java      |   68 +
 .../solr/spelling/SpellCheckCollator.java       |  279 +
 .../solr/spelling/SpellCheckCorrection.java     |   57 +
 .../apache/solr/spelling/SpellingOptions.java   |  107 +
 .../solr/spelling/SpellingQueryConverter.java   |  201 +
 .../apache/solr/spelling/SpellingResult.java    |  145 +
 .../solr/spelling/SuggestQueryConverter.java    |   43 +
 .../java/org/apache/solr/spelling/Token.java    |  175 +
 .../spelling/WordBreakSolrSpellChecker.java     |  360 ++
 .../org/apache/solr/spelling/package-info.java  |   25 +
 .../spelling/suggest/DictionaryFactory.java     |   47 +
 .../suggest/DocumentDictionaryFactory.java      |   55 +
 .../DocumentExpressionDictionaryFactory.java    |  101 +
 .../spelling/suggest/FileDictionaryFactory.java |   61 +
 .../suggest/HighFrequencyDictionaryFactory.java |   52 +
 .../solr/spelling/suggest/LookupFactory.java    |   66 +
 .../solr/spelling/suggest/SolrSuggester.java    |  300 ++
 .../apache/solr/spelling/suggest/Suggester.java |  227 +
 .../solr/spelling/suggest/SuggesterOptions.java |   49 +
 .../solr/spelling/suggest/SuggesterParams.java  |   85 +
 .../solr/spelling/suggest/SuggesterResult.java  |   80 +
 .../fst/AnalyzingInfixLookupFactory.java        |  150 +
 .../suggest/fst/AnalyzingLookupFactory.java     |  130 +
 .../suggest/fst/BlendedInfixLookupFactory.java  |  156 +
 .../spelling/suggest/fst/FSTLookupFactory.java  |   69 +
 .../suggest/fst/FreeTextLookupFactory.java      |   85 +
 .../suggest/fst/FuzzyLookupFactory.java         |  146 +
 .../spelling/suggest/fst/WFSTLookupFactory.java |   55 +
 .../solr/spelling/suggest/fst/package-info.java |   23 +
 .../suggest/jaspell/JaspellLookupFactory.java   |   47 +
 .../spelling/suggest/jaspell/package-info.java  |   23 +
 .../solr/spelling/suggest/package-info.java     |   25 +
 .../spelling/suggest/tst/TSTLookupFactory.java  |   40 +
 .../solr/spelling/suggest/tst/package-info.java |   23 +
 .../solr/store/blockcache/BlockCache.java       |  262 +
 .../solr/store/blockcache/BlockCacheKey.java    |   84 +
 .../store/blockcache/BlockCacheLocation.java    |   78 +
 .../solr/store/blockcache/BlockDirectory.java   |  371 ++
 .../store/blockcache/BlockDirectoryCache.java   |  133 +
 .../solr/store/blockcache/BlockLocks.java       |   98 +
 .../solr/store/blockcache/BufferStore.java      |  133 +
 .../org/apache/solr/store/blockcache/Cache.java |   69 +
 .../store/blockcache/CachedIndexOutput.java     |   86 +
 .../blockcache/CustomBufferedIndexInput.java    |  283 +
 .../apache/solr/store/blockcache/Metrics.java   |  140 +
 .../blockcache/ReusedBufferedIndexOutput.java   |  164 +
 .../org/apache/solr/store/blockcache/Store.java |   28 +
 .../solr/store/blockcache/package-info.java     |   23 +
 .../apache/solr/store/hdfs/HdfsDirectory.java   |  277 +
 .../apache/solr/store/hdfs/HdfsFileWriter.java  |   56 +
 .../solr/store/hdfs/HdfsLocalityReporter.java   |  198 +
 .../apache/solr/store/hdfs/HdfsLockFactory.java |  130 +
 .../apache/solr/store/hdfs/package-info.java    |   22 +
 .../apache/solr/uninverting/DocTermOrds.java    |  898 ++++
 .../org/apache/solr/uninverting/FieldCache.java |  447 ++
 .../apache/solr/uninverting/FieldCacheImpl.java | 1254 +++++
 .../solr/uninverting/UninvertingReader.java     |  466 ++
 .../apache/solr/uninverting/package-info.java   |   21 +
 .../apache/solr/update/AddUpdateCommand.java    |  282 +
 .../apache/solr/update/CdcrTransactionLog.java  |  399 ++
 .../org/apache/solr/update/CdcrUpdateLog.java   |  792 +++
 .../org/apache/solr/update/CommitTracker.java   |  329 ++
 .../apache/solr/update/CommitUpdateCommand.java |   59 +
 .../solr/update/DefaultSolrCoreState.java       |  456 ++
 .../solr/update/DeleteByQueryWrapper.java       |  119 +
 .../apache/solr/update/DeleteUpdateCommand.java |  114 +
 .../solr/update/DirectUpdateHandler2.java       | 1012 ++++
 .../org/apache/solr/update/DocumentBuilder.java |  319 ++
 .../apache/solr/update/HdfsTransactionLog.java  |  666 +++
 .../org/apache/solr/update/HdfsUpdateLog.java   |  426 ++
 .../apache/solr/update/IndexFingerprint.java    |  221 +
 .../apache/solr/update/LoggingInfoStream.java   |   45 +
 .../org/apache/solr/update/MemOutputStream.java |   52 +
 .../apache/solr/update/MergeIndexesCommand.java |   51 +
 .../java/org/apache/solr/update/PeerSync.java   |  878 +++
 .../apache/solr/update/PeerSyncWithLeader.java  |  372 ++
 .../solr/update/RollbackUpdateCommand.java      |   40 +
 .../apache/solr/update/SolrCmdDistributor.java  |  678 +++
 .../org/apache/solr/update/SolrCoreState.java   |  206 +
 .../org/apache/solr/update/SolrIndexConfig.java |  315 ++
 .../apache/solr/update/SolrIndexSplitter.java   |  694 +++
 .../org/apache/solr/update/SolrIndexWriter.java |  349 ++
 .../apache/solr/update/SplitIndexCommand.java   |   77 +
 .../solr/update/StreamingSolrClients.java       |  178 +
 .../org/apache/solr/update/TransactionLog.java  |  916 ++++
 .../org/apache/solr/update/UpdateCommand.java   |  101 +
 .../org/apache/solr/update/UpdateHandler.java   |  240 +
 .../java/org/apache/solr/update/UpdateLog.java  | 2213 ++++++++
 .../apache/solr/update/UpdateShardHandler.java  |  226 +
 .../solr/update/UpdateShardHandlerConfig.java   |   76 +
 .../org/apache/solr/update/VersionBucket.java   |   32 +
 .../org/apache/solr/update/VersionInfo.java     |  304 ++
 .../org/apache/solr/update/package-info.java    |   23 +
 ...tractDefaultValueUpdateProcessorFactory.java |  100 +
 .../AddSchemaFieldsUpdateProcessorFactory.java  |  562 ++
 ...aluesOrNoneFieldMutatingUpdateProcessor.java |  118 +
 .../processor/AtomicUpdateDocumentMerger.java   |  446 ++
 .../processor/AtomicUpdateProcessorFactory.java |  194 +
 .../update/processor/CdcrUpdateProcessor.java   |  130 +
 .../processor/CdcrUpdateProcessorFactory.java   |   46 +
 .../ClassificationUpdateProcessor.java          |  117 +
 .../ClassificationUpdateProcessorFactory.java   |  167 +
 .../ClassificationUpdateProcessorParams.java    |  112 +
 .../CloneFieldUpdateProcessorFactory.java       |  473 ++
 .../ConcatFieldUpdateProcessorFactory.java      |  116 +
 .../CountFieldValuesUpdateProcessorFactory.java |   81 +
 .../DefaultValueUpdateProcessorFactory.java     |   83 +
 .../processor/DistributedUpdateProcessor.java   | 2156 ++++++++
 .../DistributedUpdateProcessorFactory.java      |   57 +
 .../DistributingUpdateProcessorFactory.java     |   37 +
 .../DocBasedVersionConstraintsProcessor.java    |  515 ++
 ...BasedVersionConstraintsProcessorFactory.java |  196 +
 .../DocExpirationUpdateProcessorFactory.java    |  512 ++
 .../FieldLengthUpdateProcessorFactory.java      |   79 +
 .../processor/FieldMutatingUpdateProcessor.java |  301 ++
 .../FieldMutatingUpdateProcessorFactory.java    |  244 +
 ...FieldNameMutatingUpdateProcessorFactory.java |   99 +
 .../FieldValueMutatingUpdateProcessor.java      |   98 +
 .../FieldValueSubsetUpdateProcessorFactory.java |   56 +
 .../FirstFieldValueUpdateProcessorFactory.java  |   65 +
 .../HTMLStripFieldUpdateProcessorFactory.java   |   84 +
 ...oreCommitOptimizeUpdateProcessorFactory.java |  148 +
 .../IgnoreFieldUpdateProcessorFactory.java      |   76 +
 .../IgnoreLargeDocumentProcessorFactory.java    |  175 +
 .../LastFieldValueUpdateProcessorFactory.java   |   81 +
 .../processor/LogUpdateProcessorFactory.java    |  219 +
 .../solr/update/processor/Lookup3Signature.java |   36 +
 .../solr/update/processor/MD5Signature.java     |   49 +
 .../MaxFieldValueUpdateProcessorFactory.java    |   79 +
 .../MinFieldValueUpdateProcessorFactory.java    |   79 +
 .../processor/NestedUpdateProcessorFactory.java |  137 +
 .../NoOpDistributingUpdateProcessorFactory.java |   45 +
 ...ParseBooleanFieldUpdateProcessorFactory.java |  153 +
 .../ParseDateFieldUpdateProcessorFactory.java   |  273 +
 .../ParseDoubleFieldUpdateProcessorFactory.java |  122 +
 .../ParseFloatFieldUpdateProcessorFactory.java  |  123 +
 .../ParseIntFieldUpdateProcessorFactory.java    |  124 +
 .../ParseLongFieldUpdateProcessorFactory.java   |  118 +
 ...ParseNumericFieldUpdateProcessorFactory.java |   79 +
 .../PreAnalyzedUpdateProcessorFactory.java      |  173 +
 .../processor/RegexReplaceProcessorFactory.java |  142 +
 .../update/processor/RegexpBoostProcessor.java  |  211 +
 .../processor/RegexpBoostProcessorFactory.java  |   53 +
 .../RemoveBlankFieldUpdateProcessorFactory.java |   70 +
 .../processor/RunUpdateProcessorFactory.java    |  122 +
 .../processor/ScriptEngineCustomizer.java       |   28 +
 .../apache/solr/update/processor/Signature.java |   27 +
 .../SignatureUpdateProcessorFactory.java        |  204 +
 .../processor/SimpleUpdateProcessorFactory.java |  100 +
 .../SkipExistingDocumentsProcessorFactory.java  |  259 +
 .../StatelessScriptUpdateProcessorFactory.java  |  500 ++
 .../TemplateUpdateProcessorFactory.java         |  128 +
 .../update/processor/TextProfileSignature.java  |  161 +
 .../TimeRoutedAliasUpdateProcessor.java         |  516 ++
 .../TimestampUpdateProcessorFactory.java        |   66 +
 .../processor/TolerantUpdateProcessor.java      |  409 ++
 .../TolerantUpdateProcessorFactory.java         |  142 +
 .../TrimFieldUpdateProcessorFactory.java        |   67 +
 .../TruncateFieldUpdateProcessorFactory.java    |  101 +
 .../update/processor/URLClassifyProcessor.java  |  229 +
 .../processor/URLClassifyProcessorFactory.java  |   45 +
 .../processor/UUIDUpdateProcessorFactory.java   |  113 +
 .../UniqFieldsUpdateProcessorFactory.java       |   73 +
 .../processor/UpdateRequestProcessor.java       |  103 +
 .../processor/UpdateRequestProcessorChain.java  |  369 ++
 .../UpdateRequestProcessorFactory.java          |   50 +
 .../solr/update/processor/package-info.java     |   24 +
 .../apache/solr/util/AdjustableSemaphore.java   |   78 +
 .../org/apache/solr/util/BoundedTreeSet.java    |   68 +
 .../apache/solr/util/ConcurrentLFUCache.java    |  483 ++
 .../apache/solr/util/ConcurrentLRUCache.java    |  754 +++
 .../java/org/apache/solr/util/CryptoKeys.java   |  355 ++
 .../main/java/org/apache/solr/util/DOMUtil.java |  414 ++
 .../org/apache/solr/util/DateMathParser.java    |  433 ++
 .../solr/util/DefaultSolrThreadFactory.java     |   49 +
 .../org/apache/solr/util/DistanceUnits.java     |  128 +
 .../java/org/apache/solr/util/FSHDFSUtils.java  |  208 +
 .../java/org/apache/solr/util/FileUtils.java    |  119 +
 .../java/org/apache/solr/util/HdfsUtil.java     |   58 +
 .../java/org/apache/solr/util/IOFunction.java   |   29 +
 .../main/java/org/apache/solr/util/IdUtils.java |   56 +
 .../main/java/org/apache/solr/util/JmxUtil.java |   75 +
 .../java/org/apache/solr/util/LongIterator.java |   34 +
 .../org/apache/solr/util/LongPriorityQueue.java |  234 +
 .../main/java/org/apache/solr/util/LongSet.java |  137 +
 .../java/org/apache/solr/util/MapListener.java  |   58 +
 .../java/org/apache/solr/util/NumberUtils.java  |  217 +
 .../org/apache/solr/util/OrderedExecutor.java   |  116 +
 .../java/org/apache/solr/util/PayloadUtils.java |  143 +
 .../org/apache/solr/util/PivotListEntry.java    |   86 +
 .../java/org/apache/solr/util/PrimUtils.java    |  122 +
 .../apache/solr/util/PropertiesInputStream.java |   50 +
 .../solr/util/PropertiesOutputStream.java       |   43 +
 .../org/apache/solr/util/PropertiesUtil.java    |  151 +
 .../main/java/org/apache/solr/util/RTimer.java  |   99 +
 .../java/org/apache/solr/util/RTimerTree.java   |   91 +
 .../apache/solr/util/RecordingJSONParser.java   |  110 +
 .../org/apache/solr/util/RedactionUtils.java    |   51 +
 .../java/org/apache/solr/util/RefCounted.java   |   61 +
 .../org/apache/solr/util/RegexFileFilter.java   |   44 +
 .../org/apache/solr/util/SafeXMLParsing.java    |  120 +
 .../org/apache/solr/util/SimplePostTool.java    | 1267 +++++
 .../main/java/org/apache/solr/util/SolrCLI.java | 4437 +++++++++++++++
 .../solr/util/SolrFileCleaningTracker.java      |  147 +
 .../org/apache/solr/util/SolrLogLayout.java     |  381 ++
 .../org/apache/solr/util/SolrPluginUtils.java   | 1121 ++++
 .../java/org/apache/solr/util/SpatialUtils.java |  164 +
 .../apache/solr/util/StartupLoggingUtils.java   |  142 +
 .../org/apache/solr/util/SystemIdResolver.java  |  175 +
 .../org/apache/solr/util/TestInjection.java     |  510 ++
 .../main/java/org/apache/solr/util/TimeOut.java |   68 +
 .../org/apache/solr/util/TimeZoneUtils.java     |  103 +
 .../org/apache/solr/util/VersionedFile.java     |  116 +
 .../util/configuration/SSLConfigurations.java   |  124 +
 .../configuration/SSLConfigurationsFactory.java |   49 +
 .../configuration/SSLCredentialProvider.java    |   36 +
 .../SSLCredentialProviderFactory.java           |   93 +
 .../solr/util/configuration/package-info.java   |   23 +
 .../AbstractSSLCredentialProvider.java          |   56 +
 .../providers/EnvSSLCredentialProvider.java     |   72 +
 .../providers/HadoopSSLCredentialProvider.java  |   66 +
 .../providers/SysPropSSLCredentialProvider.java |   38 +
 .../configuration/providers/package-info.java   |   23 +
 .../solr/util/doc-files/min-should-match.html   |  116 +
 .../hll/BigEndianAscendingWordDeserializer.java |  172 +
 .../hll/BigEndianAscendingWordSerializer.java   |  173 +
 .../java/org/apache/solr/util/hll/BitUtil.java  |   70 +
 .../org/apache/solr/util/hll/BitVector.java     |  260 +
 .../main/java/org/apache/solr/util/hll/HLL.java | 1072 ++++
 .../org/apache/solr/util/hll/HLLMetadata.java   |  135 +
 .../java/org/apache/solr/util/hll/HLLType.java  |   28 +
 .../java/org/apache/solr/util/hll/HLLUtil.java  |  198 +
 .../org/apache/solr/util/hll/IHLLMetadata.java  |   70 +
 .../apache/solr/util/hll/ISchemaVersion.java    |   84 +
 .../apache/solr/util/hll/IWordDeserializer.java |   40 +
 .../apache/solr/util/hll/IWordSerializer.java   |   38 +
 .../org/apache/solr/util/hll/NumberUtil.java    |  171 +
 .../apache/solr/util/hll/SchemaVersionOne.java  |  153 +
 .../apache/solr/util/hll/SerializationUtil.java |  276 +
 .../org/apache/solr/util/hll/package-info.java  |   24 +
 .../java/org/apache/solr/util/package-info.java |   23 +
 .../solr/util/plugin/AbstractPluginLoader.java  |  278 +
 .../solr/util/plugin/MapInitializedPlugin.java  |   29 +
 .../solr/util/plugin/MapPluginLoader.java       |   53 +
 .../util/plugin/NamedListInitializedPlugin.java |   29 +
 .../solr/util/plugin/NamedListPluginLoader.java |   49 +
 .../solr/util/plugin/PluginInfoInitialized.java |   31 +
 .../apache/solr/util/plugin/SolrCoreAware.java  |   27 +
 .../apache/solr/util/plugin/package-info.java   |   25 +
 .../stats/HttpClientMetricNameStrategy.java     |   28 +
 .../stats/InstrumentedHttpRequestExecutor.java  |  139 +
 ...entedPoolingHttpClientConnectionManager.java |   53 +
 .../org/apache/solr/util/stats/MetricUtils.java |  640 +++
 .../apache/solr/util/stats/package-info.java    |   23 +
 .../solr/util/xslt/TransformerProvider.java     |  128 +
 .../org/apache/solr/util/xslt/package-info.java |   23 +
 solr/core/src/main/java/overview.html           |   21 +
 .../resources/EditableSolrConfigAttributes.json |   74 +
 .../src/main/resources/ImplicitPlugins.json     |  166 +
 .../main/resources/SystemCollectionSchema.xml   |   38 +
 .../resources/SystemCollectionSolrConfig.xml    |   19 +
 .../resources/EditableSolrConfigAttributes.json |   74 -
 solr/core/src/resources/ImplicitPlugins.json    |  166 -
 .../src/resources/SystemCollectionSchema.xml    |   38 -
 .../resources/SystemCollectionSolrConfig.xml    |   19 -
 solr/core/src/test-files/README                 |   21 -
 solr/core/src/test-files/books_numeric_ids.csv  |   11 -
 solr/core/src/test-files/cryptokeys/pk1.pem     |    9 -
 solr/core/src/test-files/cryptokeys/pk2.pem     |   12 -
 solr/core/src/test-files/cryptokeys/pubk1.der   |  Bin 94 -> 0 bytes
 solr/core/src/test-files/cryptokeys/pubk2.der   |  Bin 126 -> 0 bytes
 .../src/test-files/cryptokeys/samplefile.bin    |  Bin 3262 -> 0 bytes
 .../src/test-files/exampledocs/example.html     |   49 -
 .../core/src/test-files/exampledocs/example.txt |    3 -
 solr/core/src/test-files/lib-dirs/README        |   18 -
 .../test-files/lib-dirs/a/a1/empty-file-a1.txt  |    1 -
 .../test-files/lib-dirs/a/a2/empty-file-a2.txt  |    1 -
 .../test-files/lib-dirs/b/b1/empty-file-b1.txt  |    1 -
 .../test-files/lib-dirs/b/b2/empty-file-b2.txt  |    1 -
 .../test-files/lib-dirs/c/c1/empty-file-c1.txt  |    1 -
 .../test-files/lib-dirs/c/c2/empty-file-c2.txt  |    1 -
 .../test-files/lib-dirs/d/d2/empty-file-d2.txt  |    1 -
 solr/core/src/test-files/log4j2.xml             |   39 -
 solr/core/src/test-files/mailing_lists.pdf      |  382 --
 .../src/test-files/old-solr-example/README.txt  |    0
 .../src/test-files/old-solr-example/solr.xml    |    0
 .../runtimecode/RuntimeLibReqHandler.java       |   33 -
 .../runtimecode/RuntimeLibResponseWriter.java   |   49 -
 .../runtimecode/RuntimeLibSearchComponent.java  |   37 -
 .../src/test-files/runtimecode/TestURP.java     |   30 -
 .../test-files/runtimecode/runtimelibs.jar.bin  |  Bin 6860 -> 0 bytes
 .../runtimecode/runtimelibs_v2.jar.bin          |  Bin 6582 -> 0 bytes
 .../test-files/runtimecode/runtimeurp.jar.bin   |  Bin 753 -> 0 bytes
 .../solr/analysisconfs/analysis-err-schema.xml  |   40 -
 .../conf/addfields.updateprocessor.js           |   26 -
 .../collection1/conf/analyzingInfixSuggest.txt  |    5 -
 .../solr/collection1/conf/bad-currency.xml      |   31 -
 .../collection1/conf/bad-error-solrconfig.xml   |   30 -
 .../collection1/conf/bad-mpf-solrconfig.xml     |   37 -
 .../bad-schema-analyzer-class-and-nested.xml    |   35 -
 .../bad-schema-bogus-analysis-parameters.xml    |   28 -
 .../conf/bad-schema-bogus-field-parameters.xml  |   25 -
 .../bad-schema-codec-global-vs-ft-mismatch.xml  |   31 -
 .../bad-schema-currency-dynamic-multivalued.xml |   30 -
 .../bad-schema-currency-ft-amount-suffix.xml    |   34 -
 ...bad-schema-currency-ft-bogus-code-in-xml.xml |   33 -
 ...ad-schema-currency-ft-bogus-default-code.xml |   33 -
 .../conf/bad-schema-currency-ft-code-suffix.xml |   33 -
 .../conf/bad-schema-currency-ft-multivalued.xml |   29 -
 .../conf/bad-schema-currency-ft-oer-norates.xml |   32 -
 .../conf/bad-schema-currency-multivalued.xml    |   30 -
 ...ma-currencyfieldtype-bogus-amount-suffix.xml |   34 -
 ...hema-currencyfieldtype-bogus-code-suffix.xml |   35 -
 ...ma-currencyfieldtype-dynamic-multivalued.xml |   36 -
 ...a-currencyfieldtype-ft-bogus-code-in-xml.xml |   41 -
 ...-currencyfieldtype-ft-bogus-default-code.xml |   41 -
 ...-schema-currencyfieldtype-ft-multivalued.xml |   36 -
 ...-schema-currencyfieldtype-ft-oer-norates.xml |   40 -
 ...-currencyfieldtype-missing-amount-suffix.xml |   34 -
 ...ma-currencyfieldtype-missing-code-suffix.xml |   35 -
 ...bad-schema-currencyfieldtype-multivalued.xml |   36 -
 ...schema-currencyfieldtype-wrong-amount-ft.xml |   36 -
 ...d-schema-currencyfieldtype-wrong-code-ft.xml |   35 -
 .../conf/bad-schema-default-operator.xml        |   26 -
 .../conf/bad-schema-defaultsearchfield.xml      |   26 -
 .../conf/bad-schema-dup-dynamicField.xml        |   35 -
 .../collection1/conf/bad-schema-dup-field.xml   |   38 -
 .../conf/bad-schema-dup-fieldType.xml           |   37 -
 .../bad-schema-dynamicfield-default-val.xml     |   29 -
 .../conf/bad-schema-dynamicfield-required.xml   |   29 -
 .../solr/collection1/conf/bad-schema-eff.xml    |   44 -
 .../solr/collection1/conf/bad-schema-enums.xml  |   34 -
 ...asterisk-copyfield-dest-should-fail-test.xml |   27 -
 ...terisk-copyfield-source-should-fail-test.xml |   27 -
 ...asterisk-copyfield-dest-should-fail-test.xml |   25 -
 ...terisk-copyfield-source-should-fail-test.xml |   25 -
 ...source-matching-nothing-should-fail-test.xml |   31 -
 .../conf/bad-schema-nontext-analyzer.xml        |   33 -
 .../conf/bad-schema-not-indexed-but-norms.xml   |   35 -
 .../conf/bad-schema-not-indexed-but-pos.xml     |   35 -
 .../conf/bad-schema-not-indexed-but-tf.xml      |   34 -
 .../conf/bad-schema-omit-tf-but-not-pos.xml     |   35 -
 .../bad-schema-sim-default-does-not-exist.xml   |   41 -
 ...d-schema-sim-default-has-no-explicit-sim.xml |   41 -
 .../bad-schema-sim-global-vs-ft-mismatch.xml    |   34 -
 .../conf/bad-schema-sweetspot-both-tf.xml       |   43 -
 .../bad-schema-sweetspot-partial-baseline.xml   |   39 -
 .../bad-schema-sweetspot-partial-hyperbolic.xml |   41 -
 .../conf/bad-schema-sweetspot-partial-norms.xml |   40 -
 ...-schema-uniquekey-diff-type-dynamic-root.xml |   36 -
 .../bad-schema-uniquekey-diff-type-root.xml     |   35 -
 .../bad-schema-uniquekey-is-copyfield-dest.xml  |   31 -
 .../conf/bad-schema-uniquekey-multivalued.xml   |   28 -
 .../conf/bad-schema-uniquekey-uses-default.xml  |   28 -
 .../conf/bad-schema-uniquekey-uses-points.xml   |   28 -
 .../conf/bad-schema-unsupported-docValues.xml   |   26 -
 .../bad-solrconfig-bogus-scriptengine-name.xml  |   34 -
 .../conf/bad-solrconfig-invalid-scriptfile.xml  |   35 -
 ...lrconfig-managed-schema-named-schema.xml.xml |   30 -
 .../conf/bad-solrconfig-missing-scriptfile.xml  |   33 -
 .../conf/bad-solrconfig-multiple-cfs.xml        |   32 -
 .../conf/bad-solrconfig-multiple-dirfactory.xml |   34 -
 .../bad-solrconfig-multiple-indexconfigs.xml    |   35 -
 .../conf/bad-solrconfig-no-autocommit-tag.xml   |   52 -
 .../collection1/conf/bad-solrconfig-nrtmode.xml |   37 -
 ...olrconfig-schema-mutable-but-not-managed.xml |   32 -
 ...d-solrconfig-unexpected-schema-attribute.xml |   32 -
 .../solr/collection1/conf/bad_solrconfig.xml    |   28 -
 .../collection1/conf/blendedInfixSuggest.txt    |    3 -
 .../collection1/conf/compoundDictionary.txt     |   19 -
 .../conf/conditional.updateprocessor.js         |   25 -
 .../solr/collection1/conf/cross-compatible.js   |   53 -
 .../solr/collection1/conf/currency.xml          |   37 -
 .../solr/collection1/conf/da_UTF8.xml           | 1208 -----
 .../collection1/conf/da_compoundDictionary.txt  |   19 -
 .../solr/collection1/conf/elevate.xml           |   54 -
 .../solr/collection1/conf/enumsConfig.xml       |   52 -
 .../solr/collection1/conf/freeTextSuggest.txt   |    2 -
 .../solr/collection1/conf/frenchArticles.txt    |   24 -
 .../solr/collection1/conf/fuzzysuggest.txt      |    4 -
 .../solr/collection1/conf/hunspell-test.aff     |   13 -
 .../solr/collection1/conf/hunspell-test.dic     |    6 -
 .../solr/collection1/conf/hyphenation.dtd       |   68 -
 .../solr/collection1/conf/jasuggest.txt         |    5 -
 .../test-files/solr/collection1/conf/keep-1.txt |   17 -
 .../test-files/solr/collection1/conf/keep-2.txt |   17 -
 .../conf/mapping-ISOLatin1Accent.txt            |  246 -
 .../conf/missing.functions.updateprocessor.js   |    3 -
 ...missleading.extension.updateprocessor.js.txt |   23 -
 .../collection1/conf/multiword-synonyms.txt     |   13 -
 .../solr/collection1/conf/old_synonyms.txt      |   22 -
 .../collection1/conf/open-exchange-rates.json   |   18 -
 .../solr/collection1/conf/phrasesuggest.txt     |    8 -
 .../solr/collection1/conf/protected-1.txt       |   17 -
 .../solr/collection1/conf/protected-2.txt       |   17 -
 .../solr/collection1/conf/protwords.txt         |   23 -
 .../conf/regex-boost-processor-test.txt         |   10 -
 .../conf/schema-HighlighterMaxOffsetTest.xml    |   85 -
 .../collection1/conf/schema-SimpleTextCodec.xml |   32 -
 ...chema-add-schema-fields-update-processor.xml |   72 -
 .../solr/collection1/conf/schema-behavior.xml   |  132 -
 .../collection1/conf/schema-binaryfield.xml     |   41 -
 .../conf/schema-blockjoinfacetcomponent.xml     |   40 -
 .../solr/collection1/conf/schema-bm25.xml       |   46 -
 .../collection1/conf/schema-charfilters.xml     |   47 -
 ...a-class-name-shortening-on-serialization.xml |   44 -
 .../collection1/conf/schema-classification.xml  |   43 -
 .../solr/collection1/conf/schema-collate-dv.xml |   59 -
 .../solr/collection1/conf/schema-collate.xml    |   58 -
 .../collection1/conf/schema-copyfield-test.xml  |  456 --
 .../collection1/conf/schema-custom-field.xml    |   44 -
 .../collection1/conf/schema-customfield.xml     |   55 -
 .../solr/collection1/conf/schema-dfi.xml        |   50 -
 .../solr/collection1/conf/schema-dfr.xml        |   64 -
 .../conf/schema-distrib-interval-faceting.xml   |   78 -
 .../conf/schema-distributed-missing-sort.xml    |   86 -
 .../solr/collection1/conf/schema-docValues.xml  |   81 -
 .../conf/schema-docValuesFaceting.xml           |   96 -
 .../collection1/conf/schema-docValuesJoin.xml   |  105 -
 .../conf/schema-docValuesMissing.xml            |  116 -
 .../collection1/conf/schema-docValuesMulti.xml  |   49 -
 .../solr/collection1/conf/schema-eff.xml        |   40 -
 .../solr/collection1/conf/schema-enums.xml      |   49 -
 .../conf/schema-field-sort-values.xml           |   36 -
 .../solr/collection1/conf/schema-folding.xml    |  262 -
 .../solr/collection1/conf/schema-hash.xml       |  614 ---
 .../solr/collection1/conf/schema-ib.xml         |   51 -
 .../conf/schema-id-and-version-fields-only.xml  |   25 -
 .../collection1/conf/schema-inplace-updates.xml |   67 -
 .../collection1/conf/schema-lmdirichlet.xml     |   44 -
 .../collection1/conf/schema-lmjelinekmercer.xml |   44 -
 .../conf/schema-luceneMatchVersion.xml          |   52 -
 .../conf/schema-minimal-atomic-stress.xml       |   38 -
 .../schema-minimal-with-another-uniqkey.xml     |   23 -
 .../solr/collection1/conf/schema-minimal.xml    |   21 -
 .../conf/schema-multiword-synonyms.xml          |   50 -
 .../solr/collection1/conf/schema-nest.xml       |   65 -
 .../conf/schema-non-stored-docvalues.xml        |   74 -
 .../conf/schema-not-required-unique-key.xml     |   38 -
 .../conf/schema-null-charfilters-analyzer.xml   |   27 -
 .../solr/collection1/conf/schema-numeric.xml    |   85 -
 ...ma-one-field-no-dynamic-field-unique-key.xml |   25 -
 .../conf/schema-one-field-no-dynamic-field.xml  |   24 -
 .../conf/schema-phrases-identification.xml      |   97 -
 .../collection1/conf/schema-phrasesuggest.xml   |   56 -
 .../solr/collection1/conf/schema-point.xml      |  187 -
 .../conf/schema-postingshighlight.xml           |   46 -
 .../collection1/conf/schema-preanalyzed.xml     |   44 -
 .../collection1/conf/schema-protected-term.xml  |   86 -
 .../collection1/conf/schema-psuedo-fields.xml   |   74 -
 .../collection1/conf/schema-replication1.xml    |   38 -
 .../collection1/conf/schema-replication2.xml    |   40 -
 .../collection1/conf/schema-required-fields.xml |  401 --
 .../conf/schema-rest-lucene-match-version.xml   |   36 -
 .../solr/collection1/conf/schema-rest.xml       |  747 ---
 .../solr/collection1/conf/schema-reversed.xml   |   80 -
 .../conf/schema-sim-default-override.xml        |   66 -
 .../solr/collection1/conf/schema-sim.xml        |   69 -
 .../collection1/conf/schema-simpleqpplugin.xml  |   60 -
 .../collection1/conf/schema-snippet-field.xml   |    3 -
 .../collection1/conf/schema-snippet-type.xml    |    3 -
 .../collection1/conf/schema-snippet-types.incl  |   19 -
 .../collection1/conf/schema-sorting-text.xml    |  149 -
 .../collection1/conf/schema-sortingresponse.xml |  107 -
 .../solr/collection1/conf/schema-sorts.xml      |  314 --
 .../solr/collection1/conf/schema-spatial.xml    |  101 -
 .../collection1/conf/schema-spellchecker.xml    |   71 -
 .../solr/collection1/conf/schema-sql.xml        |  650 ---
 .../solr/collection1/conf/schema-sweetspot.xml  |   69 -
 .../conf/schema-synonym-tokenizer.xml           |   40 -
 .../solr/collection1/conf/schema-tagger.xml     |  187 -
 .../solr/collection1/conf/schema-tfidf.xml      |   44 -
 .../solr/collection1/conf/schema-tiny.xml       |   35 -
 .../collection1/conf/schema-tokenizer-test.xml  |  125 -
 .../solr/collection1/conf/schema-trie.xml       |  324 --
 .../conf/schema-unifiedhighlight.xml            |   45 -
 .../solr/collection1/conf/schema-version-dv.xml |   33 -
 .../collection1/conf/schema-version-indexed.xml |   33 -
 .../solr/collection1/conf/schema-xinclude.xml   |   26 -
 .../test-files/solr/collection1/conf/schema.xml |  834 ---
 .../solr/collection1/conf/schema11.xml          |  539 --
 .../solr/collection1/conf/schema12.xml          |  745 ---
 .../solr/collection1/conf/schema15.xml          |  625 ---
 .../solr/collection1/conf/schema_codec.xml      |   51 -
 .../solr/collection1/conf/schema_latest.xml     |  791 ---
 .../solr/collection1/conf/schemasurround.xml    |  609 ---
 .../collection1/conf/solrconfig-SOLR-749.xml    |   35 -
 ...dd-schema-fields-update-processor-chains.xml |  223 -
 .../conf/solrconfig-altdirectory.xml            |   27 -
 .../conf/solrconfig-analytics-query.xml         |  319 --
 .../solr/collection1/conf/solrconfig-basic.xml  |   29 -
 .../conf/solrconfig-blockjoinfacetcomponent.xml |   58 -
 .../conf/solrconfig-cache-enable-disable.xml    |   86 -
 .../collection1/conf/solrconfig-caching.xml     |   40 -
 .../solr/collection1/conf/solrconfig-cdcr.xml   |   77 -
 .../conf/solrconfig-cdcrupdatelog.xml           |   49 -
 .../conf/solrconfig-classification.xml          |   68 -
 .../conf/solrconfig-collapseqparser.xml         |  326 --
 .../conf/solrconfig-components-name.xml         |   74 -
 .../solrconfig-concurrentmergescheduler.xml     |   37 -
 .../solrconfig-configurerecoverystrategy.xml    |   28 -
 .../conf/solrconfig-customrecoverystrategy.xml  |   32 -
 .../collection1/conf/solrconfig-deeppaging.xml  |   52 -
 .../collection1/conf/solrconfig-defaults.xml    |   43 -
 .../conf/solrconfig-delaying-component.xml      |   62 -
 .../collection1/conf/solrconfig-delpolicy1.xml  |   51 -
 .../collection1/conf/solrconfig-delpolicy2.xml  |   48 -
 ...lrconfig-distrib-update-processor-chains.xml |   84 -
 .../solrconfig-doc-expire-update-processor.xml  |   96 -
 .../conf/solrconfig-doctransformers.xml         |   52 -
 .../collection1/conf/solrconfig-elevate.xml     |  160 -
 .../solrconfig-externalversionconstraint.xml    |  155 -
 .../conf/solrconfig-functionquery.xml           |   48 -
 .../solr/collection1/conf/solrconfig-hash.xml   |   61 -
 .../collection1/conf/solrconfig-headers.xml     |   32 -
 .../collection1/conf/solrconfig-highlight.xml   |   61 -
 .../conf/solrconfig-implicitproperties.xml      |   76 -
 ...olrconfig-indexconfig-mergepolicyfactory.xml |   31 -
 .../conf/solrconfig-indexmetrics.xml            |   61 -
 .../conf/solrconfig-infixsuggesters.xml         |  101 -
 .../conf/solrconfig-infostream-logging.xml      |   29 -
 .../conf/solrconfig-logmergepolicyfactory.xml   |   37 -
 .../conf/solrconfig-managed-schema-test.xml     |   27 -
 .../conf/solrconfig-managed-schema.xml          |   86 -
 .../conf/solrconfig-master-throttled.xml        |   66 -
 .../solr/collection1/conf/solrconfig-master.xml |   70 -
 .../conf/solrconfig-master1-keepOneBackup.xml   |   49 -
 .../collection1/conf/solrconfig-master1.xml     |   68 -
 .../collection1/conf/solrconfig-master2.xml     |   66 -
 .../collection1/conf/solrconfig-master3.xml     |   67 -
 .../conf/solrconfig-mergepolicy-defaults.xml    |   33 -
 .../conf/solrconfig-mergepolicy-legacy.xml      |   31 -
 .../solrconfig-mergepolicyfactory-nocfs.xml     |   34 -
 .../collection1/conf/solrconfig-minimal.xml     |   65 -
 .../collection1/conf/solrconfig-nocache.xml     |   48 -
 .../conf/solrconfig-nomergepolicyfactory.xml    |   32 -
 .../collection1/conf/solrconfig-noopregen.xml   |   36 -
 .../collection1/conf/solrconfig-paramset.xml    |   89 -
 ...lrconfig-parsing-update-processor-chains.xml |  234 -
 .../conf/solrconfig-phrases-identification.xml  |   53 -
 .../conf/solrconfig-phrasesuggest.xml           |  468 --
 .../conf/solrconfig-plugcollector.xml           |  543 --
 .../conf/solrconfig-postingshighlight.xml       |   36 -
 .../conf/solrconfig-query-parser-init.xml       |   37 -
 .../conf/solrconfig-querysender-noquery.xml     |   75 -
 .../collection1/conf/solrconfig-querysender.xml |   71 -
 .../collection1/conf/solrconfig-repeater.xml    |   61 -
 .../collection1/conf/solrconfig-reqHandler.incl |    5 -
 .../conf/solrconfig-response-log-component.xml  |   61 -
 .../collection1/conf/solrconfig-schemaless.xml  |   98 -
 .../conf/solrconfig-script-updateprocessor.xml  |  120 -
 .../conf/solrconfig-searcher-listeners1.xml     |   51 -
 .../solr/collection1/conf/solrconfig-slave.xml  |   59 -
 .../solr/collection1/conf/solrconfig-slave1.xml |   52 -
 .../conf/solrconfig-snippet-processor.xml       |    6 -
 .../conf/solrconfig-solcoreproperties.xml       |   36 -
 .../solrconfig-sortingmergepolicyfactory.xml    |   56 -
 .../conf/solrconfig-sortingresponse.xml         |   45 -
 .../collection1/conf/solrconfig-spatial.xml     |   42 -
 .../conf/solrconfig-spellcheckcomponent.xml     |  197 -
 .../conf/solrconfig-spellchecker.xml            |  143 -
 .../solr/collection1/conf/solrconfig-sql.xml    |   72 -
 ...-suggestercomponent-context-filter-query.xml |  122 -
 .../conf/solrconfig-suggestercomponent.xml      |  146 -
 .../solr/collection1/conf/solrconfig-tagger.xml |   59 -
 .../collection1/conf/solrconfig-test-misc.xml   |   53 -
 .../conf/solrconfig-testxmlparser.xml           |   33 -
 .../solrconfig-tieredmergepolicyfactory.xml     |   41 -
 .../solr/collection1/conf/solrconfig-tlog.xml   |  181 -
 .../conf/solrconfig-tolerant-search.xml         |   53 -
 .../conf/solrconfig-tolerant-update-minimal.xml |   40 -
 .../conf/solrconfig-transformers.xml            |   89 -
 ...nfig-uninvertdocvaluesmergepolicyfactory.xml |   38 -
 .../conf/solrconfig-update-processor-chains.xml |  657 ---
 ...lrconfig-warmer-randommergepolicyfactory.xml |   46 -
 .../conf/solrconfig-withgethandler.xml          |   52 -
 .../collection1/conf/solrconfig-xinclude.xml    |   36 -
 .../solrconfig.snippet.randomindexconfig.xml    |   49 -
 .../solr/collection1/conf/solrconfig.xml        |  567 --
 .../conf/solrconfig_SimpleTextCodec.xml         |   26 -
 .../solr/collection1/conf/solrconfig_codec.xml  |   28 -
 .../solr/collection1/conf/solrconfig_codec2.xml |   26 -
 .../solr/collection1/conf/solrconfig_perf.xml   |   73 -
 .../solr/collection1/conf/stemdict.txt          |   22 -
 .../test-files/solr/collection1/conf/stop-1.txt |   17 -
 .../test-files/solr/collection1/conf/stop-2.txt |   17 -
 .../solr/collection1/conf/stop-snowball.txt     |   10 -
 .../solr/collection1/conf/stoptypes-1.txt       |   17 -
 .../solr/collection1/conf/stoptypes-2.txt       |   17 -
 .../solr/collection1/conf/stopwithbom.txt       |    1 -
 .../solr/collection1/conf/stopwords.txt         |   58 -
 .../collection1/conf/stopwordsWrongEncoding.txt |   18 -
 .../solr/collection1/conf/synonyms.txt          |   40 -
 .../conf/throw.error.on.add.updateprocessor.js  |   21 -
 .../conf/trivial.updateprocessor0.js            |   59 -
 .../conf/trivial.updateprocessor1.js            |   25 -
 .../solr/collection1/conf/wdftypes.txt          |   32 -
 .../conf/xslt/dummy-using-include.xsl           |   31 -
 .../solr/collection1/conf/xslt/dummy.xsl        |   39 -
 .../conf/xslt/xsl-update-handler-test.xsl       |   49 -
 .../src/test-files/solr/collection1/lib/README  |   18 -
 .../lib/classes/empty-file-main-lib.txt         |    1 -
 .../src/test-files/solr/conf/core.properties    |   19 -
 .../_default/conf/lang/contractions_ca.txt      |    8 -
 .../_default/conf/lang/contractions_fr.txt      |   15 -
 .../_default/conf/lang/contractions_ga.txt      |    5 -
 .../_default/conf/lang/contractions_it.txt      |   23 -
 .../_default/conf/lang/hyphenations_ga.txt      |    5 -
 .../_default/conf/lang/stemdict_nl.txt          |    6 -
 .../_default/conf/lang/stoptags_ja.txt          |  420 --
 .../_default/conf/lang/stopwords_ar.txt         |  125 -
 .../_default/conf/lang/stopwords_bg.txt         |  193 -
 .../_default/conf/lang/stopwords_ca.txt         |  220 -
 .../_default/conf/lang/stopwords_cz.txt         |  172 -
 .../_default/conf/lang/stopwords_da.txt         |  110 -
 .../_default/conf/lang/stopwords_de.txt         |  294 -
 .../_default/conf/lang/stopwords_el.txt         |   78 -
 .../_default/conf/lang/stopwords_en.txt         |   54 -
 .../_default/conf/lang/stopwords_es.txt         |  356 --
 .../_default/conf/lang/stopwords_eu.txt         |   99 -
 .../_default/conf/lang/stopwords_fa.txt         |  313 --
 .../_default/conf/lang/stopwords_fi.txt         |   97 -
 .../_default/conf/lang/stopwords_fr.txt         |  186 -
 .../_default/conf/lang/stopwords_ga.txt         |  110 -
 .../_default/conf/lang/stopwords_gl.txt         |  161 -
 .../_default/conf/lang/stopwords_hi.txt         |  235 -
 .../_default/conf/lang/stopwords_hu.txt         |  211 -
 .../_default/conf/lang/stopwords_hy.txt         |   46 -
 .../_default/conf/lang/stopwords_id.txt         |  359 --
 .../_default/conf/lang/stopwords_it.txt         |  303 --
 .../_default/conf/lang/stopwords_ja.txt         |  127 -
 .../_default/conf/lang/stopwords_lv.txt         |  172 -
 .../_default/conf/lang/stopwords_nl.txt         |  119 -
 .../_default/conf/lang/stopwords_no.txt         |  194 -
 .../_default/conf/lang/stopwords_pt.txt         |  253 -
 .../_default/conf/lang/stopwords_ro.txt         |  233 -
 .../_default/conf/lang/stopwords_ru.txt         |  243 -
 .../_default/conf/lang/stopwords_sv.txt         |  133 -
 .../_default/conf/lang/stopwords_th.txt         |  119 -
 .../_default/conf/lang/stopwords_tr.txt         |  212 -
 .../_default/conf/lang/userdict_ja.txt          |   29 -
 .../configsets/_default/conf/managed-schema     | 1007 ----
 .../solr/configsets/_default/conf/params.json   |   20 -
 .../solr/configsets/_default/conf/protwords.txt |   21 -
 .../configsets/_default/conf/solrconfig.xml     | 1355 -----
 .../solr/configsets/_default/conf/stopwords.txt |   14 -
 .../solr/configsets/_default/conf/synonyms.txt  |   29 -
 .../solr/configsets/backcompat/conf/schema.xml  |   22 -
 .../configsets/backcompat/conf/solrconfig.xml   |   43 -
 .../configsets/bad-mergepolicy/conf/schema.xml  |   21 -
 .../bad-mergepolicy/conf/solrconfig.xml         |   36 -
 .../cdcr-cluster1/conf/managed-schema           |   29 -
 .../cdcr-cluster1/conf/solrconfig.xml           |   80 -
 .../cdcr-cluster2/conf/managed-schema           |   29 -
 .../cdcr-cluster2/conf/solrconfig.xml           |   80 -
 .../cdcr-source-disabled/conf/schema.xml        |   29 -
 .../cdcr-source-disabled/conf/solrconfig.xml    |   60 -
 .../solr/configsets/cdcr-source/conf/schema.xml |   29 -
 .../configsets/cdcr-source/conf/solrconfig.xml  |   75 -
 .../solr/configsets/cdcr-target/conf/schema.xml |   29 -
 .../configsets/cdcr-target/conf/solrconfig.xml  |   62 -
 .../configsets/cloud-dynamic/conf/schema.xml    |  293 -
 .../cloud-dynamic/conf/solrconfig.xml           |   48 -
 .../solr/configsets/cloud-hdfs/conf/schema.xml  |   28 -
 .../configsets/cloud-hdfs/conf/solrconfig.xml   |   52 -
 .../conf/managed-schema                         |   41 -
 .../conf/solrconfig.xml                         |   51 -
 .../cloud-managed-upgrade/conf/schema.xml       |   27 -
 .../cloud-managed-upgrade/conf/solrconfig.xml   |   50 -
 .../cloud-managed/conf/managed-schema           |   27 -
 .../cloud-managed/conf/solrconfig.xml           |   51 -
 .../conf/schema.xml                             |   31 -
 .../conf/solrconfig.xml                         |   48 -
 .../cloud-minimal-jmx/conf/schema.xml           |   28 -
 .../cloud-minimal-jmx/conf/solrconfig.xml       |   50 -
 .../configsets/cloud-minimal/conf/schema.xml    |   29 -
 .../cloud-minimal/conf/solrconfig.xml           |   51 -
 .../configsets/cloud-subdirs/conf/schema.xml    |   28 -
 .../cloud-subdirs/conf/solrconfig.xml           |   48 -
 .../conf/stopwords/stopwords-en.txt             |   62 -
 .../solr/configsets/configset-2/conf/schema.xml |   25 -
 .../configsets/configset-2/conf/solrconfig.xml  |   49 -
 .../solr/configsets/doc-expiry/conf/schema.xml  |  287 -
 .../configsets/doc-expiry/conf/solrconfig.xml   |  107 -
 .../exitable-directory/conf/schema.xml          |   28 -
 .../exitable-directory/conf/solrconfig.xml      |  117 -
 .../solr/configsets/minimal/conf/schema.xml     |   21 -
 .../solr/configsets/minimal/conf/solrconfig.xml |   47 -
 .../solr/configsets/resource-sharing/schema.xml |   21 -
 .../configsets/resource-sharing/solrconfig.xml  |   51 -
 .../dih-script-transformer/managed-schema       |   25 -
 .../dih-script-transformer/solrconfig.xml       |   61 -
 .../configsets/upload/regular/managed-schema    |   25 -
 .../configsets/upload/regular/solrconfig.xml    |   61 -
 .../regular/xslt/xsl-update-handler-test.xsl    |   49 -
 .../upload/with-script-processor/managed-schema |   25 -
 ...missleading.extension.updateprocessor.js.txt |   23 -
 .../upload/with-script-processor/solrconfig.xml |   65 -
 .../test-files/solr/crazy-path-to-config.xml    |   68 -
 .../test-files/solr/crazy-path-to-schema.xml    |   44 -
 solr/core/src/test-files/solr/external_eff      |   10 -
 .../solr/security/hadoop_kerberos_config.json   |   16 -
 .../hadoop_simple_auth_with_delegation.json     |   29 -
 solr/core/src/test-files/solr/solr-50-all.xml   |   64 -
 .../test-files/solr/solr-gangliareporter.xml    |   32 -
 .../test-files/solr/solr-graphitereporter.xml   |   31 -
 .../src/test-files/solr/solr-hiddensysprops.xml |   31 -
 .../src/test-files/solr/solr-jmxreporter.xml    |   43 -
 .../src/test-files/solr/solr-metricreporter.xml |   57 -
 .../src/test-files/solr/solr-metricsconfig.xml  |   61 -
 .../solr-shardhandler-loadBalancerRequests.xml  |   23 -
 .../src/test-files/solr/solr-shardhandler.xml   |   29 -
 .../src/test-files/solr/solr-slf4jreporter.xml  |   42 -
 .../src/test-files/solr/solr-solrDataHome.xml   |   24 -
 .../src/test-files/solr/solr-solrreporter.xml   |   70 -
 .../src/test-files/solr/solr-stress-new.xml     |   34 -
 .../solr/solr-trackingshardhandler.xml          |   48 -
 solr/core/src/test-files/solr/solr.xml          |   54 -
 solr/core/src/test-files/spellings.txt          |   16 -
 .../solr/AnalysisAfterCoreReloadTest.java       |  143 +
 .../org/apache/solr/BasicFunctionalityTest.java | 1037 ++++
 .../org/apache/solr/ConvertedLegacyTest.java    | 1321 +++++
 .../java/org/apache/solr/CursorPagingTest.java  |  979 ++++
 .../apache/solr/DisMaxRequestHandlerTest.java   |  222 +
 .../solr/DistributedIntervalFacetingTest.java   |  219 +
 .../java/org/apache/solr/EchoParamsTest.java    |   80 +
 .../solr/HelloWorldSolrCloudTestCase.java       |   94 +
 .../java/org/apache/solr/MinimalSchemaTest.java |  140 +
 .../java/org/apache/solr/OutputWriterTest.java  |  125 +
 .../test/java/org/apache/solr/SampleTest.java   |  120 +
 .../java/org/apache/solr/SolrInfoBeanTest.java  |  124 +
 .../org/apache/solr/SolrTestCaseJ4Test.java     |   64 +
 .../java/org/apache/solr/TestCrossCoreJoin.java |  139 +
 .../solr/TestCursorMarkWithoutUniqueKey.java    |   64 +
 .../apache/solr/TestDistributedGrouping.java    |  324 ++
 .../apache/solr/TestDistributedMissingSort.java |  278 +
 .../org/apache/solr/TestDistributedSearch.java  | 1248 +++++
 .../org/apache/solr/TestDocumentBuilder.java    |   68 +
 .../org/apache/solr/TestGroupingSearch.java     |  978 ++++
 .../apache/solr/TestHighlightDedupGrouping.java |  123 +
 .../src/test/java/org/apache/solr/TestJoin.java |  292 +
 .../org/apache/solr/TestRandomDVFaceting.java   |  305 ++
 .../org/apache/solr/TestRandomFaceting.java     |  460 ++
 .../solr/TestSimpleTrackingShardHandler.java    |   56 +
 .../org/apache/solr/TestSolrCoreProperties.java |  101 +
 .../org/apache/solr/TestTolerantSearch.java     |  237 +
 .../src/test/java/org/apache/solr/TestTrie.java |  282 +
 .../PathHierarchyTokenizerFactoryTest.java      |   96 +
 .../ProtectedTermFilterFactoryTest.java         |   84 +
 .../apache/solr/analysis/TestCharFilters.java   |   76 +
 .../solr/analysis/TestLuceneMatchVersion.java   |   57 +
 .../TestReversedWildcardFilterFactory.java      |  241 +
 .../TestWordDelimiterFilterFactory.java         |  245 +
 .../ThrowingMockTokenFilterFactory.java         |   69 +
 .../solr/analysis/TokenizerChainTest.java       |   43 +
 .../solr/analysis/htmlStripReaderTest.html      |  350 ++
 .../backcompat/TestLuceneIndexBackCompat.java   |   96 +
 .../TestEmbeddedSolrServerAdminHandler.java     |   70 +
 .../TestEmbeddedSolrServerConstructors.java     |   68 +
 .../TestEmbeddedSolrServerSchemaAPI.java        |  115 +
 .../solrj/embedded/TestJettySolrRunner.java     |   72 +
 .../client/solrj/impl/ConnectionReuseTest.java  |  193 +
 .../apache/solr/cloud/ActionThrottleTest.java   |  124 +
 .../org/apache/solr/cloud/AddReplicaTest.java   |  194 +
 .../apache/solr/cloud/AliasIntegrationTest.java |  658 +++
 .../cloud/AssignBackwardCompatibilityTest.java  |  116 +
 .../AsyncCallRequestStatusResponseTest.java     |   62 +
 .../solr/cloud/BasicDistributedZk2Test.java     |  462 ++
 .../solr/cloud/BasicDistributedZkTest.java      | 1170 ++++
 .../java/org/apache/solr/cloud/BasicZkTest.java |  181 +
 .../cloud/ChaosMonkeyNothingIsSafeTest.java     |  300 ++
 ...MonkeyNothingIsSafeWithPullReplicasTest.java |  343 ++
 .../solr/cloud/ChaosMonkeySafeLeaderTest.java   |  210 +
 ...aosMonkeySafeLeaderWithPullReplicasTest.java |  259 +
 .../solr/cloud/ChaosMonkeyShardSplitTest.java   |  269 +
 .../apache/solr/cloud/CleanupOldIndexTest.java  |  122 +
 .../cloud/CloudExitableDirectoryReaderTest.java |  116 +
 .../org/apache/solr/cloud/CloudTestUtils.java   |  175 +
 .../apache/solr/cloud/ClusterStateMockUtil.java |  197 +
 .../org/apache/solr/cloud/ClusterStateTest.java |   81 +
 .../solr/cloud/ClusterStateUpdateTest.java      |  144 +
 .../apache/solr/cloud/CollectionPropsTest.java  |  271 +
 .../solr/cloud/CollectionStateFormat2Test.java  |   71 +
 .../solr/cloud/CollectionsAPISolrJTest.java     |  668 +++
 .../cloud/ConcurrentCreateRoutedAliasTest.java  |  222 +
 .../apache/solr/cloud/ConfigSetsAPITest.java    |   49 +
 .../solr/cloud/ConnectionManagerTest.java       |  176 +
 .../solr/cloud/CreateCollectionCleanupTest.java |   82 +
 .../solr/cloud/CreateRoutedAliasTest.java       |  381 ++
 .../solr/cloud/DeleteInactiveReplicaTest.java   |  111 +
 .../DeleteLastCustomShardedReplicaTest.java     |   56 +
 .../org/apache/solr/cloud/DeleteNodeTest.java   |  117 +
 .../apache/solr/cloud/DeleteReplicaTest.java    |  396 ++
 .../org/apache/solr/cloud/DeleteShardTest.java  |  142 +
 .../org/apache/solr/cloud/DeleteStatusTest.java |  123 +
 .../solr/cloud/DistribCursorPagingTest.java     |  760 +++
 ...DistribDocExpirationUpdateProcessorTest.java |  189 +
 .../cloud/DistribJoinFromCollectionTest.java    |  237 +
 .../apache/solr/cloud/DistributedQueueTest.java |  344 ++
 .../solr/cloud/DistributedVersionInfoTest.java  |  386 ++
 .../solr/cloud/DocValuesNotIndexedTest.java     |  536 ++
 .../org/apache/solr/cloud/ForceLeaderTest.java  |  256 +
 .../cloud/FullSolrCloudDistribCmdsTest.java     |  760 +++
 .../FullThrottleStoppableIndexingThread.java    |  173 +
 .../solr/cloud/HealthCheckHandlerTest.java      |  111 +
 .../solr/cloud/HttpPartitionOnCommitTest.java   |  178 +
 .../apache/solr/cloud/HttpPartitionTest.java    |  617 +++
 .../apache/solr/cloud/KerberosTestServices.java |  225 +
 .../solr/cloud/LIROnShardRestartTest.java       |  262 +
 .../cloud/LeaderElectionContextKeyTest.java     |  121 +
 .../cloud/LeaderElectionIntegrationTest.java    |  180 +
 .../apache/solr/cloud/LeaderElectionTest.java   |  550 ++
 .../cloud/LeaderFailoverAfterPartitionTest.java |  189 +
 .../cloud/LeaderFailureAfterFreshStartTest.java |  274 +
 .../solr/cloud/LeaderTragicEventTest.java       |  197 +
 .../solr/cloud/LeaderVoteWaitTimeoutTest.java   |  274 +
 .../solr/cloud/LegacyCloudClusterPropTest.java  |  165 +
 .../cloud/MetricsHistoryIntegrationTest.java    |  194 +
 .../apache/solr/cloud/MigrateRouteKeyTest.java  |  224 +
 .../solr/cloud/MissingSegmentRecoveryTest.java  |  124 +
 .../org/apache/solr/cloud/MockZkController.java |   36 +
 .../solr/cloud/MoveReplicaHDFSFailoverTest.java |  207 +
 .../apache/solr/cloud/MoveReplicaHDFSTest.java  |   98 +
 .../org/apache/solr/cloud/MoveReplicaTest.java  |  390 ++
 .../solr/cloud/MultiSolrCloudTestCaseTest.java  |   80 +
 .../apache/solr/cloud/MultiThreadedOCPTest.java |  295 +
 .../org/apache/solr/cloud/NodeMutatorTest.java  |   94 +
 ...utOfBoxZkACLAndCredentialsProvidersTest.java |  137 +
 ...rriddenZkACLAndCredentialsProvidersTest.java |  340 ++
 ...verseerCollectionConfigSetProcessorTest.java |  804 +++
 .../cloud/OverseerModifyCollectionTest.java     |   80 +
 .../apache/solr/cloud/OverseerRolesTest.java    |  153 +
 .../apache/solr/cloud/OverseerStatusTest.java   |   98 +
 .../solr/cloud/OverseerTaskQueueTest.java       |   96 +
 .../org/apache/solr/cloud/OverseerTest.java     | 1521 ++++++
 .../solr/cloud/PeerSyncReplicationTest.java     |  412 ++
 .../solr/cloud/RecoveryAfterSoftCommitTest.java |  119 +
 .../org/apache/solr/cloud/RecoveryZkTest.java   |  150 +
 .../apache/solr/cloud/RemoteQueryErrorTest.java |   58 +
 .../solr/cloud/ReplaceNodeNoTargetTest.java     |  120 +
 .../org/apache/solr/cloud/ReplaceNodeTest.java  |  187 +
 .../solr/cloud/ReplicationFactorTest.java       |  491 ++
 .../solr/cloud/RestartWhileUpdatingTest.java    |  203 +
 .../apache/solr/cloud/RollingRestartTest.java   |  161 +
 .../org/apache/solr/cloud/SSLMigrationTest.java |  138 +
 .../solr/cloud/SaslZkACLProviderTest.java       |  224 +
 .../cloud/SegmentTerminateEarlyTestState.java   |  275 +
 .../solr/cloud/ShardRoutingCustomTest.java      |   99 +
 .../org/apache/solr/cloud/ShardRoutingTest.java |  345 ++
 .../cloud/SharedFSAutoReplicaFailoverTest.java  |  421 ++
 .../org/apache/solr/cloud/SliceStateTest.java   |   58 +
 .../apache/solr/cloud/SolrCLIZkUtilsTest.java   |  782 +++
 .../apache/solr/cloud/SolrCloudExampleTest.java |  288 +
 .../org/apache/solr/cloud/SolrXmlInZkTest.java  |  179 +
 .../org/apache/solr/cloud/SyncSliceTest.java    |  309 ++
 .../solr/cloud/TestAuthenticationFramework.java |  181 +
 .../apache/solr/cloud/TestCloudConsistency.java |  271 +
 .../solr/cloud/TestCloudDeleteByQuery.java      |  247 +
 .../apache/solr/cloud/TestCloudInspectUtil.java |  124 +
 .../cloud/TestCloudJSONFacetJoinDomain.java     |  853 +++
 .../solr/cloud/TestCloudJSONFacetSKG.java       |  677 +++
 ...TestCloudPhrasesIdentificationComponent.java |  200 +
 .../apache/solr/cloud/TestCloudPivotFacet.java  |  840 +++
 .../solr/cloud/TestCloudPseudoReturnFields.java |  841 +++
 .../apache/solr/cloud/TestCloudRecovery.java    |  205 +
 .../solr/cloud/TestCloudSearcherWarming.java    |  323 ++
 .../solr/cloud/TestClusterProperties.java       |   68 +
 .../apache/solr/cloud/TestConfigSetsAPI.java    |  769 +++
 .../cloud/TestConfigSetsAPIExclusivity.java     |  181 +
 .../solr/cloud/TestConfigSetsAPIZkFailure.java  |  360 ++
 .../org/apache/solr/cloud/TestCryptoKeys.java   |  208 +
 .../cloud/TestDeleteCollectionOnDownNodes.java  |   67 +
 .../solr/cloud/TestDistribDocBasedVersion.java  |  353 ++
 .../apache/solr/cloud/TestDistributedMap.java   |  180 +
 .../solr/cloud/TestDownShardTolerantSearch.java |   81 +
 .../TestExclusionRuleCollectionAccess.java      |   47 +
 .../apache/solr/cloud/TestHashPartitioner.java  |  321 ++
 .../TestLeaderElectionWithEmptyReplica.java     |  125 +
 .../solr/cloud/TestLeaderElectionZkExpiry.java  |  107 +
 .../org/apache/solr/cloud/TestLockTree.java     |  123 +
 .../solr/cloud/TestMiniSolrCloudClusterSSL.java |  400 ++
 .../cloud/TestOnReconnectListenerSupport.java   |  155 +
 .../org/apache/solr/cloud/TestPrepRecovery.java |  112 +
 .../org/apache/solr/cloud/TestPullReplica.java  |  677 +++
 .../cloud/TestPullReplicaErrorHandling.java     |  348 ++
 .../apache/solr/cloud/TestRandomFlRTGCloud.java |  968 ++++
 .../cloud/TestRandomRequestDistribution.java    |  248 +
 .../apache/solr/cloud/TestRebalanceLeaders.java |  349 ++
 .../solr/cloud/TestRequestForwarding.java       |   85 +
 .../apache/solr/cloud/TestSSLRandomization.java |  254 +
 .../apache/solr/cloud/TestSegmentSorting.java   |  188 +
 .../solr/cloud/TestShortCircuitedRequests.java  |   57 +
 .../cloud/TestSizeLimitedDistributedMap.java    |   69 +
 .../solr/cloud/TestSkipOverseerOperations.java  |  128 +
 .../TestSolrCloudWithDelegationTokens.java      |  413 ++
 .../cloud/TestSolrCloudWithKerberosAlt.java     |  173 +
 .../TestSolrCloudWithSecureImpersonation.java   |  335 ++
 .../TestStressCloudBlindAtomicUpdates.java      |  499 ++
 .../solr/cloud/TestStressInPlaceUpdates.java    |  599 +++
 .../apache/solr/cloud/TestStressLiveNodes.java  |  255 +
 .../org/apache/solr/cloud/TestTlogReplica.java  |  967 ++++
 .../cloud/TestTolerantUpdateProcessorCloud.java | 1076 ++++
 .../TestTolerantUpdateProcessorRandomCloud.java |  395 ++
 .../org/apache/solr/cloud/TestUtilizeNode.java  |  183 +
 .../apache/solr/cloud/TestWithCollection.java   |  621 +++
 .../org/apache/solr/cloud/TestZkChroot.java     |  168 +
 .../TlogReplayBufferedWhileIndexingTest.java    |  146 +
 .../cloud/TriLevelCompositeIdRoutingTest.java   |  158 +
 .../solr/cloud/UnloadDistributedZkTest.java     |  378 ++
 ...MParamsZkACLAndCredentialsProvidersTest.java |  278 +
 .../java/org/apache/solr/cloud/ZkCLITest.java   |  394 ++
 .../org/apache/solr/cloud/ZkControllerTest.java |  387 ++
 .../org/apache/solr/cloud/ZkFailoverTest.java   |   90 +
 .../org/apache/solr/cloud/ZkNodePropsTest.java  |   51 +
 .../org/apache/solr/cloud/ZkShardTermsTest.java |  316 ++
 .../org/apache/solr/cloud/ZkSolrClientTest.java |  373 ++
 .../AbstractCloudBackupRestoreTestCase.java     |  387 ++
 .../solr/cloud/api/collections/AssignTest.java  |  193 +
 .../api/collections/CollectionReloadTest.java   |   85 +
 .../CollectionTooManyReplicasTest.java          |  222 +
 .../CollectionsAPIAsyncDistributedZkTest.java   |  265 +
 .../CollectionsAPIDistributedZkTest.java        |  689 +++
 ...ConcurrentDeleteAndCreateCollectionTest.java |  227 +
 .../api/collections/CustomCollectionTest.java   |  201 +
 .../HdfsCollectionsAPIDistributedZkTest.java    |  177 +
 .../api/collections/ReplicaPropertiesBase.java  |  178 +
 .../cloud/api/collections/ShardSplitTest.java   | 1113 ++++
 .../SimpleCollectionCreateDeleteTest.java       |   66 +
 .../api/collections/TestCollectionAPI.java      |  939 ++++
 .../TestCollectionsAPIViaSolrCloudCluster.java  |  299 ++
 .../collections/TestHdfsCloudBackupRestore.java |  216 +
 .../TestLocalFSCloudBackupRestore.java          |   66 +
 .../api/collections/TestReplicaProperties.java  |  236 +
 .../TestRequestStatusCollectionAPI.java         |  198 +
 .../AutoAddReplicasIntegrationTest.java         |  199 +
 .../AutoAddReplicasPlanActionTest.java          |  208 +
 .../autoscaling/AutoScalingHandlerTest.java     | 1053 ++++
 .../solr/cloud/autoscaling/CapturedEvent.java   |   65 +
 .../autoscaling/ComputePlanActionTest.java      |  679 +++
 .../autoscaling/ExecutePlanActionTest.java      |  226 +
 .../HdfsAutoAddReplicasIntegrationTest.java     |   57 +
 .../autoscaling/HttpTriggerListenerTest.java    |  208 +
 .../cloud/autoscaling/IndexSizeTriggerTest.java |  844 +++
 .../MetricTriggerIntegrationTest.java           |  245 +
 .../cloud/autoscaling/MetricTriggerTest.java    |  135 +
 .../NodeAddedTriggerIntegrationTest.java        |  299 ++
 .../cloud/autoscaling/NodeAddedTriggerTest.java |  326 ++
 .../NodeLostTriggerIntegrationTest.java         |  321 ++
 .../cloud/autoscaling/NodeLostTriggerTest.java  |  370 ++
 .../NodeMarkersRegistrationTest.java            |  286 +
 .../autoscaling/RestoreTriggerStateTest.java    |  168 +
 .../ScheduledMaintenanceTriggerTest.java        |  276 +
 .../ScheduledTriggerIntegrationTest.java        |  143 +
 .../cloud/autoscaling/ScheduledTriggerTest.java |  140 +
 .../SearchRateTriggerIntegrationTest.java       |  741 +++
 .../autoscaling/SearchRateTriggerTest.java      |  352 ++
 .../autoscaling/SystemLogListenerTest.java      |  240 +
 .../solr/cloud/autoscaling/TestPolicyCloud.java |  427 ++
 .../TriggerCooldownIntegrationTest.java         |  239 +
 .../autoscaling/TriggerEventQueueTest.java      |   98 +
 .../autoscaling/TriggerIntegrationTest.java     |  685 +++
 .../TriggerSetPropertiesIntegrationTest.java    |  200 +
 .../solr/cloud/autoscaling/sim/ActionError.java |   24 +
 .../sim/GenericDistributedQueue.java            |  599 +++
 .../sim/GenericDistributedQueueFactory.java     |   45 +
 .../cloud/autoscaling/sim/LiveNodesSet.java     |  109 +
 .../cloud/autoscaling/sim/SimCloudManager.java  |  895 ++++
 .../sim/SimClusterStateProvider.java            | 2172 ++++++++
 .../autoscaling/sim/SimDistribStateManager.java |  590 ++
 .../sim/SimDistributedQueueFactory.java         |  284 +
 .../autoscaling/sim/SimNodeStateProvider.java   |  331 ++
 .../autoscaling/sim/SimSolrCloudTestCase.java   |  170 +
 .../sim/TestSimClusterStateProvider.java        |  225 +
 .../sim/TestSimComputePlanAction.java           |  360 ++
 .../sim/TestSimDistribStateManager.java         |  342 ++
 .../sim/TestSimDistributedQueue.java            |  221 +
 .../sim/TestSimExecutePlanAction.java           |  210 +
 .../autoscaling/sim/TestSimExtremeIndexing.java |  167 +
 .../sim/TestSimGenericDistributedQueue.java     |   39 +
 .../autoscaling/sim/TestSimLargeCluster.java    |  730 +++
 .../sim/TestSimNodeAddedTrigger.java            |  327 ++
 .../autoscaling/sim/TestSimNodeLostTrigger.java |  346 ++
 .../autoscaling/sim/TestSimPolicyCloud.java     |  369 ++
 .../sim/TestSimTriggerIntegration.java          | 1333 +++++
 .../cloud/autoscaling/sim/package-info.java     |   98 +
 .../cloud/cdcr/BaseCdcrDistributedZkTest.java   |  898 ++++
 .../solr/cloud/cdcr/CdcrBidirectionalTest.java  |  237 +
 .../solr/cloud/cdcr/CdcrBootstrapTest.java      |  308 ++
 .../cloud/cdcr/CdcrOpsAndBoundariesTest.java    |  322 ++
 .../cloud/cdcr/CdcrReplicationHandlerTest.java  |  331 ++
 .../solr/cloud/cdcr/CdcrRequestHandlerTest.java |  178 +
 .../apache/solr/cloud/cdcr/CdcrTestsUtil.java   |  273 +
 .../cloud/cdcr/CdcrVersionReplicationTest.java  |  307 ++
 .../cloud/cdcr/CdcrWithNodesRestartsTest.java   |  344 ++
 .../solr/cloud/hdfs/HDFSCollectionsAPITest.java |   95 +
 .../cloud/hdfs/HdfsBasicDistributedZk2Test.java |   59 +
 .../cloud/hdfs/HdfsBasicDistributedZkTest.java  |   65 +
 .../hdfs/HdfsChaosMonkeyNothingIsSafeTest.java  |   69 +
 .../hdfs/HdfsChaosMonkeySafeLeaderTest.java     |   68 +
 .../solr/cloud/hdfs/HdfsNNFailoverTest.java     |   80 +
 .../solr/cloud/hdfs/HdfsRecoverLeaseTest.java   |  247 +
 .../solr/cloud/hdfs/HdfsRecoveryZkTest.java     |   59 +
 .../hdfs/HdfsRestartWhileUpdatingTest.java      |   63 +
 .../solr/cloud/hdfs/HdfsSyncSliceTest.java      |   57 +
 .../apache/solr/cloud/hdfs/HdfsTestUtil.java    |  242 +
 .../solr/cloud/hdfs/HdfsThreadLeakTest.java     |   84 +
 ...HdfsTlogReplayBufferedWhileIndexingTest.java |   65 +
 .../cloud/hdfs/HdfsUnloadDistributedZkTest.java |   57 +
 .../HdfsWriteToMultipleCollectionsTest.java     |  185 +
 .../apache/solr/cloud/hdfs/StressHdfsTest.java  |  247 +
 .../cloud/overseer/TestClusterStateMutator.java |   82 +
 .../solr/cloud/overseer/ZkStateReaderTest.java  |  284 +
 .../solr/cloud/overseer/ZkStateWriterTest.java  |  361 ++
 .../solr/cloud/rule/ImplicitSnitchTest.java     |  242 +
 .../apache/solr/cloud/rule/RuleEngineTest.java  |  315 ++
 .../org/apache/solr/cloud/rule/RulesTest.java   |  260 +
 .../common/cloud/ZkStateReaderAccessor.java     |   36 +
 .../solr/core/AlternateDirectoryTest.java       |   84 +
 .../solr/core/BlobRepositoryCloudTest.java      |  116 +
 .../solr/core/BlobRepositoryMockingTest.java    |  157 +
 .../solr/core/BlobStoreTestRequestHandler.java  |   62 +
 .../solr/core/CachingDirectoryFactoryTest.java  |  250 +
 .../core/ConfigureRecoveryStrategyTest.java     |  112 +
 .../org/apache/solr/core/CoreSorterTest.java    |  237 +
 .../solr/core/CountUsageValueSourceParser.java  |   86 +
 .../apache/solr/core/DirectoryFactoryTest.java  |  117 +
 .../solr/core/DummyValueSourceParser.java       |   58 +
 .../solr/core/ExitableDirectoryReaderTest.java  |  170 +
 .../apache/solr/core/FakeDeletionPolicy.java    |   60 +
 .../solr/core/HdfsDirectoryFactoryTest.java     |  238 +
 .../java/org/apache/solr/core/HelloStream.java  |  100 +
 .../org/apache/solr/core/MockEventListener.java |   57 +
 .../java/org/apache/solr/core/MockInfoBean.java |   75 +
 .../core/MockQuerySenderListenerReqHandler.java |   61 +
 .../solr/core/MockShardHandlerFactory.java      |   69 +
 .../solr/core/OpenCloseCoreStressTest.java      |  525 ++
 .../org/apache/solr/core/PluginInfoTest.java    |  165 +
 .../apache/solr/core/QueryResultKeyTest.java    |  201 +
 .../solr/core/RAMDirectoryFactoryTest.java      |   64 +
 .../apache/solr/core/RequestHandlersTest.java   |  121 +
 .../apache/solr/core/ResourceLoaderTest.java    |  222 +
 .../java/org/apache/solr/core/SOLR749Test.java  |  110 +
 .../core/SolrCoreCheckLockOnStartupTest.java    |  112 +
 .../java/org/apache/solr/core/SolrCoreTest.java |  358 ++
 .../solr/core/TestBackupRepositoryFactory.java  |  149 +
 .../org/apache/solr/core/TestBadConfig.java     |  102 +
 .../org/apache/solr/core/TestCodecSupport.java  |  230 +
 .../java/org/apache/solr/core/TestConfig.java   |  236 +
 .../org/apache/solr/core/TestConfigOverlay.java |   78 +
 .../solr/core/TestConfigSetImmutable.java       |  122 +
 .../solr/core/TestConfigSetProperties.java      |   93 +
 .../org/apache/solr/core/TestConfigSets.java    |  142 +
 .../org/apache/solr/core/TestCoreContainer.java |  647 +++
 .../org/apache/solr/core/TestCoreDiscovery.java |  576 ++
 .../solr/core/TestCorePropertiesReload.java     |   74 +
 .../org/apache/solr/core/TestCustomStream.java  |   75 +
 .../apache/solr/core/TestDirectoryFactory.java  |  106 +
 .../apache/solr/core/TestDynamicLoading.java    |  288 +
 .../org/apache/solr/core/TestDynamicURP.java    |  110 +
 .../solr/core/TestImplicitCoreProperties.java   |   76 +
 .../apache/solr/core/TestInfoStreamLogging.java |   36 +
 .../org/apache/solr/core/TestInitParams.java    |  132 +
 .../apache/solr/core/TestJmxIntegration.java    |  263 +
 .../org/apache/solr/core/TestLazyCores.java     |  870 +++
 .../apache/solr/core/TestMergePolicyConfig.java |  270 +
 .../java/org/apache/solr/core/TestNRTOpen.java  |  150 +
 .../solr/core/TestQuerySenderListener.java      |  101 +
 .../solr/core/TestQuerySenderNoQuery.java       |   84 +
 .../solr/core/TestReloadAndDeleteDocs.java      |   48 +
 .../solr/core/TestShardHandlerFactory.java      |   42 +
 .../apache/solr/core/TestSimpleTextCodec.java   |   63 +
 .../apache/solr/core/TestSolrConfigHandler.java |  868 +++
 .../solr/core/TestSolrDeletionPolicy1.java      |  136 +
 .../solr/core/TestSolrDeletionPolicy2.java      |   61 +
 .../apache/solr/core/TestSolrIndexConfig.java   |   66 +
 .../java/org/apache/solr/core/TestSolrXml.java  |  338 ++
 .../apache/solr/core/TestXIncludeConfig.java    |   83 +
 .../core/snapshots/TestSolrCloudSnapshots.java  |  323 ++
 .../core/snapshots/TestSolrCoreSnapshots.java   |  312 ++
 .../handler/AnalysisRequestHandlerTestBase.java |  123 +
 .../apache/solr/handler/BackupRestoreUtils.java |  104 +
 .../handler/BinaryUpdateRequestHandlerTest.java |   73 +
 .../solr/handler/CSVRequestHandlerTest.java     |   53 +
 .../apache/solr/handler/CheckBackupStatus.java  |   69 +
 .../DocumentAnalysisRequestHandlerTest.java     |  336 ++
 .../FieldAnalysisRequestHandlerTest.java        |  547 ++
 .../org/apache/solr/handler/JsonLoaderTest.java | 1084 ++++
 .../solr/handler/MoreLikeThisHandlerTest.java   |  188 +
 .../solr/handler/PingRequestHandlerTest.java    |  242 +
 .../apache/solr/handler/RequestLoggingTest.java |  102 +
 .../solr/handler/ResponseBuilderTest.java       |   46 +
 .../apache/solr/handler/SearchHandlerTest.java  |   80 +
 .../apache/solr/handler/TestBlobHandler.java    |  183 +
 .../org/apache/solr/handler/TestCSVLoader.java  |  328 ++
 .../apache/solr/handler/TestConfigReload.java   |  128 +
 .../org/apache/solr/handler/TestCoreBackup.java |   61 +
 .../solr/handler/TestHdfsBackupRestoreCore.java |  250 +
 .../solr/handler/TestReplicationHandler.java    | 1668 ++++++
 .../handler/TestReplicationHandlerBackup.java   |  316 ++
 .../apache/solr/handler/TestReqParamsAPI.java   |  300 ++
 .../apache/solr/handler/TestRestoreCore.java    |  248 +
 .../org/apache/solr/handler/TestSQLHandler.java | 2761 ++++++++++
 .../solr/handler/TestSQLHandlerNonCloud.java    |   92 +
 .../handler/TestSolrConfigHandlerCloud.java     |  275 +
 .../TestSolrConfigHandlerConcurrent.java        |  198 +
 .../solr/handler/TestSystemCollAutoCreate.java  |   29 +
 .../handler/ThrowErrorOnInitRequestHandler.java |   51 +
 .../solr/handler/V2ApiIntegrationTest.java      |  158 +
 .../apache/solr/handler/V2StandaloneTest.java   |   53 +
 .../handler/XmlUpdateRequestHandlerTest.java    |  233 +
 .../handler/XsltUpdateRequestHandlerTest.java   |  128 +
 .../handler/admin/AdminHandlersProxyTest.java   |  119 +
 .../admin/AutoscalingHistoryHandlerTest.java    |  439 ++
 .../admin/CoreAdminCreateDiscoverTest.java      |  277 +
 .../handler/admin/CoreAdminHandlerTest.java     |  444 ++
 .../handler/admin/CoreAdminOperationTest.java   |  681 +++
 .../admin/CoreAdminRequestStatusTest.java       |  102 +
 .../admin/CoreMergeIndexesAdminHandlerTest.java |  105 +
 .../solr/handler/admin/InfoHandlerTest.java     |  171 +
 .../solr/handler/admin/LoggingHandlerTest.java  |   73 +
 .../handler/admin/LukeRequestHandlerTest.java   |  263 +
 .../solr/handler/admin/MBeansHandlerTest.java   |  199 +
 .../solr/handler/admin/MetricsHandlerTest.java  |  321 ++
 .../admin/MetricsHistoryHandlerTest.java        |  128 +
 .../admin/PropertiesRequestHandlerTest.java     |   73 +
 .../SecurityConfHandlerLocalForTesting.java     |   39 +
 .../handler/admin/SecurityConfHandlerTest.java  |  277 +
 .../admin/SegmentsInfoRequestHandlerTest.java   |  123 +
 .../admin/ShowFileRequestHandlerTest.java       |  125 +
 .../solr/handler/admin/StatsReloadRaceTest.java |  146 +
 .../handler/admin/SystemInfoHandlerTest.java    |   53 +
 .../solr/handler/admin/TestApiFramework.java    |  219 +
 .../solr/handler/admin/TestCollectionAPIs.java  |  308 ++
 .../solr/handler/admin/TestConfigsApi.java      |   59 +
 .../solr/handler/admin/TestCoreAdminApis.java   |  106 +
 .../admin/ZookeeperStatusHandlerTest.java       |   86 +
 .../handler/component/BadComponentTest.java     |   41 +
 .../component/CustomHighlightComponentTest.java |  306 ++
 .../handler/component/DebugComponentTest.java   |  280 +
 .../DistributedDebugComponentTest.java          |  445 ++
 .../DistributedExpandComponentTest.java         |  229 +
 .../DistributedFacetExistsSmallTest.java        |  236 +
 .../DistributedFacetPivotLargeTest.java         |  974 ++++
 .../DistributedFacetPivotLongTailTest.java      |  324 ++
 .../DistributedFacetPivotSmallAdvancedTest.java |  249 +
 .../DistributedFacetPivotSmallTest.java         | 1680 ++++++
 .../DistributedFacetPivotWhiteBoxTest.java      |  139 +
 .../component/DistributedMLTComponentTest.java  |  188 +
 ...DistributedQueryComponentCustomSortTest.java |  122 +
 ...stributedQueryComponentOptimizationTest.java |  343 ++
 .../DistributedQueryElevationComponentTest.java |  136 +
 .../DistributedSpellCheckComponentTest.java     |  228 +
 .../DistributedSuggestComponentTest.java        |  140 +
 .../DistributedTermsComponentTest.java          |   60 +
 .../component/DummyCustomParamSpellChecker.java |   69 +
 .../handler/component/FacetPivotSmallTest.java  |  511 ++
 .../handler/component/InfixSuggestersTest.java  |  155 +
 .../PhrasesIdentificationComponentTest.java     |  796 +++
 .../component/QueryElevationComponentTest.java  |  797 +++
 .../component/ReplicaListTransformerTest.java   |  163 +
 .../component/ResourceSharingTestComponent.java |  144 +
 .../component/ResponseLogComponentTest.java     |   86 +
 .../handler/component/SearchHandlerTest.java    |  273 +
 .../ShufflingReplicaListTransformerTest.java    |   76 +
 .../component/SpellCheckComponentTest.java      |  358 ++
 .../handler/component/StatsComponentTest.java   | 1995 +++++++
 .../SuggestComponentContextFilterQueryTest.java |  257 +
 .../handler/component/SuggestComponentTest.java |  525 ++
 .../TermVectorComponentDistributedTest.java     |  237 +
 .../component/TermVectorComponentTest.java      |  331 ++
 .../handler/component/TermsComponentTest.java   |  514 ++
 ...estDistributedStatsComponentCardinality.java |  291 +
 .../handler/component/TestExpandComponent.java  |  334 ++
 .../component/TestHttpShardHandlerFactory.java  |  221 +
 .../handler/component/TestPivotHelperCode.java  |  116 +
 .../TestTrackingShardHandlerFactory.java        |  133 +
 .../solr/handler/export/TestExportWriter.java   |  796 +++
 .../solr/handler/loader/JavabinLoaderTest.java  |   91 +
 .../tagger/EmbeddedSolrNoSerializeTest.java     |  154 +
 .../handler/tagger/RandomizedTaggerTest.java    |  150 +
 .../apache/solr/handler/tagger/Tagger2Test.java |  176 +
 .../apache/solr/handler/tagger/TaggerTest.java  |  296 +
 .../solr/handler/tagger/TaggerTestCase.java     |  251 +
 .../handler/tagger/TaggingAttributeTest.java    |   73 +
 .../handler/tagger/WordLengthTaggingFilter.java |  110 +
 .../tagger/WordLengthTaggingFilterFactory.java  |   67 +
 .../handler/tagger/XmlInterpolationTest.java    |  224 +
 .../apache/solr/highlight/DummyHighlighter.java |   37 +
 .../highlight/FastVectorHighlighterTest.java    |   92 +
 .../solr/highlight/HighlighterConfigTest.java   |   77 +
 .../highlight/HighlighterMaxOffsetTest.java     |  119 +
 .../apache/solr/highlight/HighlighterTest.java  | 1250 +++++
 .../highlight/TestPostingsSolrHighlighter.java  |  189 +
 .../highlight/TestUnifiedSolrHighlighter.java   |  295 +
 .../solr/index/DummyMergePolicyFactory.java     |   33 +
 .../index/TestSlowCompositeReaderWrapper.java   |  150 +
 .../index/UninvertDocValuesMergePolicyTest.java |  242 +
 .../index/WrapperMergePolicyFactoryTest.java    |  135 +
 .../solr/index/hdfs/CheckHdfsIndexTest.java     |  140 +
 .../apache/solr/internal/csv/CSVParserTest.java |  583 ++
 .../solr/internal/csv/CSVPrinterTest.java       |  195 +
 .../solr/internal/csv/CSVStrategyTest.java      |   89 +
 .../apache/solr/internal/csv/CSVUtilsTest.java  |  150 +
 .../solr/internal/csv/CharBufferTest.java       |  190 +
 .../csv/ExtendedBufferedReaderTest.java         |  220 +
 .../csv/writer/CSVConfigGuesserTest.java        |   87 +
 .../solr/internal/csv/writer/CSVConfigTest.java |   98 +
 .../solr/internal/csv/writer/CSVFieldTest.java  |   46 +
 .../solr/internal/csv/writer/CSVWriterTest.java |   54 +
 .../org/apache/solr/legacy/TestLegacyField.java |  186 +
 .../solr/legacy/TestLegacyFieldReuse.java       |   81 +
 .../solr/legacy/TestLegacyNumericUtils.java     |  571 ++
 .../org/apache/solr/legacy/TestLegacyTerms.java |  159 +
 .../TestMultiValuedNumericRangeQuery.java       |   84 +
 .../solr/legacy/TestNumericRangeQuery32.java    |  461 ++
 .../solr/legacy/TestNumericRangeQuery64.java    |  490 ++
 .../solr/legacy/TestNumericTokenStream.java     |  188 +
 .../org/apache/solr/logging/TestLogWatcher.java |   80 +
 .../org/apache/solr/metrics/JvmMetricsTest.java |  151 +
 .../apache/solr/metrics/MetricsConfigTest.java  |  126 +
 .../solr/metrics/MockCounterSupplier.java       |   36 +
 .../solr/metrics/MockHistogramSupplier.java     |   40 +
 .../apache/solr/metrics/MockMeterSupplier.java  |   36 +
 .../apache/solr/metrics/MockTimerSupplier.java  |   46 +
 .../solr/metrics/SolrCoreMetricManagerTest.java |  164 +
 .../solr/metrics/SolrMetricManagerTest.java     |  256 +
 .../solr/metrics/SolrMetricReporterTest.java    |   70 +
 .../solr/metrics/SolrMetricTestUtils.java       |  103 +
 .../metrics/SolrMetricsIntegrationTest.java     |  190 +
 .../metrics/reporters/MockMetricReporter.java   |   81 +
 .../reporters/SolrGangliaReporterTest.java      |   84 +
 .../reporters/SolrGraphiteReporterTest.java     |  118 +
 .../reporters/SolrJmxReporterCloudTest.java     |  114 +
 .../metrics/reporters/SolrJmxReporterTest.java  |  244 +
 .../reporters/SolrSlf4jReporterTest.java        |  106 +
 .../reporters/solr/SolrCloudReportersTest.java  |  185 +
 .../reporters/solr/SolrShardReporterTest.java   |  118 +
 .../metrics/rrd/SolrRrdBackendFactoryTest.java  |  201 +
 .../solr/request/RegexBytesRefFilterTest.java   |   34 +
 .../apache/solr/request/SimpleFacetsTest.java   | 3523 ++++++++++++
 .../request/SubstringBytesRefFilterTest.java    |   51 +
 .../apache/solr/request/TestFacetMethods.java   |  226 +
 .../org/apache/solr/request/TestFaceting.java   |  904 ++++
 .../solr/request/TestIntervalFaceting.java      | 1207 +++++
 .../solr/request/TestRemoteStreaming.java       |  136 +
 .../org/apache/solr/request/TestStreamBody.java |  138 +
 .../request/TestUnInvertedFieldException.java   |  129 +
 .../org/apache/solr/request/TestWriterPerf.java |  199 +
 .../solr/request/macro/TestMacroExpander.java   |  129 +
 .../apache/solr/request/macro/TestMacros.java   |   89 +
 .../apache/solr/response/JSONWriterTest.java    |  304 ++
 .../apache/solr/response/SmileWriterTest.java   |  258 +
 .../solr/response/TestBinaryResponseWriter.java |  107 +
 .../solr/response/TestCSVResponseWriter.java    |  257 +
 .../solr/response/TestCustomDocTransformer.java |  128 +
 .../response/TestGeoJSONResponseWriter.java     |  279 +
 .../response/TestGraphMLResponseWriter.java     |  155 +
 .../response/TestJavabinTupleStreamParser.java  |  202 +
 .../TestPHPSerializedResponseWriter.java        |  105 +
 .../apache/solr/response/TestPushWriter.java    |   85 +
 .../solr/response/TestRawResponseWriter.java    |  202 +
 .../solr/response/TestRawTransformer.java       |   70 +
 .../response/TestRetrieveFieldsOptimizer.java   |  648 +++
 .../solr/response/TestSolrQueryResponse.java    |  293 +
 .../transform/TestChildDocTransformer.java      |  472 ++
 .../TestChildDocTransformerHierarchy.java       |  387 ++
 .../transform/TestSubQueryTransformer.java      |  568 ++
 .../TestSubQueryTransformerCrossCore.java       |  131 +
 .../TestSubQueryTransformerDistrib.java         |  250 +
 .../apache/solr/rest/SolrRestletTestBase.java   |   67 +
 .../apache/solr/rest/TestManagedResource.java   |  327 ++
 .../solr/rest/TestManagedResourceStorage.java   |  140 +
 .../org/apache/solr/rest/TestRestManager.java   |  280 +
 .../solr/rest/schema/TestBulkSchemaAPI.java     | 1029 ++++
 .../schema/TestCopyFieldCollectionResource.java |  137 +
 .../TestDynamicFieldCollectionResource.java     |   63 +
 .../rest/schema/TestDynamicFieldResource.java   |   72 +
 .../schema/TestFieldCollectionResource.java     |   89 +
 .../solr/rest/schema/TestFieldResource.java     |   99 +
 .../schema/TestFieldTypeCollectionResource.java |   38 +
 .../solr/rest/schema/TestFieldTypeResource.java |   90 +
 .../rest/schema/TestSchemaNameResource.java     |   28 +
 .../solr/rest/schema/TestSchemaResource.java    |  182 +
 .../schema/TestSchemaSimilarityResource.java    |   35 +
 .../rest/schema/TestSchemaVersionResource.java  |   29 +
 .../TestSerializedLuceneMatchVersion.java       |   71 +
 .../rest/schema/TestUniqueKeyFieldResource.java |   30 +
 .../analysis/TestManagedStopFilterFactory.java  |  217 +
 .../TestManagedSynonymFilterFactory.java        |  295 +
 .../TestManagedSynonymGraphFilterFactory.java   |  298 +
 .../apache/solr/schema/BadCopyFieldTest.java    |   57 +
 .../apache/solr/schema/BadIndexSchemaTest.java  |  188 +
 .../apache/solr/schema/BooleanFieldTest.java    |   78 +
 .../solr/schema/ChangedSchemaMergeTest.java     |  201 +
 .../org/apache/solr/schema/CopyFieldTest.java   |  263 +
 .../solr/schema/CurrencyFieldTypeTest.java      |  729 +++
 .../solr/schema/CustomAnalyzerStrField.java     |   73 +
 .../org/apache/solr/schema/DateFieldTest.java   |   64 +
 .../apache/solr/schema/DateRangeFieldTest.java  |  128 +
 .../solr/schema/DocValuesMissingTest.java       |  547 ++
 .../apache/solr/schema/DocValuesMultiTest.java  |  347 ++
 .../org/apache/solr/schema/DocValuesTest.java   |  749 +++
 .../org/apache/solr/schema/EnumFieldTest.java   |  573 ++
 .../solr/schema/ExternalFileFieldSortTest.java  |   64 +
 .../schema/IndexSchemaRuntimeFieldTest.java     |   71 +
 .../org/apache/solr/schema/IndexSchemaTest.java |  132 +
 ...IntPointPrefixActsAsRangeQueryFieldType.java |   34 +
 .../ManagedSchemaRoundRobinCloudTest.java       |   98 +
 .../solr/schema/MockExchangeRateProvider.java   |   91 +
 .../org/apache/solr/schema/MultiTermTest.java   |   93 +
 .../apache/solr/schema/MyCrazyCustomField.java  |   60 +
 .../solr/schema/NotRequiredUniqueKeyTest.java   |   49 +
 .../apache/solr/schema/NumericFieldsTest.java   |  240 +
 .../OpenExchangeRatesOrgProviderTest.java       |  119 +
 .../org/apache/solr/schema/PolyFieldTest.java   |  187 +
 .../PreAnalyzedFieldManagedSchemaCloudTest.java |   73 +
 .../solr/schema/PreAnalyzedFieldTest.java       |  256 +
 .../solr/schema/PrimitiveFieldTypeTest.java     |  131 +
 .../apache/solr/schema/RequiredFieldsTest.java  |  142 +
 .../solr/schema/SchemaApiFailureTest.java       |   63 +
 .../SchemaVersionSpecificBehaviorTest.java      |  229 +
 .../apache/solr/schema/SchemaWatcherTest.java   |   59 +
 .../apache/solr/schema/SortableBinaryField.java |   87 +
 .../solr/schema/SpatialRPTFieldTypeTest.java    |  284 +
 .../solr/schema/SynonymTokenizerTest.java       |   43 +
 .../org/apache/solr/schema/TestBinaryField.java |  173 +
 .../solr/schema/TestBulkSchemaConcurrent.java   |  315 ++
 .../solr/schema/TestCloudManagedSchema.java     |  105 +
 .../apache/solr/schema/TestCloudSchemaless.java |  178 +
 .../apache/solr/schema/TestCollationField.java  |  195 +
 .../schema/TestCollationFieldDocValues.java     |  194 +
 .../solr/schema/TestHalfAndHalfDocValues.java   |  131 +
 .../apache/solr/schema/TestManagedSchema.java   |  472 ++
 .../solr/schema/TestManagedSchemaAPI.java       |  102 +
 .../schema/TestManagedSchemaThreadSafety.java   |  189 +
 .../apache/solr/schema/TestOmitPositions.java   |   59 +
 .../org/apache/solr/schema/TestPointFields.java | 3942 ++++++++++++++
 .../apache/solr/schema/TestSchemaManager.java   |   73 +
 .../schema/TestSchemalessBufferedUpdates.java   |  159 +
 .../solr/schema/TestSortableTextField.java      |  562 ++
 .../solr/schema/TestUseDocValuesAsStored.java   |  429 ++
 .../solr/schema/TestUseDocValuesAsStored2.java  |  185 +
 .../solr/schema/ThrowErrorOnInitFieldType.java  |   29 +
 .../TrieIntPrefixActsAsRangeQueryFieldType.java |   36 +
 .../org/apache/solr/schema/UUIDFieldTest.java   |   67 +
 .../solr/schema/WrappedIntPointField.java       |   46 +
 .../apache/solr/schema/WrappedTrieIntField.java |   31 +
 .../solr/search/AnalyticsMergeStrategyTest.java |  122 +
 .../apache/solr/search/AnalyticsQueryTest.java  |   84 +
 .../solr/search/AnalyticsTestQParserPlugin.java |  171 +
 .../ApacheLuceneSolrNearQueryBuilder.java       |   54 +
 .../solr/search/ChooseOneWordQueryBuilder.java  |   62 +
 .../search/CurrencyRangeFacetCloudTest.java     |  486 ++
 .../org/apache/solr/search/CursorMarkTest.java  |  292 +
 .../solr/search/DelayingSearchComponent.java    |   51 +
 .../java/org/apache/solr/search/DocSetPerf.java |  173 +
 .../apache/solr/search/FooQParserPlugin.java    |   42 +
 .../apache/solr/search/GoodbyeQueryBuilder.java |   38 +
 .../apache/solr/search/HandyQueryBuilder.java   |   69 +
 .../apache/solr/search/HelloQueryBuilder.java   |   38 +
 .../org/apache/solr/search/LargeFieldTest.java  |  126 +
 .../apache/solr/search/MergeStrategyTest.java   |  177 +
 .../apache/solr/search/MockSearchComponent.java |   49 +
 .../apache/solr/search/QueryEqualityTest.java   | 1279 +++++
 .../apache/solr/search/QueryParsingTest.java    |   97 +
 .../org/apache/solr/search/RankQueryTest.java   |  113 +
 .../apache/solr/search/ReturnFieldsTest.java    |  561 ++
 .../SignificantTermsQParserPluginTest.java      |   35 +
 .../apache/solr/search/SortSpecParsingTest.java |  245 +
 .../apache/solr/search/SpatialFilterTest.java   |  207 +
 .../solr/search/TestAddFieldRealTimeGet.java    |   88 +
 .../solr/search/TestCollapseQParserPlugin.java  |  955 ++++
 .../TestComplexPhraseLeadingWildcard.java       |  113 +
 .../search/TestComplexPhraseQParserPlugin.java  |  396 ++
 .../apache/solr/search/TestComponentsName.java  |   72 +
 .../org/apache/solr/search/TestCustomSort.java  |  125 +
 .../java/org/apache/solr/search/TestDocSet.java |  572 ++
 .../solr/search/TestElisionMultitermQuery.java  |   51 +
 .../solr/search/TestExtendedDismaxParser.java   | 2123 ++++++++
 .../apache/solr/search/TestFastLRUCache.java    |  409 ++
 .../apache/solr/search/TestFieldSortValues.java |   53 +
 .../solr/search/TestFilteredDocIdSet.java       |  222 +
 .../org/apache/solr/search/TestFiltering.java   |  549 ++
 .../solr/search/TestFoldingMultitermQuery.java  |  349 ++
 .../search/TestGraphTermsQParserPlugin.java     |  123 +
 .../solr/search/TestHashQParserPlugin.java      |  312 ++
 .../apache/solr/search/TestIndexSearcher.java   |  484 ++
 .../org/apache/solr/search/TestInitQParser.java |   62 +
 .../org/apache/solr/search/TestLFUCache.java    |  548 ++
 .../org/apache/solr/search/TestLRUCache.java    |  192 +
 .../TestLegacyNumericRangeQueryBuilder.java     |  179 +
 .../solr/search/TestMaxScoreQueryParser.java    |  166 +
 .../apache/solr/search/TestMissingGroups.java   |  179 +
 .../solr/search/TestMultiWordSynonyms.java      |  100 +
 .../apache/solr/search/TestNoOpRegenerator.java |   60 +
 ...OverriddenPrefixQueryForCustomFieldType.java |  153 +
 .../search/TestPayloadCheckQParserPlugin.java   |   73 +
 .../search/TestPayloadScoreQParserPlugin.java   |   62 +
 .../solr/search/TestPseudoReturnFields.java     |  753 +++
 .../org/apache/solr/search/TestQueryTypes.java  |  484 ++
 .../org/apache/solr/search/TestQueryUtils.java  |  288 +
 .../org/apache/solr/search/TestRTGBase.java     |  129 +
 .../search/TestRandomCollapseQParserPlugin.java |  212 +
 .../org/apache/solr/search/TestRangeQuery.java  |  705 +++
 .../apache/solr/search/TestRankQueryPlugin.java |  783 +++
 .../solr/search/TestReRankQParserPlugin.java    |  650 +++
 .../org/apache/solr/search/TestRealTimeGet.java |  798 +++
 .../org/apache/solr/search/TestRecovery.java    | 1718 ++++++
 .../apache/solr/search/TestRecoveryHdfs.java    | 1063 ++++
 .../java/org/apache/solr/search/TestReload.java |  101 +
 .../apache/solr/search/TestReloadDeadlock.java  |  239 +
 .../org/apache/solr/search/TestSearchPerf.java  |  249 +
 .../apache/solr/search/TestSearcherReuse.java   |  256 +
 .../solr/search/TestSimpleQParserPlugin.java    |  232 +
 .../apache/solr/search/TestSmileRequest.java    |  110 +
 .../apache/solr/search/TestSolr4Spatial.java    |  440 ++
 .../apache/solr/search/TestSolr4Spatial2.java   |  336 ++
 .../apache/solr/search/TestSolrCoreParser.java  |  218 +
 .../solr/search/TestSolrFieldCacheBean.java     |   97 +
 .../java/org/apache/solr/search/TestSolrJ.java  |  183 +
 .../apache/solr/search/TestSolrQueryParser.java | 1128 ++++
 .../java/org/apache/solr/search/TestSort.java   |  372 ++
 .../solr/search/TestStandardQParsers.java       |   91 +
 .../apache/solr/search/TestStressLucene.java    |  375 ++
 .../apache/solr/search/TestStressRecovery.java  |  417 ++
 .../apache/solr/search/TestStressReorder.java   |  335 ++
 .../solr/search/TestStressUserVersions.java     |  325 ++
 .../apache/solr/search/TestStressVersions.java  |  281 +
 .../solr/search/TestSurroundQueryParser.java    |  107 +
 .../org/apache/solr/search/TestTrieFacet.java   |  205 +
 .../solr/search/TestValueSourceCache.java       |  134 +
 .../org/apache/solr/search/TestXmlQParser.java  |   42 +
 .../solr/search/TestXmlQParserPlugin.java       |   78 +
 .../org/apache/solr/search/facet/DebugAgg.java  |  221 +
 ...ibutedFacetSimpleRefinementLongTailTest.java |  443 ++
 .../solr/search/facet/RangeFacetCloudTest.java  |  786 +++
 .../search/facet/SpatialHeatmapFacetsTest.java  |  349 ++
 .../search/facet/TestJsonFacetRefinement.java   | 1299 +++++
 .../solr/search/facet/TestJsonFacets.java       | 2663 +++++++++
 .../facet/TestJsonFacetsStatsParsing.java       |  106 +
 .../facet/TestJsonFacetsWithNestedObjects.java  |  363 ++
 .../search/function/NvlValueSourceParser.java   |   80 +
 .../search/function/SortByFunctionTest.java     |  215 +
 .../solr/search/function/TestFunctionQuery.java | 1069 ++++
 .../function/TestMinMaxOnMultiValuedField.java  |  927 ++++
 .../solr/search/function/TestOrdValues.java     |  309 ++
 .../function/TestSortByMinMaxFunction.java      |   35 +
 .../function/distance/DistanceFunctionTest.java |  242 +
 .../apache/solr/search/join/BJQParserTest.java  |  483 ++
 .../search/join/BlockJoinFacetDistribTest.java  |  235 +
 .../search/join/BlockJoinFacetRandomTest.java   |  635 +++
 .../search/join/BlockJoinFacetSimpleTest.java   |  121 +
 .../apache/solr/search/join/GraphQueryTest.java |  143 +
 .../search/join/TestCloudNestedDocsSort.java    |  193 +
 .../solr/search/join/TestNestedDocsSort.java    |  150 +
 .../search/join/TestScoreJoinQPNoScore.java     |  369 ++
 .../solr/search/join/TestScoreJoinQPScore.java  |  341 ++
 .../solr/search/json/TestJsonRequest.java       |  539 ++
 .../solr/search/mlt/CloudMLTQParserTest.java    |  250 +
 .../solr/search/mlt/SimpleMLTQParserTest.java   |  168 +
 .../similarities/BaseSimilarityTestCase.java    |   54 +
 .../similarities/CustomSimilarityFactory.java   |   27 +
 .../MockConfigurableSimilarity.java             |   31 +
 .../similarities/TestBM25SimilarityFactory.java |   45 +
 .../TestClassicSimilarityFactory.java           |   43 +
 .../similarities/TestDFISimilarityFactory.java  |   55 +
 .../similarities/TestDFRSimilarityFactory.java  |   71 +
 .../similarities/TestIBSimilarityFactory.java   |   59 +
 .../TestLMDirichletSimilarityFactory.java       |   44 +
 .../TestLMJelinekMercerSimilarityFactory.java   |   44 +
 .../TestNonDefinedSimilarityFactory.java        |   42 +
 .../similarities/TestPerFieldSimilarity.java    |   75 +
 ...stPerFieldSimilarityWithDefaultOverride.java |   73 +
 .../TestSweetSpotSimilarityFactory.java         |  152 +
 .../solr/search/stats/TestBaseStatsCache.java   |   68 +
 .../search/stats/TestDefaultStatsCache.java     |  112 +
 .../solr/search/stats/TestDistribIDF.java       |  257 +
 .../search/stats/TestExactSharedStatsCache.java |   25 +
 .../solr/search/stats/TestExactStatsCache.java  |   24 +
 .../solr/search/stats/TestLRUStatsCache.java    |   24 +
 .../solr/security/BasicAuthIntegrationTest.java |  320 ++
 .../solr/security/BasicAuthStandaloneTest.java  |  221 +
 .../HttpParamDelegationTokenPlugin.java         |  272 +
 .../solr/security/MockAuthenticationPlugin.java |   80 +
 .../solr/security/MockAuthorizationPlugin.java  |   67 +
 .../PKIAuthenticationIntegrationTest.java       |  108 +
 .../security/TestAuthorizationFramework.java    |  119 +
 .../security/TestPKIAuthenticationPlugin.java   |  166 +
 .../TestRuleBasedAuthorizationPlugin.java       |  468 ++
 .../TestSha256AuthenticationProvider.java       |   67 +
 .../solr/security/hadoop/ImpersonationUtil.java |   73 +
 .../hadoop/ImpersonatorCollectionsHandler.java  |   60 +
 .../hadoop/TestDelegationWithHadoopAuth.java    |  402 ++
 .../hadoop/TestImpersonationWithHadoopAuth.java |  215 +
 .../TestSolrCloudWithHadoopAuthPlugin.java      |  139 +
 .../hadoop/TestZkAclsWithHadoopAuth.java        |  165 +
 .../apache/solr/servlet/CacheHeaderTest.java    |  269 +
 .../solr/servlet/CacheHeaderTestBase.java       |  151 +
 .../solr/servlet/DirectSolrConnectionTest.java  |   90 +
 .../solr/servlet/HttpSolrCallGetCoreTest.java   |  168 +
 .../apache/solr/servlet/NoCacheHeaderTest.java  |  175 +
 .../apache/solr/servlet/ResponseHeaderTest.java |   92 +
 .../solr/servlet/SolrRequestParserTest.java     |  510 ++
 .../ConjunctionSolrSpellCheckerTest.java        |   96 +
 .../spelling/DirectSolrSpellCheckerTest.java    |   93 +
 .../spelling/FileBasedSpellCheckerTest.java     |  179 +
 .../spelling/IndexBasedSpellCheckerTest.java    |  332 ++
 .../apache/solr/spelling/SampleComparator.java  |   35 +
 .../solr/spelling/SimpleQueryConverter.java     |   70 +
 .../solr/spelling/SpellCheckCollatorTest.java   |  635 +++
 .../SpellCheckCollatorWithCollapseTest.java     |   67 +
 .../spelling/SpellPossibilityIteratorTest.java  |  225 +
 .../spelling/SpellingQueryConverterTest.java    |  224 +
 .../spelling/TestSuggestSpellingConverter.java  |   73 +
 .../spelling/WordBreakSolrSpellCheckerTest.java |  331 ++
 .../suggest/RandomTestDictionaryFactory.java    |  117 +
 .../solr/spelling/suggest/SuggesterFSTTest.java |   23 +
 .../solr/spelling/suggest/SuggesterTSTTest.java |   23 +
 .../solr/spelling/suggest/SuggesterTest.java    |  114 +
 .../spelling/suggest/SuggesterWFSTTest.java     |   23 +
 .../suggest/TestAnalyzeInfixSuggestions.java    |  117 +
 .../suggest/TestAnalyzedSuggestions.java        |   58 +
 .../suggest/TestBlendedInfixSuggestions.java    |  135 +
 .../suggest/TestFileDictionaryLookup.java       |   60 +
 .../suggest/TestFreeTextSuggestions.java        |   57 +
 .../suggest/TestFuzzyAnalyzedSuggestions.java   |  112 +
 .../TestHighFrequencyDictionaryFactory.java     |   80 +
 .../spelling/suggest/TestPhraseSuggestions.java |   45 +
 .../solr/store/blockcache/BlockCacheTest.java   |  357 ++
 .../store/blockcache/BlockDirectoryTest.java    |  293 +
 .../solr/store/blockcache/BufferStoreTest.java  |  101 +
 .../solr/store/hdfs/HdfsDirectoryTest.java      |  235 +
 .../solr/store/hdfs/HdfsLockFactoryTest.java    |   81 +
 .../solr/uninverting/TestDocTermOrds.java       |  687 +++
 .../TestDocTermOrdsUninvertLimit.java           |   89 +
 .../apache/solr/uninverting/TestFieldCache.java |  730 +++
 .../solr/uninverting/TestFieldCacheReopen.java  |   72 +
 .../solr/uninverting/TestFieldCacheSort.java    | 1811 +++++++
 .../uninverting/TestFieldCacheSortRandom.java   |  325 ++
 .../uninverting/TestFieldCacheVsDocValues.java  |  517 ++
 .../uninverting/TestFieldCacheWithThreads.java  |  256 +
 .../solr/uninverting/TestLegacyFieldCache.java  |  473 ++
 .../solr/uninverting/TestNumericTerms32.java    |  156 +
 .../solr/uninverting/TestNumericTerms64.java    |  166 +
 .../solr/uninverting/TestUninvertingReader.java |  403 ++
 .../apache/solr/update/AddBlockUpdateTest.java  |  910 ++++
 .../solr/update/AnalysisErrorHandlingTest.java  |   50 +
 .../apache/solr/update/CdcrUpdateLogTest.java   |  774 +++
 .../solr/update/DataDrivenBlockJoinTest.java    |   70 +
 .../solr/update/DirectUpdateHandlerTest.java    |  467 ++
 .../apache/solr/update/DocumentBuilderTest.java |  295 +
 .../apache/solr/update/DummyMergePolicy.java    |   33 +
 .../solr/update/MaxSizeAutoCommitTest.java      |  233 +
 .../solr/update/MockStreamingSolrClients.java   |   96 +
 .../org/apache/solr/update/PeerSyncTest.java    |  352 ++
 .../update/PeerSyncWithBufferUpdatesTest.java   |  223 +
 ...PeerSyncWithIndexFingerprintCachingTest.java |  108 +
 ...ithLeaderAndIndexFingerprintCachingTest.java |   36 +
 .../solr/update/PeerSyncWithLeaderTest.java     |   39 +
 .../apache/solr/update/SoftAutoCommitTest.java  |  590 ++
 .../solr/update/SolrCmdDistributorTest.java     |  842 +++
 .../apache/solr/update/SolrIndexConfigTest.java |  210 +
 .../solr/update/SolrIndexMetricsTest.java       |  126 +
 .../solr/update/SolrIndexSplitterTest.java      |  382 ++
 .../solr/update/TestAtomicUpdateErrorCases.java |  101 +
 .../update/TestDocBasedVersionConstraints.java  |  544 ++
 .../solr/update/TestExceedMaxTermLength.java    |  150 +
 .../apache/solr/update/TestHdfsUpdateLog.java   |  148 +
 .../solr/update/TestInPlaceUpdatesDistrib.java  | 1247 +++++
 .../update/TestInPlaceUpdatesStandalone.java    | 1103 ++++
 .../solr/update/TestIndexingPerformance.java    |  129 +
 .../solr/update/TestNestedUpdateProcessor.java  |  195 +
 .../java/org/apache/solr/update/TestUpdate.java |  259 +
 .../apache/solr/update/TransactionLogTest.java  |   44 +
 .../org/apache/solr/update/UpdateLogTest.java   |  263 +
 .../apache/solr/update/UpdateParamsTest.java    |   76 +
 .../org/apache/solr/update/VersionInfoTest.java |  172 +
 ...dSchemaFieldsUpdateProcessorFactoryTest.java |  310 ++
 .../AtomicUpdateProcessorFactoryTest.java       |  279 +
 .../update/processor/AtomicUpdatesTest.java     | 1347 +++++
 ...lassificationUpdateProcessorFactoryTest.java |  136 +
 ...ificationUpdateProcessorIntegrationTest.java |  192 +
 .../ClassificationUpdateProcessorTest.java      |  507 ++
 .../CloneFieldUpdateProcessorFactoryTest.java   |  370 ++
 .../processor/CustomUpdateRequestProcessor.java |   30 +
 .../CustomUpdateRequestProcessorFactory.java    |   44 +
 .../DefaultValueUpdateProcessorTest.java        |  154 +
 .../DistributedUpdateProcessorTest.java         |   54 +
 ...DocExpirationUpdateProcessorFactoryTest.java |  221 +
 .../FieldMutatingUpdateProcessorTest.java       |  833 +++
 ...ommitOptimizeUpdateProcessorFactoryTest.java |   84 +
 ...IgnoreLargeDocumentProcessorFactoryTest.java |  181 +
 .../ParsingFieldUpdateProcessorsTest.java       | 1053 ++++
 .../PreAnalyzedUpdateProcessorTest.java         |  119 +
 .../RecordingUpdateProcessorFactory.java        |  118 +
 .../processor/RegexBoostProcessorTest.java      |  126 +
 .../solr/update/processor/RuntimeUrp.java       |   40 +
 .../solr/update/processor/ScriptEngineTest.java |  110 +
 .../SignatureUpdateProcessorFactoryTest.java    |  358 ++
 ...ipExistingDocumentsProcessorFactoryTest.java |  342 ++
 ...atelessScriptUpdateProcessorFactoryTest.java |  275 +
 .../processor/TemplateUpdateProcessorTest.java  |  101 +
 .../processor/TestNamedUpdateProcessors.java    |  168 +
 .../TestPartialUpdateDeduplication.java         |   68 +
 .../TimeRoutedAliasUpdateProcessorTest.java     |  715 +++
 .../processor/TolerantUpdateProcessorTest.java  |  448 ++
 .../TrackingUpdateProcessorFactory.java         |  156 +
 .../processor/URLClassifyProcessorTest.java     |  104 +
 .../UUIDUpdateProcessorFallbackTest.java        |  208 +
 .../UniqFieldsUpdateProcessorFactoryTest.java   |  121 +
 .../UpdateRequestProcessorFactoryTest.java      |  184 +
 .../java/org/apache/solr/util/BitSetPerf.java   |  195 +
 .../org/apache/solr/util/CircularListTest.java  |   48 +
 .../java/org/apache/solr/util/DOMUtilTest.java  |   57 +
 .../apache/solr/util/DateMathParserTest.java    |  441 ++
 .../org/apache/solr/util/DistanceUnitsTest.java |   28 +
 .../org/apache/solr/util/FileUtilsTest.java     |   32 +
 .../org/apache/solr/util/MockCoreContainer.java |   40 +
 .../solr/util/MockSearchableSolrClient.java     |  117 +
 .../org/apache/solr/util/NumberUtilsTest.java   |   40 +
 .../apache/solr/util/OrderedExecutorTest.java   |  105 +
 .../org/apache/solr/util/PrimUtilsTest.java     |   92 +
 .../apache/solr/util/SimplePostToolTest.java    |  250 +
 .../apache/solr/util/SolrPluginUtilsTest.java   |  451 ++
 .../apache/solr/util/TestFastOutputStream.java  |   83 +
 .../solr/util/TestMaxTokenLenTokenizer.java     |  119 +
 .../solr/util/TestObjectReleaseTracker.java     |   62 +
 .../org/apache/solr/util/TestRTimerTree.java    |   87 +
 .../apache/solr/util/TestSafeXMLParsing.java    |   99 +
 .../apache/solr/util/TestSolrCLIRunExample.java |  657 +++
 .../apache/solr/util/TestSystemIdResolver.java  |  106 +
 .../org/apache/solr/util/TestTestInjection.java |  105 +
 .../java/org/apache/solr/util/TestUtils.java    |  324 ++
 .../org/apache/solr/util/TimeZoneUtilsTest.java |  134 +
 .../org/apache/solr/util/UtilsToolTest.java     |  189 +
 .../configuration/SSLConfigurationsTest.java    |  268 +
 .../SSLCredentialProviderFactoryTest.java       |   90 +
 .../providers/EnvSSLCredentialProviderTest.java |   61 +
 .../HadoopSSLCredentialProviderTest.java        |   74 +
 .../SysPropSSLCredentialProviderTest.java       |   66 +
 .../BigEndianAscendingWordDeserializerTest.java |  188 +
 .../BigEndianAscendingWordSerializerTest.java   |  336 ++
 .../org/apache/solr/util/hll/BitVectorTest.java |  169 +
 .../apache/solr/util/hll/ExplicitHLLTest.java   |  234 +
 .../org/apache/solr/util/hll/FullHLLTest.java   |  341 ++
 .../solr/util/hll/HLLSerializationTest.java     |  225 +
 .../org/apache/solr/util/hll/HLLUtilTest.java   |   44 +
 .../solr/util/hll/IntegrationTestGenerator.java |  711 +++
 .../solr/util/hll/ProbabilisticTestUtil.java    |   75 +
 .../org/apache/solr/util/hll/SparseHLLTest.java |  452 ++
 .../apache/solr/util/stats/MetricUtilsTest.java |  166 +
 .../solr/AnalysisAfterCoreReloadTest.java       |  143 -
 .../org/apache/solr/BasicFunctionalityTest.java | 1037 ----
 .../org/apache/solr/ConvertedLegacyTest.java    | 1321 -----
 .../test/org/apache/solr/CursorPagingTest.java  |  979 ----
 .../apache/solr/DisMaxRequestHandlerTest.java   |  222 -
 .../solr/DistributedIntervalFacetingTest.java   |  219 -
 .../test/org/apache/solr/EchoParamsTest.java    |   80 -
 .../solr/HelloWorldSolrCloudTestCase.java       |   94 -
 .../test/org/apache/solr/MinimalSchemaTest.java |  140 -
 .../test/org/apache/solr/OutputWriterTest.java  |  125 -
 .../src/test/org/apache/solr/SampleTest.java    |  120 -
 .../test/org/apache/solr/SolrInfoBeanTest.java  |  124 -
 .../org/apache/solr/SolrTestCaseJ4Test.java     |   64 -
 .../test/org/apache/solr/TestCrossCoreJoin.java |  139 -
 .../solr/TestCursorMarkWithoutUniqueKey.java    |   64 -
 .../apache/solr/TestDistributedGrouping.java    |  324 --
 .../apache/solr/TestDistributedMissingSort.java |  278 -
 .../org/apache/solr/TestDistributedSearch.java  | 1248 -----
 .../org/apache/solr/TestDocumentBuilder.java    |   68 -
 .../org/apache/solr/TestGroupingSearch.java     |  978 ----
 .../apache/solr/TestHighlightDedupGrouping.java |  123 -
 .../core/src/test/org/apache/solr/TestJoin.java |  292 -
 .../org/apache/solr/TestRandomDVFaceting.java   |  305 --
 .../org/apache/solr/TestRandomFaceting.java     |  460 --
 .../solr/TestSimpleTrackingShardHandler.java    |   56 -
 .../org/apache/solr/TestSolrCoreProperties.java |  101 -
 .../org/apache/solr/TestTolerantSearch.java     |  237 -
 .../core/src/test/org/apache/solr/TestTrie.java |  282 -
 .../PathHierarchyTokenizerFactoryTest.java      |   96 -
 .../ProtectedTermFilterFactoryTest.java         |   84 -
 .../apache/solr/analysis/TestCharFilters.java   |   76 -
 .../solr/analysis/TestLuceneMatchVersion.java   |   57 -
 .../TestReversedWildcardFilterFactory.java      |  241 -
 .../TestWordDelimiterFilterFactory.java         |  245 -
 .../ThrowingMockTokenFilterFactory.java         |   69 -
 .../solr/analysis/TokenizerChainTest.java       |   43 -
 .../solr/analysis/htmlStripReaderTest.html      |  350 --
 .../backcompat/TestLuceneIndexBackCompat.java   |   96 -
 .../TestEmbeddedSolrServerAdminHandler.java     |   70 -
 .../TestEmbeddedSolrServerConstructors.java     |   68 -
 .../TestEmbeddedSolrServerSchemaAPI.java        |  115 -
 .../solrj/embedded/TestJettySolrRunner.java     |   72 -
 .../client/solrj/impl/ConnectionReuseTest.java  |  193 -
 .../apache/solr/cloud/ActionThrottleTest.java   |  124 -
 .../org/apache/solr/cloud/AddReplicaTest.java   |  194 -
 .../apache/solr/cloud/AliasIntegrationTest.java |  658 ---
 .../cloud/AssignBackwardCompatibilityTest.java  |  116 -
 .../AsyncCallRequestStatusResponseTest.java     |   62 -
 .../solr/cloud/BasicDistributedZk2Test.java     |  462 --
 .../solr/cloud/BasicDistributedZkTest.java      | 1170 ----
 .../test/org/apache/solr/cloud/BasicZkTest.java |  181 -
 .../cloud/ChaosMonkeyNothingIsSafeTest.java     |  300 --
 ...MonkeyNothingIsSafeWithPullReplicasTest.java |  343 --
 .../solr/cloud/ChaosMonkeySafeLeaderTest.java   |  210 -
 ...aosMonkeySafeLeaderWithPullReplicasTest.java |  259 -
 .../solr/cloud/ChaosMonkeyShardSplitTest.java   |  269 -
 .../apache/solr/cloud/CleanupOldIndexTest.java  |  122 -
 .../cloud/CloudExitableDirectoryReaderTest.java |  116 -
 .../org/apache/solr/cloud/CloudTestUtils.java   |  175 -
 .../apache/solr/cloud/ClusterStateMockUtil.java |  197 -
 .../org/apache/solr/cloud/ClusterStateTest.java |   81 -
 .../solr/cloud/ClusterStateUpdateTest.java      |  144 -
 .../apache/solr/cloud/CollectionPropsTest.java  |  271 -
 .../solr/cloud/CollectionStateFormat2Test.java  |   71 -
 .../solr/cloud/CollectionsAPISolrJTest.java     |  668 ---
 .../cloud/ConcurrentCreateRoutedAliasTest.java  |  222 -
 .../apache/solr/cloud/ConfigSetsAPITest.java    |   49 -
 .../solr/cloud/ConnectionManagerTest.java       |  176 -
 .../solr/cloud/CreateCollectionCleanupTest.java |   82 -
 .../solr/cloud/CreateRoutedAliasTest.java       |  381 --
 .../solr/cloud/DeleteInactiveReplicaTest.java   |  111 -
 .../DeleteLastCustomShardedReplicaTest.java     |   56 -
 .../org/apache/solr/cloud/DeleteNodeTest.java   |  117 -
 .../apache/solr/cloud/DeleteReplicaTest.java    |  396 --
 .../org/apache/solr/cloud/DeleteShardTest.java  |  142 -
 .../org/apache/solr/cloud/DeleteStatusTest.java |  123 -
 .../solr/cloud/DistribCursorPagingTest.java     |  760 ---
 ...DistribDocExpirationUpdateProcessorTest.java |  189 -
 .../cloud/DistribJoinFromCollectionTest.java    |  237 -
 .../apache/solr/cloud/DistributedQueueTest.java |  344 --
 .../solr/cloud/DistributedVersionInfoTest.java  |  386 --
 .../solr/cloud/DocValuesNotIndexedTest.java     |  536 --
 .../org/apache/solr/cloud/ForceLeaderTest.java  |  256 -
 .../cloud/FullSolrCloudDistribCmdsTest.java     |  760 ---
 .../FullThrottleStoppableIndexingThread.java    |  173 -
 .../solr/cloud/HealthCheckHandlerTest.java      |  111 -
 .../solr/cloud/HttpPartitionOnCommitTest.java   |  178 -
 .../apache/solr/cloud/HttpPartitionTest.java    |  617 ---
 .../apache/solr/cloud/KerberosTestServices.java |  225 -
 .../solr/cloud/LIROnShardRestartTest.java       |  262 -
 .../cloud/LeaderElectionContextKeyTest.java     |  121 -
 .../cloud/LeaderElectionIntegrationTest.java    |  180 -
 .../apache/solr/cloud/LeaderElectionTest.java   |  550 --
 .../cloud/LeaderFailoverAfterPartitionTest.java |  189 -
 .../cloud/LeaderFailureAfterFreshStartTest.java |  274 -
 .../solr/cloud/LeaderTragicEventTest.java       |  197 -
 .../solr/cloud/LeaderVoteWaitTimeoutTest.java   |  274 -
 .../solr/cloud/LegacyCloudClusterPropTest.java  |  165 -
 .../cloud/MetricsHistoryIntegrationTest.java    |  194 -
 .../apache/solr/cloud/MigrateRouteKeyTest.java  |  224 -
 .../solr/cloud/MissingSegmentRecoveryTest.java  |  124 -
 .../org/apache/solr/cloud/MockZkController.java |   36 -
 .../solr/cloud/MoveReplicaHDFSFailoverTest.java |  207 -
 .../apache/solr/cloud/MoveReplicaHDFSTest.java  |   98 -
 .../org/apache/solr/cloud/MoveReplicaTest.java  |  390 --
 .../solr/cloud/MultiSolrCloudTestCaseTest.java  |   80 -
 .../apache/solr/cloud/MultiThreadedOCPTest.java |  295 -
 .../org/apache/solr/cloud/NodeMutatorTest.java  |   94 -
 ...utOfBoxZkACLAndCredentialsProvidersTest.java |  137 -
 ...rriddenZkACLAndCredentialsProvidersTest.java |  340 --
 ...verseerCollectionConfigSetProcessorTest.java |  804 ---
 .../cloud/OverseerModifyCollectionTest.java     |   80 -
 .../apache/solr/cloud/OverseerRolesTest.java    |  153 -
 .../apache/solr/cloud/OverseerStatusTest.java   |   98 -
 .../solr/cloud/OverseerTaskQueueTest.java       |   96 -
 .../org/apache/solr/cloud/OverseerTest.java     | 1521 ------
 .../solr/cloud/PeerSyncReplicationTest.java     |  412 --
 .../solr/cloud/RecoveryAfterSoftCommitTest.java |  119 -
 .../org/apache/solr/cloud/RecoveryZkTest.java   |  150 -
 .../apache/solr/cloud/RemoteQueryErrorTest.java |   58 -
 .../solr/cloud/ReplaceNodeNoTargetTest.java     |  120 -
 .../org/apache/solr/cloud/ReplaceNodeTest.java  |  187 -
 .../solr/cloud/ReplicationFactorTest.java       |  491 --
 .../solr/cloud/RestartWhileUpdatingTest.java    |  203 -
 .../apache/solr/cloud/RollingRestartTest.java   |  161 -
 .../org/apache/solr/cloud/SSLMigrationTest.java |  138 -
 .../solr/cloud/SaslZkACLProviderTest.java       |  224 -
 .../cloud/SegmentTerminateEarlyTestState.java   |  275 -
 .../solr/cloud/ShardRoutingCustomTest.java      |   99 -
 .../org/apache/solr/cloud/ShardRoutingTest.java |  345 --
 .../cloud/SharedFSAutoReplicaFailoverTest.java  |  421 --
 .../org/apache/solr/cloud/SliceStateTest.java   |   58 -
 .../apache/solr/cloud/SolrCLIZkUtilsTest.java   |  782 ---
 .../apache/solr/cloud/SolrCloudExampleTest.java |  288 -
 .../org/apache/solr/cloud/SolrXmlInZkTest.java  |  179 -
 .../org/apache/solr/cloud/SyncSliceTest.java    |  309 --
 .../solr/cloud/TestAuthenticationFramework.java |  181 -
 .../apache/solr/cloud/TestCloudConsistency.java |  271 -
 .../solr/cloud/TestCloudDeleteByQuery.java      |  247 -
 .../apache/solr/cloud/TestCloudInspectUtil.java |  124 -
 .../cloud/TestCloudJSONFacetJoinDomain.java     |  853 ---
 .../solr/cloud/TestCloudJSONFacetSKG.java       |  677 ---
 ...TestCloudPhrasesIdentificationComponent.java |  200 -
 .../apache/solr/cloud/TestCloudPivotFacet.java  |  840 ---
 .../solr/cloud/TestCloudPseudoReturnFields.java |  841 ---
 .../apache/solr/cloud/TestCloudRecovery.java    |  205 -
 .../solr/cloud/TestCloudSearcherWarming.java    |  323 --
 .../solr/cloud/TestClusterProperties.java       |   68 -
 .../apache/solr/cloud/TestConfigSetsAPI.java    |  769 ---
 .../cloud/TestConfigSetsAPIExclusivity.java     |  181 -
 .../solr/cloud/TestConfigSetsAPIZkFailure.java  |  360 --
 .../org/apache/solr/cloud/TestCryptoKeys.java   |  208 -
 .../cloud/TestDeleteCollectionOnDownNodes.java  |   67 -
 .../solr/cloud/TestDistribDocBasedVersion.java  |  353 --
 .../apache/solr/cloud/TestDistributedMap.java   |  180 -
 .../solr/cloud/TestDownShardTolerantSearch.java |   81 -
 .../TestExclusionRuleCollectionAccess.java      |   47 -
 .../apache/solr/cloud/TestHashPartitioner.java  |  321 --
 .../TestLeaderElectionWithEmptyReplica.java     |  125 -
 .../solr/cloud/TestLeaderElectionZkExpiry.java  |  107 -
 .../org/apache/solr/cloud/TestLockTree.java     |  123 -
 .../solr/cloud/TestMiniSolrCloudClusterSSL.java |  400 --
 .../cloud/TestOnReconnectListenerSupport.java   |  155 -
 .../org/apache/solr/cloud/TestPrepRecovery.java |  112 -
 .../org/apache/solr/cloud/TestPullReplica.java  |  677 ---
 .../cloud/TestPullReplicaErrorHandling.java     |  348 --
 .../apache/solr/cloud/TestRandomFlRTGCloud.java |  968 ----
 .../cloud/TestRandomRequestDistribution.java    |  248 -
 .../apache/solr/cloud/TestRebalanceLeaders.java |  349 --
 .../solr/cloud/TestRequestForwarding.java       |   85 -
 .../apache/solr/cloud/TestSSLRandomization.java |  254 -
 .../apache/solr/cloud/TestSegmentSorting.java   |  188 -
 .../solr/cloud/TestShortCircuitedRequests.java  |   57 -
 .../cloud/TestSizeLimitedDistributedMap.java    |   69 -
 .../solr/cloud/TestSkipOverseerOperations.java  |  128 -
 .../TestSolrCloudWithDelegationTokens.java      |  413 --
 .../cloud/TestSolrCloudWithKerberosAlt.java     |  173 -
 .../TestSolrCloudWithSecureImpersonation.java   |  335 --
 .../TestStressCloudBlindAtomicUpdates.java      |  499 --
 .../solr/cloud/TestStressInPlaceUpdates.java    |  599 ---
 .../apache/solr/cloud/TestStressLiveNodes.java  |  255 -
 .../org/apache/solr/cloud/TestTlogReplica.java  |  967 ----
 .../cloud/TestTolerantUpdateProcessorCloud.java | 1076 ----
 .../TestTolerantUpdateProcessorRandomCloud.java |  395 --
 .../org/apache/solr/cloud/TestUtilizeNode.java  |  183 -
 .../apache/solr/cloud/TestWithCollection.java   |  621 ---
 .../org/apache/solr/cloud/TestZkChroot.java     |  168 -
 .../TlogReplayBufferedWhileIndexingTest.java    |  146 -
 .../cloud/TriLevelCompositeIdRoutingTest.java   |  158 -
 .../solr/cloud/UnloadDistributedZkTest.java     |  378 --
 ...MParamsZkACLAndCredentialsProvidersTest.java |  278 -
 .../test/org/apache/solr/cloud/ZkCLITest.java   |  394 --
 .../org/apache/solr/cloud/ZkControllerTest.java |  387 --
 .../org/apache/solr/cloud/ZkFailoverTest.java   |   90 -
 .../org/apache/solr/cloud/ZkNodePropsTest.java  |   51 -
 .../org/apache/solr/cloud/ZkShardTermsTest.java |  316 --
 .../org/apache/solr/cloud/ZkSolrClientTest.java |  373 --
 .../AbstractCloudBackupRestoreTestCase.java     |  387 --
 .../solr/cloud/api/collections/AssignTest.java  |  193 -
 .../api/collections/CollectionReloadTest.java   |   85 -
 .../CollectionTooManyReplicasTest.java          |  222 -
 .../CollectionsAPIAsyncDistributedZkTest.java   |  265 -
 .../CollectionsAPIDistributedZkTest.java        |  689 ---
 ...ConcurrentDeleteAndCreateCollectionTest.java |  227 -
 .../api/collections/CustomCollectionTest.java   |  201 -
 .../HdfsCollectionsAPIDistributedZkTest.java    |  177 -
 .../api/collections/ReplicaPropertiesBase.java  |  178 -
 .../cloud/api/collections/ShardSplitTest.java   | 1113 ----
 .../SimpleCollectionCreateDeleteTest.java       |   66 -
 .../api/collections/TestCollectionAPI.java      |  939 ----
 .../TestCollectionsAPIViaSolrCloudCluster.java  |  299 --
 .../collections/TestHdfsCloudBackupRestore.java |  216 -
 .../TestLocalFSCloudBackupRestore.java          |   66 -
 .../api/collections/TestReplicaProperties.java  |  236 -
 .../TestRequestStatusCollectionAPI.java         |  198 -
 .../AutoAddReplicasIntegrationTest.java         |  199 -
 .../AutoAddReplicasPlanActionTest.java          |  208 -
 .../autoscaling/AutoScalingHandlerTest.java     | 1053 ----
 .../solr/cloud/autoscaling/CapturedEvent.java   |   65 -
 .../autoscaling/ComputePlanActionTest.java      |  679 ---
 .../autoscaling/ExecutePlanActionTest.java      |  226 -
 .../HdfsAutoAddReplicasIntegrationTest.java     |   57 -
 .../autoscaling/HttpTriggerListenerTest.java    |  208 -
 .../cloud/autoscaling/IndexSizeTriggerTest.java |  844 ---
 .../MetricTriggerIntegrationTest.java           |  245 -
 .../cloud/autoscaling/MetricTriggerTest.java    |  135 -
 .../NodeAddedTriggerIntegrationTest.java        |  299 --
 .../cloud/autoscaling/NodeAddedTriggerTest.java |  326 --
 .../NodeLostTriggerIntegrationTest.java         |  321 --
 .../cloud/autoscaling/NodeLostTriggerTest.java  |  370 --
 .../NodeMarkersRegistrationTest.java            |  286 -
 .../autoscaling/RestoreTriggerStateTest.java    |  168 -
 .../ScheduledMaintenanceTriggerTest.java        |  276 -
 .../ScheduledTriggerIntegrationTest.java        |  143 -
 .../cloud/autoscaling/ScheduledTriggerTest.java |  140 -
 .../SearchRateTriggerIntegrationTest.java       |  741 ---
 .../autoscaling/SearchRateTriggerTest.java      |  352 --
 .../autoscaling/SystemLogListenerTest.java      |  240 -
 .../solr/cloud/autoscaling/TestPolicyCloud.java |  427 --
 .../TriggerCooldownIntegrationTest.java         |  239 -
 .../autoscaling/TriggerEventQueueTest.java      |   98 -
 .../autoscaling/TriggerIntegrationTest.java     |  685 ---
 .../TriggerSetPropertiesIntegrationTest.java    |  200 -
 .../solr/cloud/autoscaling/sim/ActionError.java |   24 -
 .../sim/GenericDistributedQueue.java            |  599 ---
 .../sim/GenericDistributedQueueFactory.java     |   45 -
 .../cloud/autoscaling/sim/LiveNodesSet.java     |  109 -
 .../cloud/autoscaling/sim/SimCloudManager.java  |  895 ----
 .../sim/SimClusterStateProvider.java            | 2172 --------
 .../autoscaling/sim/SimDistribStateManager.java |  590 --
 .../sim/SimDistributedQueueFactory.java         |  284 -
 .../autoscaling/sim/SimNodeStateProvider.java   |  331 --
 .../autoscaling/sim/SimSolrCloudTestCase.java   |  170 -
 .../sim/TestSimClusterStateProvider.java        |  225 -
 .../sim/TestSimComputePlanAction.java           |  360 --
 .../sim/TestSimDistribStateManager.java         |  342 --
 .../sim/TestSimDistributedQueue.java            |  221 -
 .../sim/TestSimExecutePlanAction.java           |  210 -
 .../autoscaling/sim/TestSimExtremeIndexing.java |  167 -
 .../sim/TestSimGenericDistributedQueue.java     |   39 -
 .../autoscaling/sim/TestSimLargeCluster.java    |  730 ---
 .../sim/TestSimNodeAddedTrigger.java            |  327 --
 .../autoscaling/sim/TestSimNodeLostTrigger.java |  346 --
 .../autoscaling/sim/TestSimPolicyCloud.java     |  369 --
 .../sim/TestSimTriggerIntegration.java          | 1333 -----
 .../cloud/autoscaling/sim/package-info.java     |   98 -
 .../cloud/cdcr/BaseCdcrDistributedZkTest.java   |  898 ----
 .../solr/cloud/cdcr/CdcrBidirectionalTest.java  |  237 -
 .../solr/cloud/cdcr/CdcrBootstrapTest.java      |  308 --
 .../cloud/cdcr/CdcrOpsAndBoundariesTest.java    |  322 --
 .../cloud/cdcr/CdcrReplicationHandlerTest.java  |  331 --
 .../solr/cloud/cdcr/CdcrRequestHandlerTest.java |  178 -
 .../apache/solr/cloud/cdcr/CdcrTestsUtil.java   |  273 -
 .../cloud/cdcr/CdcrVersionReplicationTest.java  |  307 --
 .../cloud/cdcr/CdcrWithNodesRestartsTest.java   |  344 --
 .../solr/cloud/hdfs/HDFSCollectionsAPITest.java |   95 -
 .../cloud/hdfs/HdfsBasicDistributedZk2Test.java |   59 -
 .../cloud/hdfs/HdfsBasicDistributedZkTest.java  |   65 -
 .../hdfs/HdfsChaosMonkeyNothingIsSafeTest.java  |   69 -
 .../hdfs/HdfsChaosMonkeySafeLeaderTest.java     |   68 -
 .../solr/cloud/hdfs/HdfsNNFailoverTest.java     |   80 -
 .../solr/cloud/hdfs/HdfsRecoverLeaseTest.java   |  247 -
 .../solr/cloud/hdfs/HdfsRecoveryZkTest.java     |   59 -
 .../hdfs/HdfsRestartWhileUpdatingTest.java      |   63 -
 .../solr/cloud/hdfs/HdfsSyncSliceTest.java      |   57 -
 .../apache/solr/cloud/hdfs/HdfsTestUtil.java    |  242 -
 .../solr/cloud/hdfs/HdfsThreadLeakTest.java     |   84 -
 ...HdfsTlogReplayBufferedWhileIndexingTest.java |   65 -
 .../cloud/hdfs/HdfsUnloadDistributedZkTest.java |   57 -
 .../HdfsWriteToMultipleCollectionsTest.java     |  185 -
 .../apache/solr/cloud/hdfs/StressHdfsTest.java  |  247 -
 .../cloud/overseer/TestClusterStateMutator.java |   82 -
 .../solr/cloud/overseer/ZkStateReaderTest.java  |  284 -
 .../solr/cloud/overseer/ZkStateWriterTest.java  |  361 --
 .../solr/cloud/rule/ImplicitSnitchTest.java     |  242 -
 .../apache/solr/cloud/rule/RuleEngineTest.java  |  315 --
 .../org/apache/solr/cloud/rule/RulesTest.java   |  260 -
 .../common/cloud/ZkStateReaderAccessor.java     |   36 -
 .../solr/core/AlternateDirectoryTest.java       |   84 -
 .../solr/core/BlobRepositoryCloudTest.java      |  116 -
 .../solr/core/BlobRepositoryMockingTest.java    |  157 -
 .../solr/core/BlobStoreTestRequestHandler.java  |   62 -
 .../solr/core/CachingDirectoryFactoryTest.java  |  250 -
 .../core/ConfigureRecoveryStrategyTest.java     |  112 -
 .../org/apache/solr/core/CoreSorterTest.java    |  237 -
 .../solr/core/CountUsageValueSourceParser.java  |   86 -
 .../apache/solr/core/DirectoryFactoryTest.java  |  117 -
 .../solr/core/DummyValueSourceParser.java       |   58 -
 .../solr/core/ExitableDirectoryReaderTest.java  |  170 -
 .../apache/solr/core/FakeDeletionPolicy.java    |   60 -
 .../solr/core/HdfsDirectoryFactoryTest.java     |  238 -
 .../test/org/apache/solr/core/HelloStream.java  |  100 -
 .../org/apache/solr/core/MockEventListener.java |   57 -
 .../test/org/apache/solr/core/MockInfoBean.java |   75 -
 .../core/MockQuerySenderListenerReqHandler.java |   61 -
 .../solr/core/MockShardHandlerFactory.java      |   69 -
 .../solr/core/OpenCloseCoreStressTest.java      |  525 --
 .../org/apache/solr/core/PluginInfoTest.java    |  165 -
 .../apache/solr/core/QueryResultKeyTest.java    |  201 -
 .../solr/core/RAMDirectoryFactoryTest.java      |   64 -
 .../apache/solr/core/RequestHandlersTest.java   |  121 -
 .../apache/solr/core/ResourceLoaderTest.java    |  222 -
 .../test/org/apache/solr/core/SOLR749Test.java  |  110 -
 .../core/SolrCoreCheckLockOnStartupTest.java    |  112 -
 .../test/org/apache/solr/core/SolrCoreTest.java |  358 --
 .../solr/core/TestBackupRepositoryFactory.java  |  149 -
 .../org/apache/solr/core/TestBadConfig.java     |  102 -
 .../org/apache/solr/core/TestCodecSupport.java  |  230 -
 .../test/org/apache/solr/core/TestConfig.java   |  236 -
 .../org/apache/solr/core/TestConfigOverlay.java |   78 -
 .../solr/core/TestConfigSetImmutable.java       |  122 -
 .../solr/core/TestConfigSetProperties.java      |   93 -
 .../org/apache/solr/core/TestConfigSets.java    |  142 -
 .../org/apache/solr/core/TestCoreContainer.java |  647 ---
 .../org/apache/solr/core/TestCoreDiscovery.java |  576 --
 .../solr/core/TestCorePropertiesReload.java     |   74 -
 .../org/apache/solr/core/TestCustomStream.java  |   75 -
 .../apache/solr/core/TestDirectoryFactory.java  |  106 -
 .../apache/solr/core/TestDynamicLoading.java    |  288 -
 .../org/apache/solr/core/TestDynamicURP.java    |  110 -
 .../solr/core/TestImplicitCoreProperties.java   |   76 -
 .../apache/solr/core/TestInfoStreamLogging.java |   36 -
 .../org/apache/solr/core/TestInitParams.java    |  132 -
 .../apache/solr/core/TestJmxIntegration.java    |  263 -
 .../org/apache/solr/core/TestLazyCores.java     |  870 ---
 .../apache/solr/core/TestMergePolicyConfig.java |  270 -
 .../test/org/apache/solr/core/TestNRTOpen.java  |  150 -
 .../solr/core/TestQuerySenderListener.java      |  101 -
 .../solr/core/TestQuerySenderNoQuery.java       |   84 -
 .../solr/core/TestReloadAndDeleteDocs.java      |   48 -
 .../solr/core/TestShardHandlerFactory.java      |   42 -
 .../apache/solr/core/TestSimpleTextCodec.java   |   63 -
 .../apache/solr/core/TestSolrConfigHandler.java |  868 ---
 .../solr/core/TestSolrDeletionPolicy1.java      |  136 -
 .../solr/core/TestSolrDeletionPolicy2.java      |   61 -
 .../apache/solr/core/TestSolrIndexConfig.java   |   66 -
 .../test/org/apache/solr/core/TestSolrXml.java  |  338 --
 .../apache/solr/core/TestXIncludeConfig.java    |   83 -
 .../core/snapshots/TestSolrCloudSnapshots.java  |  323 --
 .../core/snapshots/TestSolrCoreSnapshots.java   |  312 --
 .../handler/AnalysisRequestHandlerTestBase.java |  123 -
 .../apache/solr/handler/BackupRestoreUtils.java |  104 -
 .../handler/BinaryUpdateRequestHandlerTest.java |   73 -
 .../solr/handler/CSVRequestHandlerTest.java     |   53 -
 .../apache/solr/handler/CheckBackupStatus.java  |   69 -
 .../DocumentAnalysisRequestHandlerTest.java     |  336 --
 .../FieldAnalysisRequestHandlerTest.java        |  547 --
 .../org/apache/solr/handler/JsonLoaderTest.java | 1084 ----
 .../solr/handler/MoreLikeThisHandlerTest.java   |  188 -
 .../solr/handler/PingRequestHandlerTest.java    |  242 -
 .../apache/solr/handler/RequestLoggingTest.java |  102 -
 .../solr/handler/ResponseBuilderTest.java       |   46 -
 .../apache/solr/handler/SearchHandlerTest.java  |   80 -
 .../apache/solr/handler/TestBlobHandler.java    |  183 -
 .../org/apache/solr/handler/TestCSVLoader.java  |  328 --
 .../apache/solr/handler/TestConfigReload.java   |  128 -
 .../org/apache/solr/handler/TestCoreBackup.java |   61 -
 .../solr/handler/TestHdfsBackupRestoreCore.java |  250 -
 .../solr/handler/TestReplicationHandler.java    | 1668 ------
 .../handler/TestReplicationHandlerBackup.java   |  316 --
 .../apache/solr/handler/TestReqParamsAPI.java   |  300 --
 .../apache/solr/handler/TestRestoreCore.java    |  248 -
 .../org/apache/solr/handler/TestSQLHandler.java | 2761 ----------
 .../solr/handler/TestSQLHandlerNonCloud.java    |   92 -
 .../handler/TestSolrConfigHandlerCloud.java     |  275 -
 .../TestSolrConfigHandlerConcurrent.java        |  198 -
 .../solr/handler/TestSystemCollAutoCreate.java  |   29 -
 .../handler/ThrowErrorOnInitRequestHandler.java |   51 -
 .../solr/handler/V2ApiIntegrationTest.java      |  158 -
 .../apache/solr/handler/V2StandaloneTest.java   |   53 -
 .../handler/XmlUpdateRequestHandlerTest.java    |  233 -
 .../handler/XsltUpdateRequestHandlerTest.java   |  128 -
 .../handler/admin/AdminHandlersProxyTest.java   |  119 -
 .../admin/AutoscalingHistoryHandlerTest.java    |  439 --
 .../admin/CoreAdminCreateDiscoverTest.java      |  277 -
 .../handler/admin/CoreAdminHandlerTest.java     |  444 --
 .../handler/admin/CoreAdminOperationTest.java   |  681 ---
 .../admin/CoreAdminRequestStatusTest.java       |  102 -
 .../admin/CoreMergeIndexesAdminHandlerTest.java |  105 -
 .../solr/handler/admin/InfoHandlerTest.java     |  171 -
 .../solr/handler/admin/LoggingHandlerTest.java  |   73 -
 .../handler/admin/LukeRequestHandlerTest.java   |  263 -
 .../solr/handler/admin/MBeansHandlerTest.java   |  199 -
 .../solr/handler/admin/MetricsHandlerTest.java  |  321 --
 .../admin/MetricsHistoryHandlerTest.java        |  128 -
 .../admin/PropertiesRequestHandlerTest.java     |   73 -
 .../SecurityConfHandlerLocalForTesting.java     |   39 -
 .../handler/admin/SecurityConfHandlerTest.java  |  277 -
 .../admin/SegmentsInfoRequestHandlerTest.java   |  123 -
 .../admin/ShowFileRequestHandlerTest.java       |  125 -
 .../solr/handler/admin/StatsReloadRaceTest.java |  146 -
 .../handler/admin/SystemInfoHandlerTest.java    |   53 -
 .../solr/handler/admin/TestApiFramework.java    |  219 -
 .../solr/handler/admin/TestCollectionAPIs.java  |  308 --
 .../solr/handler/admin/TestConfigsApi.java      |   59 -
 .../solr/handler/admin/TestCoreAdminApis.java   |  106 -
 .../admin/ZookeeperStatusHandlerTest.java       |   86 -
 .../handler/component/BadComponentTest.java     |   41 -
 .../component/CustomHighlightComponentTest.java |  306 --
 .../handler/component/DebugComponentTest.java   |  280 -
 .../DistributedDebugComponentTest.java          |  445 --
 .../DistributedExpandComponentTest.java         |  229 -
 .../DistributedFacetExistsSmallTest.java        |  236 -
 .../DistributedFacetPivotLargeTest.java         |  974 ----
 .../DistributedFacetPivotLongTailTest.java      |  324 --
 .../DistributedFacetPivotSmallAdvancedTest.java |  249 -
 .../DistributedFacetPivotSmallTest.java         | 1680 ------
 .../DistributedFacetPivotWhiteBoxTest.java      |  139 -
 .../component/DistributedMLTComponentTest.java  |  188 -
 ...DistributedQueryComponentCustomSortTest.java |  122 -
 ...stributedQueryComponentOptimizationTest.java |  343 --
 .../DistributedQueryElevationComponentTest.java |  136 -
 .../DistributedSpellCheckComponentTest.java     |  228 -
 .../DistributedSuggestComponentTest.java        |  140 -
 .../DistributedTermsComponentTest.java          |   60 -
 .../component/DummyCustomParamSpellChecker.java |   69 -
 .../handler/component/FacetPivotSmallTest.java  |  511 --
 .../handler/component/InfixSuggestersTest.java  |  155 -
 .../PhrasesIdentificationComponentTest.java     |  796 ---
 .../component/QueryElevationComponentTest.java  |  797 ---
 .../component/ReplicaListTransformerTest.java   |  163 -
 .../component/ResourceSharingTestComponent.java |  144 -
 .../component/ResponseLogComponentTest.java     |   86 -
 .../handler/component/SearchHandlerTest.java    |  273 -
 .../ShufflingReplicaListTransformerTest.java    |   76 -
 .../component/SpellCheckComponentTest.java      |  358 --
 .../handler/component/StatsComponentTest.java   | 1995 -------
 .../SuggestComponentContextFilterQueryTest.java |  257 -
 .../handler/component/SuggestComponentTest.java |  525 --
 .../TermVectorComponentDistributedTest.java     |  237 -
 .../component/TermVectorComponentTest.java      |  331 --
 .../handler/component/TermsComponentTest.java   |  514 --
 ...estDistributedStatsComponentCardinality.java |  291 -
 .../handler/component/TestExpandComponent.java  |  334 --
 .../component/TestHttpShardHandlerFactory.java  |  221 -
 .../handler/component/TestPivotHelperCode.java  |  116 -
 .../TestTrackingShardHandlerFactory.java        |  133 -
 .../solr/handler/export/TestExportWriter.java   |  796 ---
 .../solr/handler/loader/JavabinLoaderTest.java  |   91 -
 .../tagger/EmbeddedSolrNoSerializeTest.java     |  154 -
 .../handler/tagger/RandomizedTaggerTest.java    |  150 -
 .../apache/solr/handler/tagger/Tagger2Test.java |  176 -
 .../apache/solr/handler/tagger/TaggerTest.java  |  296 -
 .../solr/handler/tagger/TaggerTestCase.java     |  251 -
 .../handler/tagger/TaggingAttributeTest.java    |   73 -
 .../handler/tagger/WordLengthTaggingFilter.java |  110 -
 .../tagger/WordLengthTaggingFilterFactory.java  |   67 -
 .../handler/tagger/XmlInterpolationTest.java    |  224 -
 .../apache/solr/highlight/DummyHighlighter.java |   37 -
 .../highlight/FastVectorHighlighterTest.java    |   92 -
 .../solr/highlight/HighlighterConfigTest.java   |   77 -
 .../highlight/HighlighterMaxOffsetTest.java     |  119 -
 .../apache/solr/highlight/HighlighterTest.java  | 1250 -----
 .../highlight/TestPostingsSolrHighlighter.java  |  189 -
 .../highlight/TestUnifiedSolrHighlighter.java   |  295 -
 .../solr/index/DummyMergePolicyFactory.java     |   33 -
 .../index/TestSlowCompositeReaderWrapper.java   |  150 -
 .../index/UninvertDocValuesMergePolicyTest.java |  242 -
 .../index/WrapperMergePolicyFactoryTest.java    |  135 -
 .../solr/index/hdfs/CheckHdfsIndexTest.java     |  140 -
 .../apache/solr/internal/csv/CSVParserTest.java |  583 --
 .../solr/internal/csv/CSVPrinterTest.java       |  195 -
 .../solr/internal/csv/CSVStrategyTest.java      |   89 -
 .../apache/solr/internal/csv/CSVUtilsTest.java  |  150 -
 .../solr/internal/csv/CharBufferTest.java       |  190 -
 .../csv/ExtendedBufferedReaderTest.java         |  220 -
 .../csv/writer/CSVConfigGuesserTest.java        |   87 -
 .../solr/internal/csv/writer/CSVConfigTest.java |   98 -
 .../solr/internal/csv/writer/CSVFieldTest.java  |   46 -
 .../solr/internal/csv/writer/CSVWriterTest.java |   54 -
 .../org/apache/solr/legacy/TestLegacyField.java |  186 -
 .../solr/legacy/TestLegacyFieldReuse.java       |   81 -
 .../solr/legacy/TestLegacyNumericUtils.java     |  571 --
 .../org/apache/solr/legacy/TestLegacyTerms.java |  159 -
 .../TestMultiValuedNumericRangeQuery.java       |   84 -
 .../solr/legacy/TestNumericRangeQuery32.java    |  461 --
 .../solr/legacy/TestNumericRangeQuery64.java    |  490 --
 .../solr/legacy/TestNumericTokenStream.java     |  188 -
 .../org/apache/solr/logging/TestLogWatcher.java |   80 -
 .../org/apache/solr/metrics/JvmMetricsTest.java |  151 -
 .../apache/solr/metrics/MetricsConfigTest.java  |  126 -
 .../solr/metrics/MockCounterSupplier.java       |   36 -
 .../solr/metrics/MockHistogramSupplier.java     |   40 -
 .../apache/solr/metrics/MockMeterSupplier.java  |   36 -
 .../apache/solr/metrics/MockTimerSupplier.java  |   46 -
 .../solr/metrics/SolrCoreMetricManagerTest.java |  164 -
 .../solr/metrics/SolrMetricManagerTest.java     |  256 -
 .../solr/metrics/SolrMetricReporterTest.java    |   70 -
 .../solr/metrics/SolrMetricTestUtils.java       |  103 -
 .../metrics/SolrMetricsIntegrationTest.java     |  190 -
 .../metrics/reporters/MockMetricReporter.java   |   81 -
 .../reporters/SolrGangliaReporterTest.java      |   84 -
 .../reporters/SolrGraphiteReporterTest.java     |  118 -
 .../reporters/SolrJmxReporterCloudTest.java     |  114 -
 .../metrics/reporters/SolrJmxReporterTest.java  |  244 -
 .../reporters/SolrSlf4jReporterTest.java        |  106 -
 .../reporters/solr/SolrCloudReportersTest.java  |  185 -
 .../reporters/solr/SolrShardReporterTest.java   |  118 -
 .../metrics/rrd/SolrRrdBackendFactoryTest.java  |  201 -
 .../solr/request/RegexBytesRefFilterTest.java   |   34 -
 .../apache/solr/request/SimpleFacetsTest.java   | 3523 ------------
 .../request/SubstringBytesRefFilterTest.java    |   51 -
 .../apache/solr/request/TestFacetMethods.java   |  226 -
 .../org/apache/solr/request/TestFaceting.java   |  904 ----
 .../solr/request/TestIntervalFaceting.java      | 1207 -----
 .../solr/request/TestRemoteStreaming.java       |  136 -
 .../org/apache/solr/request/TestStreamBody.java |  138 -
 .../request/TestUnInvertedFieldException.java   |  129 -
 .../org/apache/solr/request/TestWriterPerf.java |  199 -
 .../solr/request/macro/TestMacroExpander.java   |  129 -
 .../apache/solr/request/macro/TestMacros.java   |   89 -
 .../apache/solr/response/JSONWriterTest.java    |  304 --
 .../apache/solr/response/SmileWriterTest.java   |  258 -
 .../solr/response/TestBinaryResponseWriter.java |  107 -
 .../solr/response/TestCSVResponseWriter.java    |  257 -
 .../solr/response/TestCustomDocTransformer.java |  128 -
 .../response/TestGeoJSONResponseWriter.java     |  279 -
 .../response/TestGraphMLResponseWriter.java     |  155 -
 .../response/TestJavabinTupleStreamParser.java  |  202 -
 .../TestPHPSerializedResponseWriter.java        |  105 -
 .../apache/solr/response/TestPushWriter.java    |   85 -
 .../solr/response/TestRawResponseWriter.java    |  202 -
 .../solr/response/TestRawTransformer.java       |   70 -
 .../response/TestRetrieveFieldsOptimizer.java   |  648 ---
 .../solr/response/TestSolrQueryResponse.java    |  293 -
 .../transform/TestChildDocTransformer.java      |  472 --
 .../TestChildDocTransformerHierarchy.java       |  387 --
 .../transform/TestSubQueryTransformer.java      |  568 --
 .../TestSubQueryTransformerCrossCore.java       |  131 -
 .../TestSubQueryTransformerDistrib.java         |  250 -
 .../apache/solr/rest/SolrRestletTestBase.java   |   67 -
 .../apache/solr/rest/TestManagedResource.java   |  327 --
 .../solr/rest/TestManagedResourceStorage.java   |  140 -
 .../org/apache/solr/rest/TestRestManager.java   |  280 -
 .../solr/rest/schema/TestBulkSchemaAPI.java     | 1029 ----
 .../schema/TestCopyFieldCollectionResource.java |  137 -
 .../TestDynamicFieldCollectionResource.java     |   63 -
 .../rest/schema/TestDynamicFieldResource.java   |   72 -
 .../schema/TestFieldCollectionResource.java     |   89 -
 .../solr/rest/schema/TestFieldResource.java     |   99 -
 .../schema/TestFieldTypeCollectionResource.java |   38 -
 .../solr/rest/schema/TestFieldTypeResource.java |   90 -
 .../rest/schema/TestSchemaNameResource.java     |   28 -
 .../solr/rest/schema/TestSchemaResource.java    |  182 -
 .../schema/TestSchemaSimilarityResource.java    |   35 -
 .../rest/schema/TestSchemaVersionResource.java  |   29 -
 .../TestSerializedLuceneMatchVersion.java       |   71 -
 .../rest/schema/TestUniqueKeyFieldResource.java |   30 -
 .../analysis/TestManagedStopFilterFactory.java  |  217 -
 .../TestManagedSynonymFilterFactory.java        |  295 -
 .../TestManagedSynonymGraphFilterFactory.java   |  298 -
 .../apache/solr/schema/BadCopyFieldTest.java    |   57 -
 .../apache/solr/schema/BadIndexSchemaTest.java  |  188 -
 .../apache/solr/schema/BooleanFieldTest.java    |   78 -
 .../solr/schema/ChangedSchemaMergeTest.java     |  201 -
 .../org/apache/solr/schema/CopyFieldTest.java   |  263 -
 .../solr/schema/CurrencyFieldTypeTest.java      |  729 ---
 .../solr/schema/CustomAnalyzerStrField.java     |   73 -
 .../org/apache/solr/schema/DateFieldTest.java   |   64 -
 .../apache/solr/schema/DateRangeFieldTest.java  |  128 -
 .../solr/schema/DocValuesMissingTest.java       |  547 --
 .../apache/solr/schema/DocValuesMultiTest.java  |  347 --
 .../org/apache/solr/schema/DocValuesTest.java   |  749 ---
 .../org/apache/solr/schema/EnumFieldTest.java   |  573 --
 .../solr/schema/ExternalFileFieldSortTest.java  |   64 -
 .../schema/IndexSchemaRuntimeFieldTest.java     |   71 -
 .../org/apache/solr/schema/IndexSchemaTest.java |  132 -
 ...IntPointPrefixActsAsRangeQueryFieldType.java |   34 -
 .../ManagedSchemaRoundRobinCloudTest.java       |   98 -
 .../solr/schema/MockExchangeRateProvider.java   |   91 -
 .../org/apache/solr/schema/MultiTermTest.java   |   93 -
 .../apache/solr/schema/MyCrazyCustomField.java  |   60 -
 .../solr/schema/NotRequiredUniqueKeyTest.java   |   49 -
 .../apache/solr/schema/NumericFieldsTest.java   |  240 -
 .../OpenExchangeRatesOrgProviderTest.java       |  119 -
 .../org/apache/solr/schema/PolyFieldTest.java   |  187 -
 .../PreAnalyzedFieldManagedSchemaCloudTest.java |   73 -
 .../solr/schema/PreAnalyzedFieldTest.java       |  256 -
 .../solr/schema/PrimitiveFieldTypeTest.java     |  131 -
 .../apache/solr/schema/RequiredFieldsTest.java  |  142 -
 .../solr/schema/SchemaApiFailureTest.java       |   63 -
 .../SchemaVersionSpecificBehaviorTest.java      |  229 -
 .../apache/solr/schema/SchemaWatcherTest.java   |   59 -
 .../apache/solr/schema/SortableBinaryField.java |   87 -
 .../solr/schema/SpatialRPTFieldTypeTest.java    |  284 -
 .../solr/schema/SynonymTokenizerTest.java       |   43 -
 .../org/apache/solr/schema/TestBinaryField.java |  173 -
 .../solr/schema/TestBulkSchemaConcurrent.java   |  315 --
 .../solr/schema/TestCloudManagedSchema.java     |  105 -
 .../apache/solr/schema/TestCloudSchemaless.java |  178 -
 .../apache/solr/schema/TestCollationField.java  |  195 -
 .../schema/TestCollationFieldDocValues.java     |  194 -
 .../solr/schema/TestHalfAndHalfDocValues.java   |  131 -
 .../apache/solr/schema/TestManagedSchema.java   |  472 --
 .../solr/schema/TestManagedSchemaAPI.java       |  102 -
 .../schema/TestManagedSchemaThreadSafety.java   |  189 -
 .../apache/solr/schema/TestOmitPositions.java   |   59 -
 .../org/apache/solr/schema/TestPointFields.java | 3942 --------------
 .../apache/solr/schema/TestSchemaManager.java   |   73 -
 .../schema/TestSchemalessBufferedUpdates.java   |  159 -
 .../solr/schema/TestSortableTextField.java      |  562 --
 .../solr/schema/TestUseDocValuesAsStored.java   |  429 --
 .../solr/schema/TestUseDocValuesAsStored2.java  |  185 -
 .../solr/schema/ThrowErrorOnInitFieldType.java  |   29 -
 .../TrieIntPrefixActsAsRangeQueryFieldType.java |   36 -
 .../org/apache/solr/schema/UUIDFieldTest.java   |   67 -
 .../solr/schema/WrappedIntPointField.java       |   46 -
 .../apache/solr/schema/WrappedTrieIntField.java |   31 -
 .../solr/search/AnalyticsMergeStrategyTest.java |  122 -
 .../apache/solr/search/AnalyticsQueryTest.java  |   84 -
 .../solr/search/AnalyticsTestQParserPlugin.java |  171 -
 .../ApacheLuceneSolrNearQueryBuilder.java       |   54 -
 .../solr/search/ChooseOneWordQueryBuilder.java  |   62 -
 .../search/CurrencyRangeFacetCloudTest.java     |  486 --
 .../org/apache/solr/search/CursorMarkTest.java  |  292 -
 .../solr/search/DelayingSearchComponent.java    |   51 -
 .../test/org/apache/solr/search/DocSetPerf.java |  173 -
 .../apache/solr/search/FooQParserPlugin.java    |   42 -
 .../apache/solr/search/GoodbyeQueryBuilder.java |   38 -
 .../apache/solr/search/HandyQueryBuilder.java   |   69 -
 .../apache/solr/search/HelloQueryBuilder.java   |   38 -
 .../org/apache/solr/search/LargeFieldTest.java  |  126 -
 .../apache/solr/search/MergeStrategyTest.java   |  177 -
 .../apache/solr/search/MockSearchComponent.java |   49 -
 .../apache/solr/search/QueryEqualityTest.java   | 1279 -----
 .../apache/solr/search/QueryParsingTest.java    |   97 -
 .../org/apache/solr/search/RankQueryTest.java   |  113 -
 .../apache/solr/search/ReturnFieldsTest.java    |  561 --
 .../SignificantTermsQParserPluginTest.java      |   35 -
 .../apache/solr/search/SortSpecParsingTest.java |  245 -
 .../apache/solr/search/SpatialFilterTest.java   |  207 -
 .../solr/search/TestAddFieldRealTimeGet.java    |   88 -
 .../solr/search/TestCollapseQParserPlugin.java  |  955 ----
 .../TestComplexPhraseLeadingWildcard.java       |  113 -
 .../search/TestComplexPhraseQParserPlugin.java  |  396 --
 .../apache/solr/search/TestComponentsName.java  |   72 -
 .../org/apache/solr/search/TestCustomSort.java  |  125 -
 .../test/org/apache/solr/search/TestDocSet.java |  572 --
 .../solr/search/TestElisionMultitermQuery.java  |   51 -
 .../solr/search/TestExtendedDismaxParser.java   | 2123 --------
 .../apache/solr/search/TestFastLRUCache.java    |  409 --
 .../apache/solr/search/TestFieldSortValues.java |   53 -
 .../solr/search/TestFilteredDocIdSet.java       |  222 -
 .../org/apache/solr/search/TestFiltering.java   |  549 --
 .../solr/search/TestFoldingMultitermQuery.java  |  349 --
 .../search/TestGraphTermsQParserPlugin.java     |  123 -
 .../solr/search/TestHashQParserPlugin.java      |  312 --
 .../apache/solr/search/TestIndexSearcher.java   |  484 --
 .../org/apache/solr/search/TestInitQParser.java |   62 -
 .../org/apache/solr/search/TestLFUCache.java    |  548 --
 .../org/apache/solr/search/TestLRUCache.java    |  192 -
 .../TestLegacyNumericRangeQueryBuilder.java     |  179 -
 .../solr/search/TestMaxScoreQueryParser.java    |  166 -
 .../apache/solr/search/TestMissingGroups.java   |  179 -
 .../solr/search/TestMultiWordSynonyms.java      |  100 -
 .../apache/solr/search/TestNoOpRegenerator.java |   60 -
 ...OverriddenPrefixQueryForCustomFieldType.java |  153 -
 .../search/TestPayloadCheckQParserPlugin.java   |   73 -
 .../search/TestPayloadScoreQParserPlugin.java   |   62 -
 .../solr/search/TestPseudoReturnFields.java     |  753 ---
 .../org/apache/solr/search/TestQueryTypes.java  |  484 --
 .../org/apache/solr/search/TestQueryUtils.java  |  288 -
 .../org/apache/solr/search/TestRTGBase.java     |  129 -
 .../search/TestRandomCollapseQParserPlugin.java |  212 -
 .../org/apache/solr/search/TestRangeQuery.java  |  705 ---
 .../apache/solr/search/TestRankQueryPlugin.java |  783 ---
 .../solr/search/TestReRankQParserPlugin.java    |  650 ---
 .../org/apache/solr/search/TestRealTimeGet.java |  798 ---
 .../org/apache/solr/search/TestRecovery.java    | 1718 ------
 .../apache/solr/search/TestRecoveryHdfs.java    | 1063 ----
 .../test/org/apache/solr/search/TestReload.java |  101 -
 .../apache/solr/search/TestReloadDeadlock.java  |  239 -
 .../org/apache/solr/search/TestSearchPerf.java  |  249 -
 .../apache/solr/search/TestSearcherReuse.java   |  256 -
 .../solr/search/TestSimpleQParserPlugin.java    |  232 -
 .../apache/solr/search/TestSmileRequest.java    |  110 -
 .../apache/solr/search/TestSolr4Spatial.java    |  440 --
 .../apache/solr/search/TestSolr4Spatial2.java   |  336 --
 .../apache/solr/search/TestSolrCoreParser.java  |  218 -
 .../solr/search/TestSolrFieldCacheBean.java     |   97 -
 .../test/org/apache/solr/search/TestSolrJ.java  |  183 -
 .../apache/solr/search/TestSolrQueryParser.java | 1128 ----
 .../test/org/apache/solr/search/TestSort.java   |  372 --
 .../solr/search/TestStandardQParsers.java       |   91 -
 .../apache/solr/search/TestStressLucene.java    |  375 --
 .../apache/solr/search/TestStressRecovery.java  |  417 --
 .../apache/solr/search/TestStressReorder.java   |  335 --
 .../solr/search/TestStressUserVersions.java     |  325 --
 .../apache/solr/search/TestStressVersions.java  |  281 -
 .../solr/search/TestSurroundQueryParser.java    |  107 -
 .../org/apache/solr/search/TestTrieFacet.java   |  205 -
 .../solr/search/TestValueSourceCache.java       |  134 -
 .../org/apache/solr/search/TestXmlQParser.java  |   42 -
 .../solr/search/TestXmlQParserPlugin.java       |   78 -
 .../org/apache/solr/search/facet/DebugAgg.java  |  221 -
 ...ibutedFacetSimpleRefinementLongTailTest.java |  443 --
 .../solr/search/facet/RangeFacetCloudTest.java  |  786 ---
 .../search/facet/SpatialHeatmapFacetsTest.java  |  349 --
 .../search/facet/TestJsonFacetRefinement.java   | 1299 -----
 .../solr/search/facet/TestJsonFacets.java       | 2663 ---------
 .../facet/TestJsonFacetsStatsParsing.java       |  106 -
 .../facet/TestJsonFacetsWithNestedObjects.java  |  363 --
 .../search/function/NvlValueSourceParser.java   |   80 -
 .../search/function/SortByFunctionTest.java     |  215 -
 .../solr/search/function/TestFunctionQuery.java | 1069 ----
 .../function/TestMinMaxOnMultiValuedField.java  |  927 ----
 .../solr/search/function/TestOrdValues.java     |  309 --
 .../function/TestSortByMinMaxFunction.java      |   35 -
 .../function/distance/DistanceFunctionTest.java |  242 -
 .../apache/solr/search/join/BJQParserTest.java  |  483 --
 .../search/join/BlockJoinFacetDistribTest.java  |  235 -
 .../search/join/BlockJoinFacetRandomTest.java   |  635 ---
 .../search/join/BlockJoinFacetSimpleTest.java   |  121 -
 .../apache/solr/search/join/GraphQueryTest.java |  143 -
 .../search/join/TestCloudNestedDocsSort.java    |  193 -
 .../solr/search/join/TestNestedDocsSort.java    |  150 -
 .../search/join/TestScoreJoinQPNoScore.java     |  369 --
 .../solr/search/join/TestScoreJoinQPScore.java  |  341 --
 .../solr/search/json/TestJsonRequest.java       |  539 --
 .../solr/search/mlt/CloudMLTQParserTest.java    |  250 -
 .../solr/search/mlt/SimpleMLTQParserTest.java   |  168 -
 .../similarities/BaseSimilarityTestCase.java    |   54 -
 .../similarities/CustomSimilarityFactory.java   |   27 -
 .../MockConfigurableSimilarity.java             |   31 -
 .../similarities/TestBM25SimilarityFactory.java |   45 -
 .../TestClassicSimilarityFactory.java           |   43 -
 .../similarities/TestDFISimilarityFactory.java  |   55 -
 .../similarities/TestDFRSimilarityFactory.java  |   71 -
 .../similarities/TestIBSimilarityFactory.java   |   59 -
 .../TestLMDirichletSimilarityFactory.java       |   44 -
 .../TestLMJelinekMercerSimilarityFactory.java   |   44 -
 .../TestNonDefinedSimilarityFactory.java        |   42 -
 .../similarities/TestPerFieldSimilarity.java    |   75 -
 ...stPerFieldSimilarityWithDefaultOverride.java |   73 -
 .../TestSweetSpotSimilarityFactory.java         |  152 -
 .../solr/search/stats/TestBaseStatsCache.java   |   68 -
 .../search/stats/TestDefaultStatsCache.java     |  112 -
 .../solr/search/stats/TestDistribIDF.java       |  257 -
 .../search/stats/TestExactSharedStatsCache.java |   25 -
 .../solr/search/stats/TestExactStatsCache.java  |   24 -
 .../solr/search/stats/TestLRUStatsCache.java    |   24 -
 .../solr/security/BasicAuthIntegrationTest.java |  320 --
 .../solr/security/BasicAuthStandaloneTest.java  |  221 -
 .../HttpParamDelegationTokenPlugin.java         |  272 -
 .../solr/security/MockAuthenticationPlugin.java |   80 -
 .../solr/security/MockAuthorizationPlugin.java  |   67 -
 .../PKIAuthenticationIntegrationTest.java       |  108 -
 .../security/TestAuthorizationFramework.java    |  119 -
 .../security/TestPKIAuthenticationPlugin.java   |  166 -
 .../TestRuleBasedAuthorizationPlugin.java       |  468 --
 .../TestSha256AuthenticationProvider.java       |   67 -
 .../solr/security/hadoop/ImpersonationUtil.java |   73 -
 .../hadoop/ImpersonatorCollectionsHandler.java  |   60 -
 .../hadoop/TestDelegationWithHadoopAuth.java    |  402 --
 .../hadoop/TestImpersonationWithHadoopAuth.java |  215 -
 .../TestSolrCloudWithHadoopAuthPlugin.java      |  139 -
 .../hadoop/TestZkAclsWithHadoopAuth.java        |  165 -
 .../apache/solr/servlet/CacheHeaderTest.java    |  269 -
 .../solr/servlet/CacheHeaderTestBase.java       |  151 -
 .../solr/servlet/DirectSolrConnectionTest.java  |   90 -
 .../solr/servlet/HttpSolrCallGetCoreTest.java   |  168 -
 .../apache/solr/servlet/NoCacheHeaderTest.java  |  175 -
 .../apache/solr/servlet/ResponseHeaderTest.java |   92 -
 .../solr/servlet/SolrRequestParserTest.java     |  510 --
 .../ConjunctionSolrSpellCheckerTest.java        |   96 -
 .../spelling/DirectSolrSpellCheckerTest.java    |   93 -
 .../spelling/FileBasedSpellCheckerTest.java     |  179 -
 .../spelling/IndexBasedSpellCheckerTest.java    |  332 --
 .../apache/solr/spelling/SampleComparator.java  |   35 -
 .../solr/spelling/SimpleQueryConverter.java     |   70 -
 .../solr/spelling/SpellCheckCollatorTest.java   |  635 ---
 .../SpellCheckCollatorWithCollapseTest.java     |   67 -
 .../spelling/SpellPossibilityIteratorTest.java  |  225 -
 .../spelling/SpellingQueryConverterTest.java    |  224 -
 .../spelling/TestSuggestSpellingConverter.java  |   73 -
 .../spelling/WordBreakSolrSpellCheckerTest.java |  331 --
 .../suggest/RandomTestDictionaryFactory.java    |  117 -
 .../solr/spelling/suggest/SuggesterFSTTest.java |   23 -
 .../solr/spelling/suggest/SuggesterTSTTest.java |   23 -
 .../solr/spelling/suggest/SuggesterTest.java    |  114 -
 .../spelling/suggest/SuggesterWFSTTest.java     |   23 -
 .../suggest/TestAnalyzeInfixSuggestions.java    |  117 -
 .../suggest/TestAnalyzedSuggestions.java        |   58 -
 .../suggest/TestBlendedInfixSuggestions.java    |  135 -
 .../suggest/TestFileDictionaryLookup.java       |   60 -
 .../suggest/TestFreeTextSuggestions.java        |   57 -
 .../suggest/TestFuzzyAnalyzedSuggestions.java   |  112 -
 .../TestHighFrequencyDictionaryFactory.java     |   80 -
 .../spelling/suggest/TestPhraseSuggestions.java |   45 -
 .../solr/store/blockcache/BlockCacheTest.java   |  357 --
 .../store/blockcache/BlockDirectoryTest.java    |  293 -
 .../solr/store/blockcache/BufferStoreTest.java  |  101 -
 .../solr/store/hdfs/HdfsDirectoryTest.java      |  235 -
 .../solr/store/hdfs/HdfsLockFactoryTest.java    |   81 -
 .../solr/uninverting/TestDocTermOrds.java       |  687 ---
 .../TestDocTermOrdsUninvertLimit.java           |   89 -
 .../apache/solr/uninverting/TestFieldCache.java |  730 ---
 .../solr/uninverting/TestFieldCacheReopen.java  |   72 -
 .../solr/uninverting/TestFieldCacheSort.java    | 1811 -------
 .../uninverting/TestFieldCacheSortRandom.java   |  325 --
 .../uninverting/TestFieldCacheVsDocValues.java  |  517 --
 .../uninverting/TestFieldCacheWithThreads.java  |  256 -
 .../solr/uninverting/TestLegacyFieldCache.java  |  473 --
 .../solr/uninverting/TestNumericTerms32.java    |  156 -
 .../solr/uninverting/TestNumericTerms64.java    |  166 -
 .../solr/uninverting/TestUninvertingReader.java |  403 --
 .../apache/solr/update/AddBlockUpdateTest.java  |  910 ----
 .../solr/update/AnalysisErrorHandlingTest.java  |   50 -
 .../apache/solr/update/CdcrUpdateLogTest.java   |  774 ---
 .../solr/update/DataDrivenBlockJoinTest.java    |   70 -
 .../solr/update/DirectUpdateHandlerTest.java    |  467 --
 .../apache/solr/update/DocumentBuilderTest.java |  295 -
 .../apache/solr/update/DummyMergePolicy.java    |   33 -
 .../solr/update/MaxSizeAutoCommitTest.java      |  233 -
 .../solr/update/MockStreamingSolrClients.java   |   96 -
 .../org/apache/solr/update/PeerSyncTest.java    |  352 --
 .../update/PeerSyncWithBufferUpdatesTest.java   |  223 -
 ...PeerSyncWithIndexFingerprintCachingTest.java |  108 -
 ...ithLeaderAndIndexFingerprintCachingTest.java |   36 -
 .../solr/update/PeerSyncWithLeaderTest.java     |   39 -
 .../apache/solr/update/SoftAutoCommitTest.java  |  590 --
 .../solr/update/SolrCmdDistributorTest.java     |  842 ---
 .../apache/solr/update/SolrIndexConfigTest.java |  210 -
 .../solr/update/SolrIndexMetricsTest.java       |  126 -
 .../solr/update/SolrIndexSplitterTest.java      |  382 --
 .../solr/update/TestAtomicUpdateErrorCases.java |  101 -
 .../update/TestDocBasedVersionConstraints.java  |  544 --
 .../solr/update/TestExceedMaxTermLength.java    |  150 -
 .../apache/solr/update/TestHdfsUpdateLog.java   |  148 -
 .../solr/update/TestInPlaceUpdatesDistrib.java  | 1247 -----
 .../update/TestInPlaceUpdatesStandalone.java    | 1103 ----
 .../solr/update/TestIndexingPerformance.java    |  129 -
 .../solr/update/TestNestedUpdateProcessor.java  |  195 -
 .../test/org/apache/solr/update/TestUpdate.java |  259 -
 .../apache/solr/update/TransactionLogTest.java  |   44 -
 .../org/apache/solr/update/UpdateLogTest.java   |  263 -
 .../apache/solr/update/UpdateParamsTest.java    |   76 -
 .../org/apache/solr/update/VersionInfoTest.java |  172 -
 ...dSchemaFieldsUpdateProcessorFactoryTest.java |  310 --
 .../AtomicUpdateProcessorFactoryTest.java       |  279 -
 .../update/processor/AtomicUpdatesTest.java     | 1347 -----
 ...lassificationUpdateProcessorFactoryTest.java |  136 -
 ...ificationUpdateProcessorIntegrationTest.java |  192 -
 .../ClassificationUpdateProcessorTest.java      |  507 --
 .../CloneFieldUpdateProcessorFactoryTest.java   |  370 --
 .../processor/CustomUpdateRequestProcessor.java |   30 -
 .../CustomUpdateRequestProcessorFactory.java    |   44 -
 .../DefaultValueUpdateProcessorTest.java        |  154 -
 .../DistributedUpdateProcessorTest.java         |   54 -
 ...DocExpirationUpdateProcessorFactoryTest.java |  221 -
 .../FieldMutatingUpdateProcessorTest.java       |  833 ---
 ...ommitOptimizeUpdateProcessorFactoryTest.java |   84 -
 ...IgnoreLargeDocumentProcessorFactoryTest.java |  181 -
 .../ParsingFieldUpdateProcessorsTest.java       | 1053 ----
 .../PreAnalyzedUpdateProcessorTest.java         |  119 -
 .../RecordingUpdateProcessorFactory.java        |  118 -
 .../processor/RegexBoostProcessorTest.java      |  126 -
 .../solr/update/processor/RuntimeUrp.java       |   40 -
 .../solr/update/processor/ScriptEngineTest.java |  110 -
 .../SignatureUpdateProcessorFactoryTest.java    |  358 --
 ...ipExistingDocumentsProcessorFactoryTest.java |  342 --
 ...atelessScriptUpdateProcessorFactoryTest.java |  275 -
 .../processor/TemplateUpdateProcessorTest.java  |  101 -
 .../processor/TestNamedUpdateProcessors.java    |  168 -
 .../TestPartialUpdateDeduplication.java         |   68 -
 .../TimeRoutedAliasUpdateProcessorTest.java     |  715 ---
 .../processor/TolerantUpdateProcessorTest.java  |  448 --
 .../TrackingUpdateProcessorFactory.java         |  156 -
 .../processor/URLClassifyProcessorTest.java     |  104 -
 .../UUIDUpdateProcessorFallbackTest.java        |  208 -
 .../UniqFieldsUpdateProcessorFactoryTest.java   |  121 -
 .../UpdateRequestProcessorFactoryTest.java      |  184 -
 .../test/org/apache/solr/util/BitSetPerf.java   |  195 -
 .../org/apache/solr/util/CircularListTest.java  |   48 -
 .../test/org/apache/solr/util/DOMUtilTest.java  |   57 -
 .../apache/solr/util/DateMathParserTest.java    |  441 --
 .../org/apache/solr/util/DistanceUnitsTest.java |   28 -
 .../org/apache/solr/util/FileUtilsTest.java     |   32 -
 .../org/apache/solr/util/MockCoreContainer.java |   40 -
 .../solr/util/MockSearchableSolrClient.java     |  117 -
 .../org/apache/solr/util/NumberUtilsTest.java   |   40 -
 .../apache/solr/util/OrderedExecutorTest.java   |  105 -
 .../org/apache/solr/util/PrimUtilsTest.java     |   92 -
 .../apache/solr/util/SimplePostToolTest.java    |  250 -
 .../apache/solr/util/SolrPluginUtilsTest.java   |  451 --
 .../apache/solr/util/TestFastOutputStream.java  |   83 -
 .../solr/util/TestMaxTokenLenTokenizer.java     |  119 -
 .../solr/util/TestObjectReleaseTracker.java     |   62 -
 .../org/apache/solr/util/TestRTimerTree.java    |   87 -
 .../apache/solr/util/TestSafeXMLParsing.java    |   99 -
 .../apache/solr/util/TestSolrCLIRunExample.java |  657 ---
 .../apache/solr/util/TestSystemIdResolver.java  |  106 -
 .../org/apache/solr/util/TestTestInjection.java |  105 -
 .../test/org/apache/solr/util/TestUtils.java    |  324 --
 .../org/apache/solr/util/TimeZoneUtilsTest.java |  134 -
 .../org/apache/solr/util/UtilsToolTest.java     |  189 -
 .../configuration/SSLConfigurationsTest.java    |  268 -
 .../SSLCredentialProviderFactoryTest.java       |   90 -
 .../providers/EnvSSLCredentialProviderTest.java |   61 -
 .../HadoopSSLCredentialProviderTest.java        |   74 -
 .../SysPropSSLCredentialProviderTest.java       |   66 -
 .../BigEndianAscendingWordDeserializerTest.java |  188 -
 .../BigEndianAscendingWordSerializerTest.java   |  336 --
 .../org/apache/solr/util/hll/BitVectorTest.java |  169 -
 .../apache/solr/util/hll/ExplicitHLLTest.java   |  234 -
 .../org/apache/solr/util/hll/FullHLLTest.java   |  341 --
 .../solr/util/hll/HLLSerializationTest.java     |  225 -
 .../org/apache/solr/util/hll/HLLUtilTest.java   |   44 -
 .../solr/util/hll/IntegrationTestGenerator.java |  711 ---
 .../solr/util/hll/ProbabilisticTestUtil.java    |   75 -
 .../org/apache/solr/util/hll/SparseHLLTest.java |  452 --
 .../apache/solr/util/stats/MetricUtilsTest.java |  166 -
 solr/core/src/test/resources/README             |   21 +
 .../src/test/resources/books_numeric_ids.csv    |   11 +
 solr/core/src/test/resources/cryptokeys/pk1.pem |    9 +
 solr/core/src/test/resources/cryptokeys/pk2.pem |   12 +
 .../src/test/resources/cryptokeys/pubk1.der     |  Bin 0 -> 94 bytes
 .../src/test/resources/cryptokeys/pubk2.der     |  Bin 0 -> 126 bytes
 .../test/resources/cryptokeys/samplefile.bin    |  Bin 0 -> 3262 bytes
 .../src/test/resources/exampledocs/example.html |   49 +
 .../src/test/resources/exampledocs/example.txt  |    3 +
 solr/core/src/test/resources/lib-dirs/README    |   18 +
 .../resources/lib-dirs/a/a1/empty-file-a1.txt   |    1 +
 .../resources/lib-dirs/a/a2/empty-file-a2.txt   |    1 +
 .../resources/lib-dirs/b/b1/empty-file-b1.txt   |    1 +
 .../resources/lib-dirs/b/b2/empty-file-b2.txt   |    1 +
 .../resources/lib-dirs/c/c1/empty-file-c1.txt   |    1 +
 .../resources/lib-dirs/c/c2/empty-file-c2.txt   |    1 +
 .../resources/lib-dirs/d/d2/empty-file-d2.txt   |    1 +
 solr/core/src/test/resources/log4j2.xml         |   39 +
 solr/core/src/test/resources/mailing_lists.pdf  |  382 ++
 .../test/resources/old-solr-example/README.txt  |    0
 .../test/resources/old-solr-example/solr.xml    |    0
 .../runtimecode/RuntimeLibReqHandler.java       |   33 +
 .../runtimecode/RuntimeLibResponseWriter.java   |   49 +
 .../runtimecode/RuntimeLibSearchComponent.java  |   37 +
 .../src/test/resources/runtimecode/TestURP.java |   30 +
 .../resources/runtimecode/runtimelibs.jar.bin   |  Bin 0 -> 6860 bytes
 .../runtimecode/runtimelibs_v2.jar.bin          |  Bin 0 -> 6582 bytes
 .../resources/runtimecode/runtimeurp.jar.bin    |  Bin 0 -> 753 bytes
 .../solr/analysisconfs/analysis-err-schema.xml  |   40 +
 .../conf/addfields.updateprocessor.js           |   26 +
 .../collection1/conf/analyzingInfixSuggest.txt  |    5 +
 .../solr/collection1/conf/bad-currency.xml      |   31 +
 .../collection1/conf/bad-error-solrconfig.xml   |   30 +
 .../collection1/conf/bad-mpf-solrconfig.xml     |   37 +
 .../bad-schema-analyzer-class-and-nested.xml    |   35 +
 .../bad-schema-bogus-analysis-parameters.xml    |   28 +
 .../conf/bad-schema-bogus-field-parameters.xml  |   25 +
 .../bad-schema-codec-global-vs-ft-mismatch.xml  |   31 +
 .../bad-schema-currency-dynamic-multivalued.xml |   30 +
 .../bad-schema-currency-ft-amount-suffix.xml    |   34 +
 ...bad-schema-currency-ft-bogus-code-in-xml.xml |   33 +
 ...ad-schema-currency-ft-bogus-default-code.xml |   33 +
 .../conf/bad-schema-currency-ft-code-suffix.xml |   33 +
 .../conf/bad-schema-currency-ft-multivalued.xml |   29 +
 .../conf/bad-schema-currency-ft-oer-norates.xml |   32 +
 .../conf/bad-schema-currency-multivalued.xml    |   30 +
 ...ma-currencyfieldtype-bogus-amount-suffix.xml |   34 +
 ...hema-currencyfieldtype-bogus-code-suffix.xml |   35 +
 ...ma-currencyfieldtype-dynamic-multivalued.xml |   36 +
 ...a-currencyfieldtype-ft-bogus-code-in-xml.xml |   41 +
 ...-currencyfieldtype-ft-bogus-default-code.xml |   41 +
 ...-schema-currencyfieldtype-ft-multivalued.xml |   36 +
 ...-schema-currencyfieldtype-ft-oer-norates.xml |   40 +
 ...-currencyfieldtype-missing-amount-suffix.xml |   34 +
 ...ma-currencyfieldtype-missing-code-suffix.xml |   35 +
 ...bad-schema-currencyfieldtype-multivalued.xml |   36 +
 ...schema-currencyfieldtype-wrong-amount-ft.xml |   36 +
 ...d-schema-currencyfieldtype-wrong-code-ft.xml |   35 +
 .../conf/bad-schema-default-operator.xml        |   26 +
 .../conf/bad-schema-defaultsearchfield.xml      |   26 +
 .../conf/bad-schema-dup-dynamicField.xml        |   35 +
 .../collection1/conf/bad-schema-dup-field.xml   |   38 +
 .../conf/bad-schema-dup-fieldType.xml           |   37 +
 .../bad-schema-dynamicfield-default-val.xml     |   29 +
 .../conf/bad-schema-dynamicfield-required.xml   |   29 +
 .../solr/collection1/conf/bad-schema-eff.xml    |   44 +
 .../solr/collection1/conf/bad-schema-enums.xml  |   34 +
 ...asterisk-copyfield-dest-should-fail-test.xml |   27 +
 ...terisk-copyfield-source-should-fail-test.xml |   27 +
 ...asterisk-copyfield-dest-should-fail-test.xml |   25 +
 ...terisk-copyfield-source-should-fail-test.xml |   25 +
 ...source-matching-nothing-should-fail-test.xml |   31 +
 .../conf/bad-schema-nontext-analyzer.xml        |   33 +
 .../conf/bad-schema-not-indexed-but-norms.xml   |   35 +
 .../conf/bad-schema-not-indexed-but-pos.xml     |   35 +
 .../conf/bad-schema-not-indexed-but-tf.xml      |   34 +
 .../conf/bad-schema-omit-tf-but-not-pos.xml     |   35 +
 .../bad-schema-sim-default-does-not-exist.xml   |   41 +
 ...d-schema-sim-default-has-no-explicit-sim.xml |   41 +
 .../bad-schema-sim-global-vs-ft-mismatch.xml    |   34 +
 .../conf/bad-schema-sweetspot-both-tf.xml       |   43 +
 .../bad-schema-sweetspot-partial-baseline.xml   |   39 +
 .../bad-schema-sweetspot-partial-hyperbolic.xml |   41 +
 .../conf/bad-schema-sweetspot-partial-norms.xml |   40 +
 ...-schema-uniquekey-diff-type-dynamic-root.xml |   36 +
 .../bad-schema-uniquekey-diff-type-root.xml     |   35 +
 .../bad-schema-uniquekey-is-copyfield-dest.xml  |   31 +
 .../conf/bad-schema-uniquekey-multivalued.xml   |   28 +
 .../conf/bad-schema-uniquekey-uses-default.xml  |   28 +
 .../conf/bad-schema-uniquekey-uses-points.xml   |   28 +
 .../conf/bad-schema-unsupported-docValues.xml   |   26 +
 .../bad-solrconfig-bogus-scriptengine-name.xml  |   34 +
 .../conf/bad-solrconfig-invalid-scriptfile.xml  |   35 +
 ...lrconfig-managed-schema-named-schema.xml.xml |   30 +
 .../conf/bad-solrconfig-missing-scriptfile.xml  |   33 +
 .../conf/bad-solrconfig-multiple-cfs.xml        |   32 +
 .../conf/bad-solrconfig-multiple-dirfactory.xml |   34 +
 .../bad-solrconfig-multiple-indexconfigs.xml    |   35 +
 .../conf/bad-solrconfig-no-autocommit-tag.xml   |   52 +
 .../collection1/conf/bad-solrconfig-nrtmode.xml |   37 +
 ...olrconfig-schema-mutable-but-not-managed.xml |   32 +
 ...d-solrconfig-unexpected-schema-attribute.xml |   32 +
 .../solr/collection1/conf/bad_solrconfig.xml    |   28 +
 .../collection1/conf/blendedInfixSuggest.txt    |    3 +
 .../collection1/conf/compoundDictionary.txt     |   19 +
 .../conf/conditional.updateprocessor.js         |   25 +
 .../solr/collection1/conf/cross-compatible.js   |   53 +
 .../solr/collection1/conf/currency.xml          |   37 +
 .../resources/solr/collection1/conf/da_UTF8.xml | 1208 +++++
 .../collection1/conf/da_compoundDictionary.txt  |   19 +
 .../resources/solr/collection1/conf/elevate.xml |   54 +
 .../solr/collection1/conf/enumsConfig.xml       |   52 +
 .../solr/collection1/conf/freeTextSuggest.txt   |    2 +
 .../solr/collection1/conf/frenchArticles.txt    |   24 +
 .../solr/collection1/conf/fuzzysuggest.txt      |    4 +
 .../solr/collection1/conf/hunspell-test.aff     |   13 +
 .../solr/collection1/conf/hunspell-test.dic     |    6 +
 .../solr/collection1/conf/hyphenation.dtd       |   68 +
 .../solr/collection1/conf/jasuggest.txt         |    5 +
 .../resources/solr/collection1/conf/keep-1.txt  |   17 +
 .../resources/solr/collection1/conf/keep-2.txt  |   17 +
 .../conf/mapping-ISOLatin1Accent.txt            |  246 +
 .../conf/missing.functions.updateprocessor.js   |    3 +
 ...missleading.extension.updateprocessor.js.txt |   23 +
 .../collection1/conf/multiword-synonyms.txt     |   13 +
 .../solr/collection1/conf/old_synonyms.txt      |   22 +
 .../collection1/conf/open-exchange-rates.json   |   18 +
 .../solr/collection1/conf/phrasesuggest.txt     |    8 +
 .../solr/collection1/conf/protected-1.txt       |   17 +
 .../solr/collection1/conf/protected-2.txt       |   17 +
 .../solr/collection1/conf/protwords.txt         |   23 +
 .../conf/regex-boost-processor-test.txt         |   10 +
 .../conf/schema-HighlighterMaxOffsetTest.xml    |   85 +
 .../collection1/conf/schema-SimpleTextCodec.xml |   32 +
 ...chema-add-schema-fields-update-processor.xml |   72 +
 .../solr/collection1/conf/schema-behavior.xml   |  132 +
 .../collection1/conf/schema-binaryfield.xml     |   41 +
 .../conf/schema-blockjoinfacetcomponent.xml     |   40 +
 .../solr/collection1/conf/schema-bm25.xml       |   46 +
 .../collection1/conf/schema-charfilters.xml     |   47 +
 ...a-class-name-shortening-on-serialization.xml |   44 +
 .../collection1/conf/schema-classification.xml  |   43 +
 .../solr/collection1/conf/schema-collate-dv.xml |   59 +
 .../solr/collection1/conf/schema-collate.xml    |   58 +
 .../collection1/conf/schema-copyfield-test.xml  |  456 ++
 .../collection1/conf/schema-custom-field.xml    |   44 +
 .../collection1/conf/schema-customfield.xml     |   55 +
 .../solr/collection1/conf/schema-dfi.xml        |   50 +
 .../solr/collection1/conf/schema-dfr.xml        |   64 +
 .../conf/schema-distrib-interval-faceting.xml   |   78 +
 .../conf/schema-distributed-missing-sort.xml    |   86 +
 .../solr/collection1/conf/schema-docValues.xml  |   81 +
 .../conf/schema-docValuesFaceting.xml           |   96 +
 .../collection1/conf/schema-docValuesJoin.xml   |  105 +
 .../conf/schema-docValuesMissing.xml            |  116 +
 .../collection1/conf/schema-docValuesMulti.xml  |   49 +
 .../solr/collection1/conf/schema-eff.xml        |   40 +
 .../solr/collection1/conf/schema-enums.xml      |   49 +
 .../conf/schema-field-sort-values.xml           |   36 +
 .../solr/collection1/conf/schema-folding.xml    |  262 +
 .../solr/collection1/conf/schema-hash.xml       |  614 +++
 .../solr/collection1/conf/schema-ib.xml         |   51 +
 .../conf/schema-id-and-version-fields-only.xml  |   25 +
 .../collection1/conf/schema-inplace-updates.xml |   67 +
 .../collection1/conf/schema-lmdirichlet.xml     |   44 +
 .../collection1/conf/schema-lmjelinekmercer.xml |   44 +
 .../conf/schema-luceneMatchVersion.xml          |   52 +
 .../conf/schema-minimal-atomic-stress.xml       |   38 +
 .../schema-minimal-with-another-uniqkey.xml     |   23 +
 .../solr/collection1/conf/schema-minimal.xml    |   21 +
 .../conf/schema-multiword-synonyms.xml          |   50 +
 .../solr/collection1/conf/schema-nest.xml       |   65 +
 .../conf/schema-non-stored-docvalues.xml        |   74 +
 .../conf/schema-not-required-unique-key.xml     |   38 +
 .../conf/schema-null-charfilters-analyzer.xml   |   27 +
 .../solr/collection1/conf/schema-numeric.xml    |   85 +
 ...ma-one-field-no-dynamic-field-unique-key.xml |   25 +
 .../conf/schema-one-field-no-dynamic-field.xml  |   24 +
 .../conf/schema-phrases-identification.xml      |   97 +
 .../collection1/conf/schema-phrasesuggest.xml   |   56 +
 .../solr/collection1/conf/schema-point.xml      |  187 +
 .../conf/schema-postingshighlight.xml           |   46 +
 .../collection1/conf/schema-preanalyzed.xml     |   44 +
 .../collection1/conf/schema-protected-term.xml  |   86 +
 .../collection1/conf/schema-psuedo-fields.xml   |   74 +
 .../collection1/conf/schema-replication1.xml    |   38 +
 .../collection1/conf/schema-replication2.xml    |   40 +
 .../collection1/conf/schema-required-fields.xml |  401 ++
 .../conf/schema-rest-lucene-match-version.xml   |   36 +
 .../solr/collection1/conf/schema-rest.xml       |  747 +++
 .../solr/collection1/conf/schema-reversed.xml   |   80 +
 .../conf/schema-sim-default-override.xml        |   66 +
 .../solr/collection1/conf/schema-sim.xml        |   69 +
 .../collection1/conf/schema-simpleqpplugin.xml  |   60 +
 .../collection1/conf/schema-snippet-field.xml   |    3 +
 .../collection1/conf/schema-snippet-type.xml    |    3 +
 .../collection1/conf/schema-snippet-types.incl  |   19 +
 .../collection1/conf/schema-sorting-text.xml    |  149 +
 .../collection1/conf/schema-sortingresponse.xml |  107 +
 .../solr/collection1/conf/schema-sorts.xml      |  314 ++
 .../solr/collection1/conf/schema-spatial.xml    |  101 +
 .../collection1/conf/schema-spellchecker.xml    |   71 +
 .../solr/collection1/conf/schema-sql.xml        |  650 +++
 .../solr/collection1/conf/schema-sweetspot.xml  |   69 +
 .../conf/schema-synonym-tokenizer.xml           |   40 +
 .../solr/collection1/conf/schema-tagger.xml     |  187 +
 .../solr/collection1/conf/schema-tfidf.xml      |   44 +
 .../solr/collection1/conf/schema-tiny.xml       |   35 +
 .../collection1/conf/schema-tokenizer-test.xml  |  125 +
 .../solr/collection1/conf/schema-trie.xml       |  324 ++
 .../conf/schema-unifiedhighlight.xml            |   45 +
 .../solr/collection1/conf/schema-version-dv.xml |   33 +
 .../collection1/conf/schema-version-indexed.xml |   33 +
 .../solr/collection1/conf/schema-xinclude.xml   |   26 +
 .../resources/solr/collection1/conf/schema.xml  |  834 +++
 .../solr/collection1/conf/schema11.xml          |  539 ++
 .../solr/collection1/conf/schema12.xml          |  745 +++
 .../solr/collection1/conf/schema15.xml          |  625 +++
 .../solr/collection1/conf/schema_codec.xml      |   51 +
 .../solr/collection1/conf/schema_latest.xml     |  791 +++
 .../solr/collection1/conf/schemasurround.xml    |  609 +++
 .../collection1/conf/solrconfig-SOLR-749.xml    |   35 +
 ...dd-schema-fields-update-processor-chains.xml |  223 +
 .../conf/solrconfig-altdirectory.xml            |   27 +
 .../conf/solrconfig-analytics-query.xml         |  319 ++
 .../solr/collection1/conf/solrconfig-basic.xml  |   29 +
 .../conf/solrconfig-blockjoinfacetcomponent.xml |   58 +
 .../conf/solrconfig-cache-enable-disable.xml    |   86 +
 .../collection1/conf/solrconfig-caching.xml     |   40 +
 .../solr/collection1/conf/solrconfig-cdcr.xml   |   77 +
 .../conf/solrconfig-cdcrupdatelog.xml           |   49 +
 .../conf/solrconfig-classification.xml          |   68 +
 .../conf/solrconfig-collapseqparser.xml         |  326 ++
 .../conf/solrconfig-components-name.xml         |   74 +
 .../solrconfig-concurrentmergescheduler.xml     |   37 +
 .../solrconfig-configurerecoverystrategy.xml    |   28 +
 .../conf/solrconfig-customrecoverystrategy.xml  |   32 +
 .../collection1/conf/solrconfig-deeppaging.xml  |   52 +
 .../collection1/conf/solrconfig-defaults.xml    |   43 +
 .../conf/solrconfig-delaying-component.xml      |   62 +
 .../collection1/conf/solrconfig-delpolicy1.xml  |   51 +
 .../collection1/conf/solrconfig-delpolicy2.xml  |   48 +
 ...lrconfig-distrib-update-processor-chains.xml |   84 +
 .../solrconfig-doc-expire-update-processor.xml  |   96 +
 .../conf/solrconfig-doctransformers.xml         |   52 +
 .../collection1/conf/solrconfig-elevate.xml     |  160 +
 .../solrconfig-externalversionconstraint.xml    |  155 +
 .../conf/solrconfig-functionquery.xml           |   48 +
 .../solr/collection1/conf/solrconfig-hash.xml   |   61 +
 .../collection1/conf/solrconfig-headers.xml     |   32 +
 .../collection1/conf/solrconfig-highlight.xml   |   61 +
 .../conf/solrconfig-implicitproperties.xml      |   76 +
 ...olrconfig-indexconfig-mergepolicyfactory.xml |   31 +
 .../conf/solrconfig-indexmetrics.xml            |   61 +
 .../conf/solrconfig-infixsuggesters.xml         |  101 +
 .../conf/solrconfig-infostream-logging.xml      |   29 +
 .../conf/solrconfig-logmergepolicyfactory.xml   |   37 +
 .../conf/solrconfig-managed-schema-test.xml     |   27 +
 .../conf/solrconfig-managed-schema.xml          |   86 +
 .../conf/solrconfig-master-throttled.xml        |   66 +
 .../solr/collection1/conf/solrconfig-master.xml |   70 +
 .../conf/solrconfig-master1-keepOneBackup.xml   |   49 +
 .../collection1/conf/solrconfig-master1.xml     |   68 +
 .../collection1/conf/solrconfig-master2.xml     |   66 +
 .../collection1/conf/solrconfig-master3.xml     |   67 +
 .../conf/solrconfig-mergepolicy-defaults.xml    |   33 +
 .../conf/solrconfig-mergepolicy-legacy.xml      |   31 +
 .../solrconfig-mergepolicyfactory-nocfs.xml     |   34 +
 .../collection1/conf/solrconfig-minimal.xml     |   65 +
 .../collection1/conf/solrconfig-nocache.xml     |   48 +
 .../conf/solrconfig-nomergepolicyfactory.xml    |   32 +
 .../collection1/conf/solrconfig-noopregen.xml   |   36 +
 .../collection1/conf/solrconfig-paramset.xml    |   89 +
 ...lrconfig-parsing-update-processor-chains.xml |  234 +
 .../conf/solrconfig-phrases-identification.xml  |   53 +
 .../conf/solrconfig-phrasesuggest.xml           |  468 ++
 .../conf/solrconfig-plugcollector.xml           |  543 ++
 .../conf/solrconfig-postingshighlight.xml       |   36 +
 .../conf/solrconfig-query-parser-init.xml       |   37 +
 .../conf/solrconfig-querysender-noquery.xml     |   75 +
 .../collection1/conf/solrconfig-querysender.xml |   71 +
 .../collection1/conf/solrconfig-repeater.xml    |   61 +
 .../collection1/conf/solrconfig-reqHandler.incl |    5 +
 .../conf/solrconfig-response-log-component.xml  |   61 +
 .../collection1/conf/solrconfig-schemaless.xml  |   98 +
 .../conf/solrconfig-script-updateprocessor.xml  |  120 +
 .../conf/solrconfig-searcher-listeners1.xml     |   51 +
 .../solr/collection1/conf/solrconfig-slave.xml  |   59 +
 .../solr/collection1/conf/solrconfig-slave1.xml |   52 +
 .../conf/solrconfig-snippet-processor.xml       |    6 +
 .../conf/solrconfig-solcoreproperties.xml       |   36 +
 .../solrconfig-sortingmergepolicyfactory.xml    |   56 +
 .../conf/solrconfig-sortingresponse.xml         |   45 +
 .../collection1/conf/solrconfig-spatial.xml     |   42 +
 .../conf/solrconfig-spellcheckcomponent.xml     |  197 +
 .../conf/solrconfig-spellchecker.xml            |  143 +
 .../solr/collection1/conf/solrconfig-sql.xml    |   72 +
 ...-suggestercomponent-context-filter-query.xml |  122 +
 .../conf/solrconfig-suggestercomponent.xml      |  146 +
 .../solr/collection1/conf/solrconfig-tagger.xml |   59 +
 .../collection1/conf/solrconfig-test-misc.xml   |   53 +
 .../conf/solrconfig-testxmlparser.xml           |   33 +
 .../solrconfig-tieredmergepolicyfactory.xml     |   41 +
 .../solr/collection1/conf/solrconfig-tlog.xml   |  181 +
 .../conf/solrconfig-tolerant-search.xml         |   53 +
 .../conf/solrconfig-tolerant-update-minimal.xml |   40 +
 .../conf/solrconfig-transformers.xml            |   89 +
 ...nfig-uninvertdocvaluesmergepolicyfactory.xml |   38 +
 .../conf/solrconfig-update-processor-chains.xml |  657 +++
 ...lrconfig-warmer-randommergepolicyfactory.xml |   46 +
 .../conf/solrconfig-withgethandler.xml          |   52 +
 .../collection1/conf/solrconfig-xinclude.xml    |   36 +
 .../solrconfig.snippet.randomindexconfig.xml    |   49 +
 .../solr/collection1/conf/solrconfig.xml        |  567 ++
 .../conf/solrconfig_SimpleTextCodec.xml         |   26 +
 .../solr/collection1/conf/solrconfig_codec.xml  |   28 +
 .../solr/collection1/conf/solrconfig_codec2.xml |   26 +
 .../solr/collection1/conf/solrconfig_perf.xml   |   73 +
 .../solr/collection1/conf/stemdict.txt          |   22 +
 .../resources/solr/collection1/conf/stop-1.txt  |   17 +
 .../resources/solr/collection1/conf/stop-2.txt  |   17 +
 .../solr/collection1/conf/stop-snowball.txt     |   10 +
 .../solr/collection1/conf/stoptypes-1.txt       |   17 +
 .../solr/collection1/conf/stoptypes-2.txt       |   17 +
 .../solr/collection1/conf/stopwithbom.txt       |    1 +
 .../solr/collection1/conf/stopwords.txt         |   58 +
 .../collection1/conf/stopwordsWrongEncoding.txt |   18 +
 .../solr/collection1/conf/synonyms.txt          |   40 +
 .../conf/throw.error.on.add.updateprocessor.js  |   21 +
 .../conf/trivial.updateprocessor0.js            |   59 +
 .../conf/trivial.updateprocessor1.js            |   25 +
 .../solr/collection1/conf/wdftypes.txt          |   32 +
 .../conf/xslt/dummy-using-include.xsl           |   31 +
 .../solr/collection1/conf/xslt/dummy.xsl        |   39 +
 .../conf/xslt/xsl-update-handler-test.xsl       |   49 +
 .../test/resources/solr/conf/core.properties    |   19 +
 .../_default/conf/lang/contractions_ca.txt      |    8 +
 .../_default/conf/lang/contractions_fr.txt      |   15 +
 .../_default/conf/lang/contractions_ga.txt      |    5 +
 .../_default/conf/lang/contractions_it.txt      |   23 +
 .../_default/conf/lang/hyphenations_ga.txt      |    5 +
 .../_default/conf/lang/stemdict_nl.txt          |    6 +
 .../_default/conf/lang/stoptags_ja.txt          |  420 ++
 .../_default/conf/lang/stopwords_ar.txt         |  125 +
 .../_default/conf/lang/stopwords_bg.txt         |  193 +
 .../_default/conf/lang/stopwords_ca.txt         |  220 +
 .../_default/conf/lang/stopwords_cz.txt         |  172 +
 .../_default/conf/lang/stopwords_da.txt         |  110 +
 .../_default/conf/lang/stopwords_de.txt         |  294 +
 .../_default/conf/lang/stopwords_el.txt         |   78 +
 .../_default/conf/lang/stopwords_en.txt         |   54 +
 .../_default/conf/lang/stopwords_es.txt         |  356 ++
 .../_default/conf/lang/stopwords_eu.txt         |   99 +
 .../_default/conf/lang/stopwords_fa.txt         |  313 ++
 .../_default/conf/lang/stopwords_fi.txt         |   97 +
 .../_default/conf/lang/stopwords_fr.txt         |  186 +
 .../_default/conf/lang/stopwords_ga.txt         |  110 +
 .../_default/conf/lang/stopwords_gl.txt         |  161 +
 .../_default/conf/lang/stopwords_hi.txt         |  235 +
 .../_default/conf/lang/stopwords_hu.txt         |  211 +
 .../_default/conf/lang/stopwords_hy.txt         |   46 +
 .../_default/conf/lang/stopwords_id.txt         |  359 ++
 .../_default/conf/lang/stopwords_it.txt         |  303 ++
 .../_default/conf/lang/stopwords_ja.txt         |  127 +
 .../_default/conf/lang/stopwords_lv.txt         |  172 +
 .../_default/conf/lang/stopwords_nl.txt         |  119 +
 .../_default/conf/lang/stopwords_no.txt         |  194 +
 .../_default/conf/lang/stopwords_pt.txt         |  253 +
 .../_default/conf/lang/stopwords_ro.txt         |  233 +
 .../_default/conf/lang/stopwords_ru.txt         |  243 +
 .../_default/conf/lang/stopwords_sv.txt         |  133 +
 .../_default/conf/lang/stopwords_th.txt         |  119 +
 .../_default/conf/lang/stopwords_tr.txt         |  212 +
 .../_default/conf/lang/userdict_ja.txt          |   29 +
 .../configsets/_default/conf/managed-schema     | 1007 ++++
 .../solr/configsets/_default/conf/params.json   |   20 +
 .../solr/configsets/_default/conf/protwords.txt |   21 +
 .../configsets/_default/conf/solrconfig.xml     | 1355 +++++
 .../solr/configsets/_default/conf/stopwords.txt |   14 +
 .../solr/configsets/_default/conf/synonyms.txt  |   29 +
 .../solr/configsets/backcompat/conf/schema.xml  |   22 +
 .../configsets/backcompat/conf/solrconfig.xml   |   43 +
 .../configsets/bad-mergepolicy/conf/schema.xml  |   21 +
 .../bad-mergepolicy/conf/solrconfig.xml         |   36 +
 .../cdcr-cluster1/conf/managed-schema           |   29 +
 .../cdcr-cluster1/conf/solrconfig.xml           |   80 +
 .../cdcr-cluster2/conf/managed-schema           |   29 +
 .../cdcr-cluster2/conf/solrconfig.xml           |   80 +
 .../cdcr-source-disabled/conf/schema.xml        |   29 +
 .../cdcr-source-disabled/conf/solrconfig.xml    |   60 +
 .../solr/configsets/cdcr-source/conf/schema.xml |   29 +
 .../configsets/cdcr-source/conf/solrconfig.xml  |   75 +
 .../solr/configsets/cdcr-target/conf/schema.xml |   29 +
 .../configsets/cdcr-target/conf/solrconfig.xml  |   62 +
 .../configsets/cloud-dynamic/conf/schema.xml    |  293 +
 .../cloud-dynamic/conf/solrconfig.xml           |   48 +
 .../solr/configsets/cloud-hdfs/conf/schema.xml  |   28 +
 .../configsets/cloud-hdfs/conf/solrconfig.xml   |   52 +
 .../conf/managed-schema                         |   41 +
 .../conf/solrconfig.xml                         |   51 +
 .../cloud-managed-upgrade/conf/schema.xml       |   27 +
 .../cloud-managed-upgrade/conf/solrconfig.xml   |   50 +
 .../cloud-managed/conf/managed-schema           |   27 +
 .../cloud-managed/conf/solrconfig.xml           |   51 +
 .../conf/schema.xml                             |   31 +
 .../conf/solrconfig.xml                         |   48 +
 .../cloud-minimal-jmx/conf/schema.xml           |   28 +
 .../cloud-minimal-jmx/conf/solrconfig.xml       |   50 +
 .../configsets/cloud-minimal/conf/schema.xml    |   29 +
 .../cloud-minimal/conf/solrconfig.xml           |   51 +
 .../configsets/cloud-subdirs/conf/schema.xml    |   28 +
 .../cloud-subdirs/conf/solrconfig.xml           |   48 +
 .../conf/stopwords/stopwords-en.txt             |   62 +
 .../solr/configsets/configset-2/conf/schema.xml |   25 +
 .../configsets/configset-2/conf/solrconfig.xml  |   49 +
 .../solr/configsets/doc-expiry/conf/schema.xml  |  287 +
 .../configsets/doc-expiry/conf/solrconfig.xml   |  107 +
 .../exitable-directory/conf/schema.xml          |   28 +
 .../exitable-directory/conf/solrconfig.xml      |  117 +
 .../solr/configsets/minimal/conf/schema.xml     |   21 +
 .../solr/configsets/minimal/conf/solrconfig.xml |   47 +
 .../solr/configsets/resource-sharing/schema.xml |   21 +
 .../configsets/resource-sharing/solrconfig.xml  |   51 +
 .../dih-script-transformer/managed-schema       |   25 +
 .../dih-script-transformer/solrconfig.xml       |   61 +
 .../configsets/upload/regular/managed-schema    |   25 +
 .../configsets/upload/regular/solrconfig.xml    |   61 +
 .../regular/xslt/xsl-update-handler-test.xsl    |   49 +
 .../upload/with-script-processor/managed-schema |   25 +
 ...missleading.extension.updateprocessor.js.txt |   23 +
 .../upload/with-script-processor/solrconfig.xml |   65 +
 .../resources/solr/crazy-path-to-config.xml     |   68 +
 .../resources/solr/crazy-path-to-schema.xml     |   44 +
 solr/core/src/test/resources/solr/external_eff  |   10 +
 .../solr/security/hadoop_kerberos_config.json   |   16 +
 .../hadoop_simple_auth_with_delegation.json     |   29 +
 .../src/test/resources/solr/solr-50-all.xml     |   64 +
 .../resources/solr/solr-gangliareporter.xml     |   32 +
 .../resources/solr/solr-graphitereporter.xml    |   31 +
 .../test/resources/solr/solr-hiddensysprops.xml |   31 +
 .../test/resources/solr/solr-jmxreporter.xml    |   43 +
 .../test/resources/solr/solr-metricreporter.xml |   57 +
 .../test/resources/solr/solr-metricsconfig.xml  |   61 +
 .../solr-shardhandler-loadBalancerRequests.xml  |   23 +
 .../test/resources/solr/solr-shardhandler.xml   |   29 +
 .../test/resources/solr/solr-slf4jreporter.xml  |   42 +
 .../test/resources/solr/solr-solrDataHome.xml   |   24 +
 .../test/resources/solr/solr-solrreporter.xml   |   70 +
 .../src/test/resources/solr/solr-stress-new.xml |   34 +
 .../solr/solr-trackingshardhandler.xml          |   48 +
 solr/core/src/test/resources/solr/solr.xml      |   54 +
 solr/core/src/test/resources/spellings.txt      |   16 +
 solr/server/build.gradle                        |   36 +
 solr/solrj/build.gradle                         |   21 +
 .../solr/client/solrj/ResponseParser.java       |   52 -
 .../apache/solr/client/solrj/SolrClient.java    | 1278 -----
 .../org/apache/solr/client/solrj/SolrQuery.java | 1364 -----
 .../apache/solr/client/solrj/SolrRequest.java   |  218 -
 .../apache/solr/client/solrj/SolrResponse.java  |   76 -
 .../solr/client/solrj/SolrServerException.java  |   53 -
 .../client/solrj/StreamingResponseCallback.java |   36 -
 .../solr/client/solrj/V2RequestSupport.java     |   30 -
 .../client/solrj/beans/BindingException.java    |   28 -
 .../solrj/beans/DocumentObjectBinder.java       |  478 --
 .../apache/solr/client/solrj/beans/Field.java   |   38 -
 .../solr/client/solrj/beans/package-info.java   |   23 -
 .../client/solrj/cloud/DistribStateManager.java |  148 -
 .../client/solrj/cloud/DistributedQueue.java    |   58 -
 .../solrj/cloud/DistributedQueueFactory.java    |   28 -
 .../client/solrj/cloud/NodeStateProvider.java   |   45 -
 .../client/solrj/cloud/SolrCloudManager.java    |   54 -
 .../cloud/autoscaling/AddReplicaSuggester.java  |   81 -
 .../autoscaling/AlreadyExistsException.java     |   35 -
 .../cloud/autoscaling/AutoScalingConfig.java    |  588 --
 .../cloud/autoscaling/BadVersionException.java  |   40 -
 .../client/solrj/cloud/autoscaling/Cell.java    |   74 -
 .../client/solrj/cloud/autoscaling/Clause.java  |  660 ---
 .../solrj/cloud/autoscaling/ComputedType.java   |   99 -
 .../solrj/cloud/autoscaling/Condition.java      |  120 -
 .../solrj/cloud/autoscaling/CoresVariable.java  |  115 -
 .../autoscaling/DelegatingCloudManager.java     |   93 -
 .../DelegatingClusterStateProvider.java         |  112 -
 .../DelegatingDistribStateManager.java          |  107 -
 .../DelegatingNodeStateProvider.java            |   56 -
 .../cloud/autoscaling/DeleteNodeSuggester.java  |   46 -
 .../autoscaling/DeleteReplicaSuggester.java     |   74 -
 .../cloud/autoscaling/FreeDiskVariable.java     |  175 -
 .../cloud/autoscaling/MoveReplicaSuggester.java |  110 -
 .../solrj/cloud/autoscaling/NodeVariable.java   |   44 -
 .../solrj/cloud/autoscaling/NoneSuggester.java  |   39 -
 .../cloud/autoscaling/NotEmptyException.java    |   35 -
 .../client/solrj/cloud/autoscaling/Operand.java |  209 -
 .../client/solrj/cloud/autoscaling/Policy.java  |  651 ---
 .../solrj/cloud/autoscaling/PolicyHelper.java   |  565 --
 .../solrj/cloud/autoscaling/Preference.java     |  148 -
 .../solrj/cloud/autoscaling/RangeVal.java       |   60 -
 .../solrj/cloud/autoscaling/ReplicaCount.java   |  111 -
 .../solrj/cloud/autoscaling/ReplicaInfo.java    |  182 -
 .../cloud/autoscaling/ReplicaVariable.java      |  150 -
 .../client/solrj/cloud/autoscaling/Row.java     |  270 -
 .../solrj/cloud/autoscaling/SealedClause.java   |   29 -
 .../cloud/autoscaling/SplitShardSuggester.java  |   49 -
 .../solrj/cloud/autoscaling/Suggester.java      |  489 --
 .../solrj/cloud/autoscaling/Suggestion.java     |  116 -
 .../autoscaling/TriggerEventProcessorStage.java |   30 -
 .../cloud/autoscaling/TriggerEventType.java     |   33 -
 .../cloud/autoscaling/UnsupportedSuggester.java |   59 -
 .../solrj/cloud/autoscaling/Variable.java       |  401 --
 .../solrj/cloud/autoscaling/VariableBase.java   |  202 -
 .../solrj/cloud/autoscaling/VersionedData.java  |   50 -
 .../solrj/cloud/autoscaling/Violation.java      |  181 -
 .../autoscaling/WithCollectionVariable.java     |  166 -
 .../solrj/cloud/autoscaling/package-info.java   |   23 -
 .../solr/client/solrj/cloud/package-info.java   |   23 -
 .../client/solrj/impl/BinaryRequestWriter.java  |   95 -
 .../client/solrj/impl/BinaryResponseParser.java |   71 -
 .../solr/client/solrj/impl/CloudSolrClient.java | 1605 ------
 .../client/solrj/impl/ClusterStateProvider.java |   85 -
 .../solrj/impl/ConcurrentUpdateSolrClient.java  |  890 ---
 .../impl/DelegationTokenHttpSolrClient.java     |  110 -
 .../solrj/impl/HttpClientBuilderFactory.java    |   41 -
 .../solr/client/solrj/impl/HttpClientUtil.java  |  516 --
 .../solrj/impl/HttpClusterStateProvider.java    |  312 --
 .../solr/client/solrj/impl/HttpSolrClient.java  |  968 ----
 .../solrj/impl/InputStreamResponseParser.java   |   52 -
 .../solrj/impl/Krb5HttpClientBuilder.java       |  189 -
 .../client/solrj/impl/LBHttpSolrClient.java     |  975 ----
 .../client/solrj/impl/NoOpResponseParser.java   |   82 -
 .../solr/client/solrj/impl/PreemptiveAuth.java  |   59 -
 ...PreemptiveBasicAuthClientBuilderFactory.java |  132 -
 .../client/solrj/impl/SolrClientBuilder.java    |   78 -
 .../solrj/impl/SolrClientCloudManager.java      |  182 -
 .../solrj/impl/SolrClientNodeStateProvider.java |  349 --
 .../solrj/impl/SolrHttpClientBuilder.java       |   91 -
 .../impl/SolrHttpClientContextBuilder.java      |   98 -
 .../solrj/impl/SolrHttpRequestRetryHandler.java |  161 -
 .../impl/SolrPortAwareCookieSpecFactory.java    |  113 -
 .../impl/StreamingBinaryResponseParser.java     |   90 -
 .../client/solrj/impl/XMLResponseParser.java    |  484 --
 .../impl/ZkClientClusterStateProvider.java      |  222 -
 .../solrj/impl/ZkDistribStateManager.java       |  197 -
 .../solr/client/solrj/impl/package-info.java    |   24 -
 .../solrj/io/ClassificationEvaluation.java      |   85 -
 .../org/apache/solr/client/solrj/io/Lang.java   |  330 --
 .../apache/solr/client/solrj/io/ModelCache.java |  147 -
 .../solr/client/solrj/io/SolrClientCache.java   |   99 -
 .../org/apache/solr/client/solrj/io/Tuple.java  |  224 -
 .../client/solrj/io/comp/ComparatorLambda.java  |   28 -
 .../client/solrj/io/comp/ComparatorOrder.java   |   47 -
 .../client/solrj/io/comp/FieldComparator.java   |  174 -
 .../solr/client/solrj/io/comp/HashKey.java      |   70 -
 .../solrj/io/comp/MultipleFieldComparator.java  |  138 -
 .../solrj/io/comp/SingleValueComparator.java    |   65 -
 .../client/solrj/io/comp/StreamComparator.java  |   31 -
 .../solr/client/solrj/io/comp/package-info.java |   25 -
 .../solr/client/solrj/io/eq/Equalitor.java      |   30 -
 .../solr/client/solrj/io/eq/FieldEqualitor.java |  130 -
 .../solrj/io/eq/MultipleFieldEqualitor.java     |  124 -
 .../client/solrj/io/eq/StreamEqualitor.java     |   29 -
 .../solr/client/solrj/io/eq/package-info.java   |   21 -
 .../solrj/io/eval/AbsoluteValueEvaluator.java   |   50 -
 .../solr/client/solrj/io/eval/AddEvaluator.java |   76 -
 .../client/solrj/io/eval/AkimaEvaluator.java    |   71 -
 .../solr/client/solrj/io/eval/AndEvaluator.java |   52 -
 .../client/solrj/io/eval/AnovaEvaluator.java    |   64 -
 .../client/solrj/io/eval/AppendEvaluator.java   |   53 -
 .../solrj/io/eval/ArcCosineEvaluator.java       |   50 -
 .../client/solrj/io/eval/ArcSineEvaluator.java  |   50 -
 .../solrj/io/eval/ArcTangentEvaluator.java      |   51 -
 .../client/solrj/io/eval/ArrayEvaluator.java    |   69 -
 .../solr/client/solrj/io/eval/AscEvaluator.java |   67 -
 .../solr/client/solrj/io/eval/Attributes.java   |   26 -
 .../io/eval/BetaDistributionEvaluator.java      |   48 -
 .../solrj/io/eval/BicubicSplineEvaluator.java   |   74 -
 .../io/eval/BinomialCoefficientEvaluator.java   |   46 -
 .../io/eval/BinomialDistributionEvaluator.java  |   48 -
 .../client/solrj/io/eval/CanberraEvaluator.java |   49 -
 .../client/solrj/io/eval/CeilingEvaluator.java  |   50 -
 .../io/eval/ChebyshevDistanceEvaluator.java     |   56 -
 .../solrj/io/eval/ChebyshevEvaluator.java       |   49 -
 .../io/eval/ChiSquareDataSetEvaluator.java      |   65 -
 .../client/solrj/io/eval/CoalesceEvaluator.java |   46 -
 .../client/solrj/io/eval/ColumnAtEvaluator.java |   55 -
 .../solrj/io/eval/ColumnCountEvaluator.java     |   42 -
 .../client/solrj/io/eval/ColumnEvaluator.java   |   77 -
 .../io/eval/ConstantDistributionEvaluator.java  |   44 -
 .../solrj/io/eval/ConversionEvaluator.java      |  160 -
 .../solrj/io/eval/ConvexHullEvaluator.java      |   62 -
 .../solrj/io/eval/ConvolutionEvaluator.java     |   58 -
 .../client/solrj/io/eval/CopyOfEvaluator.java   |   69 -
 .../solrj/io/eval/CopyOfRangeEvaluator.java     |   89 -
 .../solrj/io/eval/CorrelationEvaluator.java     |  137 -
 .../eval/CorrelationSignificanceEvaluator.java  |   57 -
 .../client/solrj/io/eval/CosineEvaluator.java   |   50 -
 .../io/eval/CosineSimilarityEvaluator.java      |   66 -
 .../solrj/io/eval/CovarianceEvaluator.java      |   57 -
 .../solrj/io/eval/CubedRootEvaluator.java       |   50 -
 .../io/eval/CumulativeProbabilityEvaluator.java |   59 -
 .../client/solrj/io/eval/DensityEvaluator.java  |   55 -
 .../solrj/io/eval/DerivativeEvaluator.java      |   63 -
 .../client/solrj/io/eval/DescribeEvaluator.java |   68 -
 .../client/solrj/io/eval/DistanceEvaluator.java |  128 -
 .../client/solrj/io/eval/DivideEvaluator.java   |   75 -
 .../solrj/io/eval/DotProductEvaluator.java      |   57 -
 .../client/solrj/io/eval/EBEAddEvaluator.java   |   68 -
 .../solrj/io/eval/EBEDivideEvaluator.java       |   62 -
 .../solrj/io/eval/EBEMultiplyEvaluator.java     |   62 -
 .../solrj/io/eval/EBESubtractEvaluator.java     |   67 -
 .../solrj/io/eval/EarthMoversEvaluator.java     |   49 -
 .../io/eval/EmpiricalDistributionEvaluator.java |   53 -
 .../solrj/io/eval/EnclosingDiskEvaluator.java   |   64 -
 .../eval/EnumeratedDistributionEvaluator.java   |   53 -
 .../client/solrj/io/eval/EqualToEvaluator.java  |   73 -
 .../io/eval/EuclideanDistanceEvaluator.java     |   56 -
 .../solrj/io/eval/EuclideanEvaluator.java       |   49 -
 .../solrj/io/eval/EvaluatorException.java       |   32 -
 .../solrj/io/eval/ExclusiveOrEvaluator.java     |   72 -
 .../eval/ExponentialMovingAverageEvaluator.java |   99 -
 .../solr/client/solrj/io/eval/FFTEvaluator.java |   73 -
 .../solrj/io/eval/FactorialEvaluator.java       |   53 -
 .../solrj/io/eval/FeatureSelectEvaluator.java   |   93 -
 .../solrj/io/eval/FieldValueEvaluator.java      |  107 -
 .../solrj/io/eval/FindDelayEvaluator.java       |   75 -
 .../client/solrj/io/eval/FloorEvaluator.java    |   50 -
 .../solrj/io/eval/FrequencyTableEvaluator.java  |   84 -
 .../solrj/io/eval/FuzzyKmeansEvaluator.java     |  106 -
 .../solrj/io/eval/GTestDataSetEvaluator.java    |   64 -
 .../io/eval/GammaDistributionEvaluator.java     |   48 -
 .../client/solrj/io/eval/GaussFitEvaluator.java |   91 -
 .../io/eval/GeometricDistributionEvaluator.java |   44 -
 .../solrj/io/eval/GetAmplitudeEvaluator.java    |   42 -
 .../io/eval/GetAngularFrequencyEvaluator.java   |   42 -
 .../client/solrj/io/eval/GetAreaEvaluator.java  |   43 -
 .../solrj/io/eval/GetAttributeEvaluator.java    |   43 -
 .../solrj/io/eval/GetAttributesEvaluator.java   |   42 -
 .../solrj/io/eval/GetBaryCenterEvaluator.java   |   50 -
 .../solrj/io/eval/GetBoundarySizeEvaluator.java |   43 -
 .../client/solrj/io/eval/GetCacheEvaluator.java |   57 -
 .../solrj/io/eval/GetCenterEvaluator.java       |   52 -
 .../solrj/io/eval/GetCentroidsEvaluator.java    |   55 -
 .../solrj/io/eval/GetClusterEvaluator.java      |   64 -
 .../solrj/io/eval/GetColumnLabelsEvaluator.java |   42 -
 .../io/eval/GetMembershipMatrixEvaluator.java   |   42 -
 .../client/solrj/io/eval/GetPhaseEvaluator.java |   42 -
 .../solrj/io/eval/GetRadiusEvaluator.java       |   44 -
 .../solrj/io/eval/GetRowLabelsEvaluator.java    |   42 -
 .../io/eval/GetSupportPointsEvaluator.java      |   56 -
 .../client/solrj/io/eval/GetValueEvaluator.java |   50 -
 .../solrj/io/eval/GetVerticesEvaluator.java     |   51 -
 .../client/solrj/io/eval/GrandSumEvaluator.java |   54 -
 .../io/eval/GreaterThanEqualToEvaluator.java    |   61 -
 .../solrj/io/eval/GreaterThanEvaluator.java     |   61 -
 .../solrj/io/eval/HarmonicFitEvaluator.java     |   93 -
 .../solrj/io/eval/HistogramEvaluator.java       |   90 -
 .../io/eval/HyperbolicCosineEvaluator.java      |   50 -
 .../solrj/io/eval/HyperbolicSineEvaluator.java  |   50 -
 .../io/eval/HyperbolicTangentEvaluator.java     |   52 -
 .../client/solrj/io/eval/IFFTEvaluator.java     |   71 -
 .../solrj/io/eval/IfThenElseEvaluator.java      |   48 -
 .../client/solrj/io/eval/IndexOfEvaluator.java  |   51 -
 .../solrj/io/eval/IntegrateEvaluator.java       |   70 -
 .../client/solrj/io/eval/KmeansEvaluator.java   |  149 -
 .../solr/client/solrj/io/eval/KnnEvaluator.java |  155 -
 .../solrj/io/eval/KnnRegressionEvaluator.java   |  238 -
 .../io/eval/KolmogorovSmirnovEvaluator.java     |   73 -
 .../client/solrj/io/eval/L1NormEvaluator.java   |   56 -
 .../client/solrj/io/eval/LInfNormEvaluator.java |   56 -
 .../solrj/io/eval/LatLonVectorsEvaluator.java   |  115 -
 .../client/solrj/io/eval/LengthEvaluator.java   |   49 -
 .../client/solrj/io/eval/LerpEvaluator.java     |   71 -
 .../solrj/io/eval/LessThanEqualToEvaluator.java |   61 -
 .../client/solrj/io/eval/LessThanEvaluator.java |   61 -
 .../solrj/io/eval/ListCacheEvaluator.java       |   73 -
 .../client/solrj/io/eval/LoessEvaluator.java    |  105 -
 .../client/solrj/io/eval/Log10Evaluator.java    |   50 -
 .../io/eval/LogNormalDistributionEvaluator.java |   48 -
 .../solrj/io/eval/ManhattanEvaluator.java       |   49 -
 .../solrj/io/eval/MannWhitneyUEvaluator.java    |   62 -
 .../client/solrj/io/eval/ManyValueWorker.java   |   25 -
 .../solrj/io/eval/MarkovChainEvaluator.java     |  100 -
 .../solr/client/solrj/io/eval/Matrix.java       |  105 -
 .../client/solrj/io/eval/MatrixEvaluator.java   |   52 -
 .../solrj/io/eval/MatrixMultiplyEvaluator.java  |   65 -
 .../solrj/io/eval/MeanDifferenceEvaluator.java  |   54 -
 .../client/solrj/io/eval/MeanEvaluator.java     |   56 -
 .../client/solrj/io/eval/MemsetEvaluator.java   |  167 -
 .../solrj/io/eval/MinMaxScaleEvaluator.java     |  115 -
 .../client/solrj/io/eval/ModeEvaluator.java     |   64 -
 .../client/solrj/io/eval/ModuloEvaluator.java   |   75 -
 .../solrj/io/eval/MonteCarloEvaluator.java      |  140 -
 .../solrj/io/eval/MovingAverageEvaluator.java   |   66 -
 .../solrj/io/eval/MovingMedianEvaluator.java    |   68 -
 .../solrj/io/eval/MultiKmeansEvaluator.java     |  108 -
 ...MultiVariateNormalDistributionEvaluator.java |   54 -
 .../client/solrj/io/eval/MultiplyEvaluator.java |   76 -
 .../solrj/io/eval/NaturalLogEvaluator.java      |   50 -
 .../client/solrj/io/eval/NormEvaluator.java     |   56 -
 .../io/eval/NormalDistributionEvaluator.java    |   48 -
 .../solrj/io/eval/NormalizeEvaluator.java       |   61 -
 .../solrj/io/eval/NormalizeSumEvaluator.java    |   84 -
 .../solr/client/solrj/io/eval/NotEvaluator.java |   62 -
 .../solrj/io/eval/OLSRegressionEvaluator.java   |  123 -
 .../client/solrj/io/eval/OneValueWorker.java    |   34 -
 .../client/solrj/io/eval/OnesEvaluator.java     |   47 -
 .../solr/client/solrj/io/eval/OrEvaluator.java  |   52 -
 .../solrj/io/eval/OscillateEvaluator.java       |   57 -
 .../client/solrj/io/eval/OutliersEvaluator.java |  138 -
 .../client/solrj/io/eval/PairSortEvaluator.java |   93 -
 .../solrj/io/eval/PairedTTestEvaluator.java     |   75 -
 .../solrj/io/eval/PercentileEvaluator.java      |   54 -
 .../io/eval/PoissonDistributionEvaluator.java   |   44 -
 .../io/eval/PolyFitDerivativeEvaluator.java     |  103 -
 .../client/solrj/io/eval/PolyFitEvaluator.java  |  106 -
 .../client/solrj/io/eval/PowerEvaluator.java    |   92 -
 .../solrj/io/eval/PrecisionEvaluator.java       |   51 -
 .../client/solrj/io/eval/PredictEvaluator.java  |  160 -
 .../client/solrj/io/eval/PrimesEvaluator.java   |   58 -
 .../solrj/io/eval/ProbabilityEvaluator.java     |   86 -
 .../client/solrj/io/eval/PutCacheEvaluator.java |   61 -
 .../client/solrj/io/eval/RankEvaluator.java     |   53 -
 .../client/solrj/io/eval/RawValueEvaluator.java |   87 -
 .../io/eval/RecursiveBooleanEvaluator.java      |  112 -
 .../solrj/io/eval/RecursiveEvaluator.java       |  257 -
 .../io/eval/RecursiveNumericEvaluator.java      |   76 -
 .../io/eval/RecursiveNumericListEvaluator.java  |   67 -
 .../solrj/io/eval/RecursiveObjectEvaluator.java |   35 -
 .../io/eval/RecursiveTemporalEvaluator.java     |  114 -
 .../solrj/io/eval/RegressionEvaluator.java      |   92 -
 .../solrj/io/eval/RemoveCacheEvaluator.java     |   57 -
 .../client/solrj/io/eval/ReverseEvaluator.java  |   57 -
 .../client/solrj/io/eval/RoundEvaluator.java    |   50 -
 .../client/solrj/io/eval/RowAtEvaluator.java    |   56 -
 .../client/solrj/io/eval/RowCountEvaluator.java |   42 -
 .../client/solrj/io/eval/SampleEvaluator.java   |  101 -
 .../solrj/io/eval/ScalarAddEvaluator.java       |   74 -
 .../solrj/io/eval/ScalarDivideEvaluator.java    |   39 -
 .../solrj/io/eval/ScalarMultiplyEvaluator.java  |   39 -
 .../solrj/io/eval/ScalarSubtractEvaluator.java  |   39 -
 .../client/solrj/io/eval/ScaleEvaluator.java    |   62 -
 .../client/solrj/io/eval/SequenceEvaluator.java |   56 -
 .../solrj/io/eval/SetColumnLabelsEvaluator.java |   47 -
 .../solrj/io/eval/SetRowLabelsEvaluator.java    |   47 -
 .../client/solrj/io/eval/SetValueEvaluator.java |   58 -
 .../client/solrj/io/eval/SineEvaluator.java     |   50 -
 .../client/solrj/io/eval/SourceEvaluator.java   |   36 -
 .../client/solrj/io/eval/SplineEvaluator.java   |   71 -
 .../solrj/io/eval/SquareRootEvaluator.java      |   50 -
 .../client/solrj/io/eval/StreamEvaluator.java   |   53 -
 .../solrj/io/eval/StreamEvaluatorException.java |   32 -
 .../client/solrj/io/eval/SubtractEvaluator.java |   95 -
 .../solrj/io/eval/SumColumnsEvaluator.java      |   68 -
 .../solrj/io/eval/SumDifferenceEvaluator.java   |   54 -
 .../client/solrj/io/eval/SumRowsEvaluator.java  |   63 -
 .../client/solrj/io/eval/SumSqEvaluator.java    |   56 -
 .../client/solrj/io/eval/TTestEvaluator.java    |   95 -
 .../client/solrj/io/eval/TangentEvaluator.java  |   50 -
 .../solrj/io/eval/TemporalEvaluatorDay.java     |   44 -
 .../io/eval/TemporalEvaluatorDayOfQuarter.java  |   44 -
 .../io/eval/TemporalEvaluatorDayOfYear.java     |   44 -
 .../solrj/io/eval/TemporalEvaluatorEpoch.java   |   45 -
 .../solrj/io/eval/TemporalEvaluatorHour.java    |   44 -
 .../solrj/io/eval/TemporalEvaluatorMinute.java  |   44 -
 .../solrj/io/eval/TemporalEvaluatorMonth.java   |   44 -
 .../solrj/io/eval/TemporalEvaluatorQuarter.java |   44 -
 .../solrj/io/eval/TemporalEvaluatorSecond.java  |   44 -
 .../solrj/io/eval/TemporalEvaluatorWeek.java    |   44 -
 .../solrj/io/eval/TemporalEvaluatorYear.java    |   44 -
 .../solrj/io/eval/TermVectorsEvaluator.java     |  188 -
 .../io/eval/TimeDifferencingEvaluator.java      |   69 -
 .../solrj/io/eval/TopFeaturesEvaluator.java     |  112 -
 .../solrj/io/eval/TransposeEvaluator.java       |   56 -
 .../eval/TriangularDistributionEvaluator.java   |   46 -
 .../client/solrj/io/eval/TwoValueWorker.java    |   34 -
 .../io/eval/UniformDistributionEvaluator.java   |   48 -
 .../UniformIntegerDistributionEvaluator.java    |   48 -
 .../client/solrj/io/eval/UnitEvaluator.java     |   80 -
 .../client/solrj/io/eval/UuidEvaluator.java     |   54 -
 .../client/solrj/io/eval/ValueAtEvaluator.java  |   74 -
 .../solr/client/solrj/io/eval/ValueWorker.java  |   25 -
 .../client/solrj/io/eval/VectorFunction.java    |   55 -
 .../io/eval/WeibullDistributionEvaluator.java   |   48 -
 .../client/solrj/io/eval/ZerosEvaluator.java    |   47 -
 .../io/eval/ZipFDistributionEvaluator.java      |   48 -
 .../solr/client/solrj/io/eval/package-info.java |   21 -
 .../solrj/io/graph/GatherNodesStream.java       |  682 ---
 .../apache/solr/client/solrj/io/graph/Node.java |   90 -
 .../solrj/io/graph/ShortestPathStream.java      |  522 --
 .../solr/client/solrj/io/graph/Traversal.java   |   96 -
 .../solrj/io/graph/TraversalIterator.java       |  120 -
 .../client/solrj/io/graph/package-info.java     |   22 -
 .../client/solrj/io/ops/ConcatOperation.java    |  110 -
 .../client/solrj/io/ops/DistinctOperation.java  |   75 -
 .../client/solrj/io/ops/GroupOperation.java     |  148 -
 .../client/solrj/io/ops/ReduceOperation.java    |   23 -
 .../client/solrj/io/ops/ReplaceOperation.java   |   89 -
 .../solrj/io/ops/ReplaceWithFieldOperation.java |  123 -
 .../solrj/io/ops/ReplaceWithValueOperation.java |  128 -
 .../client/solrj/io/ops/StreamOperation.java    |   29 -
 .../solr/client/solrj/io/ops/package-info.java  |   25 -
 .../solr/client/solrj/io/package-info.java      |   24 -
 .../client/solrj/io/sql/ConnectionImpl.java     |  393 --
 .../solrj/io/sql/DatabaseMetaDataImpl.java      |  987 ----
 .../solr/client/solrj/io/sql/DriverImpl.java    |  132 -
 .../solrj/io/sql/PreparedStatementImpl.java     |  394 --
 .../solr/client/solrj/io/sql/ResultSetImpl.java | 1250 -----
 .../solrj/io/sql/ResultSetMetaDataImpl.java     |  181 -
 .../solr/client/solrj/io/sql/StatementImpl.java |  377 --
 .../solr/client/solrj/io/sql/package-info.java  |   51 -
 .../client/solrj/io/stream/BiJoinStream.java    |  162 -
 .../solrj/io/stream/CalculatorStream.java       |  112 -
 .../solrj/io/stream/CartesianProductStream.java |  310 --
 .../solr/client/solrj/io/stream/CellStream.java |  155 -
 .../client/solrj/io/stream/CloudSolrStream.java |  518 --
 .../client/solrj/io/stream/CommitStream.java    |  261 -
 .../solrj/io/stream/ComplementStream.java       |  207 -
 .../client/solrj/io/stream/DaemonStream.java    |  379 --
 .../solr/client/solrj/io/stream/EchoStream.java |  122 -
 .../solr/client/solrj/io/stream/EvalStream.java |  141 -
 .../client/solrj/io/stream/ExceptionStream.java |  102 -
 .../client/solrj/io/stream/ExecutorStream.java  |  227 -
 .../client/solrj/io/stream/FacetStream.java     |  538 --
 .../io/stream/FeaturesSelectionStream.java      |  442 --
 .../client/solrj/io/stream/FetchStream.java     |  312 --
 .../solr/client/solrj/io/stream/GetStream.java  |  124 -
 .../client/solrj/io/stream/HashJoinStream.java  |  287 -
 .../client/solrj/io/stream/HavingStream.java    |  173 -
 .../client/solrj/io/stream/InnerJoinStream.java |  109 -
 .../client/solrj/io/stream/IntersectStream.java |  202 -
 .../solr/client/solrj/io/stream/JDBCStream.java |  605 ---
 .../client/solrj/io/stream/JSONTupleStream.java |  187 -
 .../io/stream/JavabinTupleStreamParser.java     |  189 -
 .../solr/client/solrj/io/stream/JoinStream.java |  227 -
 .../solr/client/solrj/io/stream/KnnStream.java  |  259 -
 .../solrj/io/stream/LeftOuterJoinStream.java    |  110 -
 .../solr/client/solrj/io/stream/LetStream.java  |  221 -
 .../solr/client/solrj/io/stream/ListStream.java |  148 -
 .../client/solrj/io/stream/MergeStream.java     |  250 -
 .../client/solrj/io/stream/ModelStream.java     |  203 -
 .../solr/client/solrj/io/stream/NullStream.java |  156 -
 .../solrj/io/stream/OuterHashJoinStream.java    |  133 -
 .../client/solrj/io/stream/ParallelStream.java  |  279 -
 .../solr/client/solrj/io/stream/PlotStream.java |  224 -
 .../client/solrj/io/stream/PriorityStream.java  |  157 -
 .../client/solrj/io/stream/PushBackStream.java  |  102 -
 .../client/solrj/io/stream/RandomStream.java    |  243 -
 .../solr/client/solrj/io/stream/RankStream.java |  232 -
 .../client/solrj/io/stream/ReducerStream.java   |  248 -
 .../client/solrj/io/stream/RollupStream.java    |  276 -
 .../solrj/io/stream/ScoreNodesStream.java       |  282 -
 .../client/solrj/io/stream/SelectStream.java    |  302 --
 .../client/solrj/io/stream/ShuffleStream.java   |  106 -
 .../solrj/io/stream/SignificantTermsStream.java |  407 --
 .../solr/client/solrj/io/stream/SolrStream.java |  273 -
 .../solr/client/solrj/io/stream/SortStream.java |  200 -
 .../solr/client/solrj/io/stream/SqlStream.java  |  221 -
 .../client/solrj/io/stream/StatsStream.java     |  363 --
 .../client/solrj/io/stream/StreamContext.java   |  104 -
 .../client/solrj/io/stream/TextLogitStream.java |  662 ---
 .../solrj/io/stream/TimeSeriesStream.java       |  421 --
 .../client/solrj/io/stream/TopicStream.java     |  552 --
 .../solr/client/solrj/io/stream/TupStream.java  |  220 -
 .../client/solrj/io/stream/TupleStream.java     |  166 -
 .../solrj/io/stream/TupleStreamParser.java      |   27 -
 .../client/solrj/io/stream/UniqueStream.java    |  173 -
 .../client/solrj/io/stream/UpdateStream.java    |  324 --
 .../io/stream/expr/DefaultStreamFactory.java    |   33 -
 .../solrj/io/stream/expr/Explanation.java       |  162 -
 .../solrj/io/stream/expr/Expressible.java       |   37 -
 .../solrj/io/stream/expr/StreamExplanation.java |   71 -
 .../solrj/io/stream/expr/StreamExpression.java  |  126 -
 .../expr/StreamExpressionNamedParameter.java    |  108 -
 .../stream/expr/StreamExpressionParameter.java  |   24 -
 .../io/stream/expr/StreamExpressionParser.java  |  350 --
 .../io/stream/expr/StreamExpressionValue.java   |   65 -
 .../solrj/io/stream/expr/StreamFactory.java     |  447 --
 .../solrj/io/stream/expr/package-info.java      |   27 -
 .../client/solrj/io/stream/metrics/Bucket.java  |   44 -
 .../solrj/io/stream/metrics/CountMetric.java    |   85 -
 .../solrj/io/stream/metrics/MaxMetric.java      |   93 -
 .../solrj/io/stream/metrics/MeanMetric.java     |  112 -
 .../client/solrj/io/stream/metrics/Metric.java  |   75 -
 .../solrj/io/stream/metrics/MinMetric.java      |   94 -
 .../solrj/io/stream/metrics/SumMetric.java      |   89 -
 .../solrj/io/stream/metrics/package-info.java   |   24 -
 .../client/solrj/io/stream/package-info.java    |   27 -
 .../apache/solr/client/solrj/package-info.java  |   23 -
 .../solrj/request/AbstractUpdateRequest.java    |  144 -
 .../solrj/request/CollectionAdminRequest.java   | 2509 ---------
 .../solrj/request/CollectionApiMapping.java     |  474 --
 .../solrj/request/ConfigSetAdminRequest.java    |  179 -
 .../request/ContentStreamUpdateRequest.java     |   96 -
 .../client/solrj/request/CoreAdminRequest.java  |  692 ---
 .../client/solrj/request/CoreApiMapping.java    |  117 -
 .../solr/client/solrj/request/CoreStatus.java   |   48 -
 .../solrj/request/DelegationTokenRequest.java   |  140 -
 .../client/solrj/request/DirectXmlRequest.java  |   62 -
 .../solrj/request/DocumentAnalysisRequest.java  |  212 -
 .../solrj/request/FieldAnalysisRequest.java     |  256 -
 .../solrj/request/GenericSolrRequest.java       |   54 -
 .../solrj/request/HealthCheckRequest.java       |   52 -
 .../client/solrj/request/IsUpdateRequest.java   |   24 -
 .../request/JavaBinUpdateRequestCodec.java      |  290 -
 .../solr/client/solrj/request/LukeRequest.java  |  113 -
 .../request/MultiContentWriterRequest.java      |  124 -
 .../solr/client/solrj/request/QueryRequest.java |   80 -
 .../client/solrj/request/RequestWriter.java     |  139 -
 .../solr/client/solrj/request/SolrPing.java     |  102 -
 .../solrj/request/StreamingUpdateRequest.java   |   76 -
 .../client/solrj/request/UpdateRequest.java     |  542 --
 .../client/solrj/request/V1toV2ApiMapper.java   |  145 -
 .../solr/client/solrj/request/V2Request.java    |  167 -
 .../solr/client/solrj/request/package-info.java |   23 -
 .../request/schema/AbstractSchemaRequest.java   |   40 -
 .../request/schema/AnalyzerDefinition.java      |   62 -
 .../request/schema/FieldTypeDefinition.java     |   86 -
 .../solrj/request/schema/SchemaRequest.java     |  780 ---
 .../solrj/request/schema/package-info.java      |   23 -
 .../solrj/response/AnalysisResponseBase.java    |  270 -
 .../solr/client/solrj/response/Cluster.java     |  107 -
 .../solrj/response/ClusteringResponse.java      |   77 -
 .../solrj/response/CollectionAdminResponse.java |  101 -
 .../solrj/response/ConfigSetAdminResponse.java  |   37 -
 .../solrj/response/CoreAdminResponse.java       |   57 -
 .../solrj/response/DelegationTokenResponse.java |  109 -
 .../response/DocumentAnalysisResponse.java      |  254 -
 .../solr/client/solrj/response/FacetField.java  |  148 -
 .../solrj/response/FieldAnalysisResponse.java   |  200 -
 .../client/solrj/response/FieldStatsInfo.java   |  227 -
 .../solr/client/solrj/response/Group.java       |   68 -
 .../client/solrj/response/GroupCommand.java     |  124 -
 .../client/solrj/response/GroupResponse.java    |   55 -
 .../solrj/response/HealthCheckResponse.java     |   39 -
 .../client/solrj/response/IntervalFacet.java    |   85 -
 .../client/solrj/response/LukeResponse.java     |  309 --
 .../solr/client/solrj/response/PivotField.java  |  106 -
 .../client/solrj/response/QueryResponse.java    |  641 ---
 .../solr/client/solrj/response/RangeFacet.java  |  131 -
 .../solrj/response/RequestStatusState.java      |   73 -
 .../solrj/response/SimpleSolrResponse.java      |   47 -
 .../client/solrj/response/SolrPingResponse.java |   27 -
 .../client/solrj/response/SolrResponseBase.java |   98 -
 .../solrj/response/SpellCheckResponse.java      |  272 -
 .../solrj/response/SuggesterResponse.java       |   86 -
 .../solr/client/solrj/response/Suggestion.java  |   63 -
 .../client/solrj/response/TermsResponse.java    |  116 -
 .../client/solrj/response/UpdateResponse.java   |   29 -
 .../solr/client/solrj/response/V2Response.java  |   22 -
 .../client/solrj/response/package-info.java     |   24 -
 .../schema/FieldTypeRepresentation.java         |   46 -
 .../response/schema/SchemaRepresentation.java   |  107 -
 .../solrj/response/schema/SchemaResponse.java   |  435 --
 .../solrj/response/schema/package-info.java     |   23 -
 .../solr/client/solrj/util/ClientUtils.java     |  209 -
 .../solrj/util/SolrIdentifierValidator.java     |   72 -
 .../solr/client/solrj/util/package-info.java    |   23 -
 .../java/org/apache/solr/common/Callable.java   |   21 -
 .../solr/common/ConditionalMapWriter.java       |   82 -
 .../apache/solr/common/EmptyEntityResolver.java |   96 -
 .../org/apache/solr/common/EnumFieldValue.java  |  115 -
 .../org/apache/solr/common/IteratorWriter.java  |   93 -
 .../apache/solr/common/LinkedHashMapWriter.java |   60 -
 .../org/apache/solr/common/MapSerializable.java |   30 -
 .../java/org/apache/solr/common/MapWriter.java  |  195 -
 .../org/apache/solr/common/MapWriterMap.java    |   53 -
 .../solr/common/NonExistentCoreException.java   |   25 -
 .../java/org/apache/solr/common/PushWriter.java |   42 -
 .../org/apache/solr/common/SolrCloseable.java   |   30 -
 .../apache/solr/common/SolrCloseableLatch.java  |   65 -
 .../org/apache/solr/common/SolrDocument.java    |  406 --
 .../apache/solr/common/SolrDocumentBase.java    |   73 -
 .../apache/solr/common/SolrDocumentList.java    |   67 -
 .../org/apache/solr/common/SolrException.java   |  258 -
 .../apache/solr/common/SolrInputDocument.java   |  276 -
 .../org/apache/solr/common/SolrInputField.java  |  213 -
 .../org/apache/solr/common/SpecProvider.java    |   25 -
 .../org/apache/solr/common/StringUtils.java     |   35 -
 .../solr/common/ToleratedUpdateError.java       |  199 -
 .../org/apache/solr/common/cloud/Aliases.java   |  288 -
 .../solr/common/cloud/BeforeReconnect.java      |   21 -
 .../common/cloud/CloudCollectionsListener.java  |   40 -
 .../solr/common/cloud/ClusterProperties.java    |  208 -
 .../apache/solr/common/cloud/ClusterState.java  |  411 --
 .../solr/common/cloud/ClusterStateUtil.java     |  257 -
 .../solr/common/cloud/CollectionProperties.java |  117 -
 .../common/cloud/CollectionPropsWatcher.java    |   40 -
 .../common/cloud/CollectionStatePredicate.java  |   42 -
 .../common/cloud/CollectionStateWatcher.java    |   43 -
 .../solr/common/cloud/CompositeIdRouter.java    |  326 --
 .../solr/common/cloud/ConnectionManager.java    |  261 -
 .../common/cloud/DefaultConnectionStrategy.java |   75 -
 .../solr/common/cloud/DefaultZkACLProvider.java |   44 -
 .../cloud/DefaultZkCredentialsProvider.java     |   40 -
 .../solr/common/cloud/DistributedQueue.java     |   42 -
 .../apache/solr/common/cloud/DocCollection.java |  418 --
 .../org/apache/solr/common/cloud/DocRouter.java |  217 -
 .../solr/common/cloud/HashBasedRouter.java      |   83 -
 .../solr/common/cloud/ImplicitDocRouter.java    |   98 -
 .../solr/common/cloud/LiveNodesListener.java    |   38 -
 .../apache/solr/common/cloud/OnReconnect.java   |   28 -
 .../apache/solr/common/cloud/PlainIdRouter.java |   22 -
 .../org/apache/solr/common/cloud/Replica.java   |  188 -
 .../solr/common/cloud/ReplicaPosition.java      |   55 -
 .../apache/solr/common/cloud/RoutingRule.java   |   77 -
 .../solr/common/cloud/SaslZkACLProvider.java    |   51 -
 .../cloud/SecurityAwareZkACLProvider.java       |   79 -
 .../org/apache/solr/common/cloud/Slice.java     |  288 -
 .../apache/solr/common/cloud/SolrZkClient.java  |  831 ---
 .../apache/solr/common/cloud/SolrZooKeeper.java |  111 -
 ...ParamsAllAndReadonlyDigestZkACLProvider.java |  117 -
 ...tCredentialsDigestZkCredentialsProvider.java |   59 -
 .../apache/solr/common/cloud/ZkACLProvider.java |   27 -
 .../cloud/ZkClientConnectionStrategy.java       |  115 -
 .../apache/solr/common/cloud/ZkCmdExecutor.java |  114 -
 .../solr/common/cloud/ZkConfigManager.java      |  220 -
 .../solr/common/cloud/ZkCoreNodeProps.java      |   73 -
 .../common/cloud/ZkCredentialsProvider.java     |   44 -
 .../solr/common/cloud/ZkMaintenanceUtils.java   |  471 --
 .../apache/solr/common/cloud/ZkNodeProps.java   |  160 -
 .../apache/solr/common/cloud/ZkOperation.java   |   33 -
 .../apache/solr/common/cloud/ZkStateReader.java | 1864 -------
 .../solr/common/cloud/ZooKeeperException.java   |   31 -
 .../apache/solr/common/cloud/package-info.java  |   23 -
 .../solr/common/cloud/rule/ImplicitSnitch.java  |  191 -
 .../solr/common/cloud/rule/RemoteCallback.java  |   23 -
 .../apache/solr/common/cloud/rule/Snitch.java   |   32 -
 .../solr/common/cloud/rule/SnitchContext.java   |  106 -
 .../solr/common/cloud/rule/package-info.java    |   23 -
 .../org/apache/solr/common/luke/FieldFlag.java  |   69 -
 .../apache/solr/common/luke/package-info.java   |   23 -
 .../org/apache/solr/common/package-info.java    |   23 -
 .../solr/common/params/AnalysisParams.java      |   59 -
 .../solr/common/params/AppendedSolrParams.java  |   54 -
 .../solr/common/params/AutoScalingParams.java   |   75 -
 .../common/params/CollectionAdminParams.java    |  112 -
 .../solr/common/params/CollectionParams.java    |  155 -
 .../solr/common/params/CommonAdminParams.java   |   32 -
 .../apache/solr/common/params/CommonParams.java |  303 --
 .../solr/common/params/ConfigSetParams.java     |   52 -
 .../solr/common/params/CoreAdminParams.java     |  187 -
 .../solr/common/params/CursorMarkParams.java    |   47 -
 .../solr/common/params/DefaultSolrParams.java   |   67 -
 .../apache/solr/common/params/DisMaxParams.java |   82 -
 .../apache/solr/common/params/EventParams.java  |   27 -
 .../apache/solr/common/params/ExpandParams.java |   31 -
 .../apache/solr/common/params/FacetParams.java  |  492 --
 .../apache/solr/common/params/GroupParams.java  |   70 -
 .../solr/common/params/HighlightParams.java     |   94 -
 .../solr/common/params/MapSolrParams.java       |   59 -
 .../common/params/ModifiableSolrParams.java     |  214 -
 .../solr/common/params/MoreLikeThisParams.java  |   76 -
 .../solr/common/params/MultiMapSolrParams.java  |  112 -
 .../common/params/QueryElevationParams.java     |   58 -
 .../solr/common/params/RequiredSolrParams.java  |  154 -
 .../apache/solr/common/params/ShardParams.java  |   97 -
 .../apache/solr/common/params/SimpleParams.java |   49 -
 .../apache/solr/common/params/SolrParams.java   |  636 ---
 .../solr/common/params/SpatialParams.java       |   39 -
 .../solr/common/params/SpellingParams.java      |  183 -
 .../apache/solr/common/params/StatsParams.java  |   27 -
 .../solr/common/params/TermVectorParams.java    |   69 -
 .../apache/solr/common/params/TermsParams.java  |  137 -
 .../apache/solr/common/params/UpdateParams.java |   71 -
 .../apache/solr/common/params/package-info.java |   22 -
 .../org/apache/solr/common/util/Base64.java     |  157 -
 .../org/apache/solr/common/util/ByteUtils.java  |  225 -
 .../java/org/apache/solr/common/util/Cache.java |   46 -
 .../solr/common/util/CommandOperation.java      |  389 --
 .../apache/solr/common/util/ContentStream.java  |   80 -
 .../solr/common/util/ContentStreamBase.java     |  348 --
 .../solr/common/util/DataInputInputStream.java  |   26 -
 .../apache/solr/common/util/ExecutorUtil.java   |  249 -
 .../solr/common/util/FastInputStream.java       |  267 -
 .../solr/common/util/FastOutputStream.java      |  232 -
 .../org/apache/solr/common/util/FastWriter.java |  157 -
 .../java/org/apache/solr/common/util/Hash.java  |  519 --
 .../org/apache/solr/common/util/IOUtils.java    |   37 -
 .../apache/solr/common/util/JavaBinCodec.java   | 1204 -----
 .../solr/common/util/JsonRecordReader.java      |  633 ---
 .../solr/common/util/JsonSchemaValidator.java   |  315 --
 .../apache/solr/common/util/JsonTextWriter.java |  499 --
 .../apache/solr/common/util/MapBackedCache.java |   57 -
 .../org/apache/solr/common/util/NamedList.java  |  852 ---
 .../apache/solr/common/util/ObjectCache.java    |   91 -
 .../solr/common/util/ObjectReleaseTracker.java  |  106 -
 .../java/org/apache/solr/common/util/Pair.java  |   73 -
 .../org/apache/solr/common/util/PathTrie.java   |  195 -
 .../org/apache/solr/common/util/RetryUtil.java  |   96 -
 .../solr/common/util/SimpleOrderedMap.java      |   75 -
 .../apache/solr/common/util/SolrJSONWriter.java |  114 -
 .../common/util/SolrjNamedThreadFactory.java    |   49 -
 .../org/apache/solr/common/util/StrUtils.java   |  320 --
 .../solr/common/util/SuppressForbidden.java     |   33 -
 .../org/apache/solr/common/util/Template.java   |   66 -
 .../org/apache/solr/common/util/TextWriter.java |  231 -
 .../org/apache/solr/common/util/TimeSource.java |  256 -
 .../org/apache/solr/common/util/URLUtil.java    |   49 -
 .../java/org/apache/solr/common/util/Utils.java |  692 ---
 .../solr/common/util/ValidatingJsonMap.java     |  355 --
 .../apache/solr/common/util/WriteableValue.java |   25 -
 .../java/org/apache/solr/common/util/XML.java   |  156 -
 .../apache/solr/common/util/XMLErrorLogger.java |   83 -
 .../apache/solr/common/util/package-info.java   |   23 -
 solr/solrj/src/java/overview.html               |   21 -
 .../solr/client/solrj/ResponseParser.java       |   52 +
 .../apache/solr/client/solrj/SolrClient.java    | 1278 +++++
 .../org/apache/solr/client/solrj/SolrQuery.java | 1364 +++++
 .../apache/solr/client/solrj/SolrRequest.java   |  218 +
 .../apache/solr/client/solrj/SolrResponse.java  |   76 +
 .../solr/client/solrj/SolrServerException.java  |   53 +
 .../client/solrj/StreamingResponseCallback.java |   36 +
 .../solr/client/solrj/V2RequestSupport.java     |   30 +
 .../client/solrj/beans/BindingException.java    |   28 +
 .../solrj/beans/DocumentObjectBinder.java       |  478 ++
 .../apache/solr/client/solrj/beans/Field.java   |   38 +
 .../solr/client/solrj/beans/package-info.java   |   23 +
 .../client/solrj/cloud/DistribStateManager.java |  148 +
 .../client/solrj/cloud/DistributedQueue.java    |   58 +
 .../solrj/cloud/DistributedQueueFactory.java    |   28 +
 .../client/solrj/cloud/NodeStateProvider.java   |   45 +
 .../client/solrj/cloud/SolrCloudManager.java    |   54 +
 .../cloud/autoscaling/AddReplicaSuggester.java  |   81 +
 .../autoscaling/AlreadyExistsException.java     |   35 +
 .../cloud/autoscaling/AutoScalingConfig.java    |  588 ++
 .../cloud/autoscaling/BadVersionException.java  |   40 +
 .../client/solrj/cloud/autoscaling/Cell.java    |   74 +
 .../client/solrj/cloud/autoscaling/Clause.java  |  660 +++
 .../solrj/cloud/autoscaling/ComputedType.java   |   99 +
 .../solrj/cloud/autoscaling/Condition.java      |  120 +
 .../solrj/cloud/autoscaling/CoresVariable.java  |  115 +
 .../autoscaling/DelegatingCloudManager.java     |   93 +
 .../DelegatingClusterStateProvider.java         |  112 +
 .../DelegatingDistribStateManager.java          |  107 +
 .../DelegatingNodeStateProvider.java            |   56 +
 .../cloud/autoscaling/DeleteNodeSuggester.java  |   46 +
 .../autoscaling/DeleteReplicaSuggester.java     |   74 +
 .../cloud/autoscaling/FreeDiskVariable.java     |  175 +
 .../cloud/autoscaling/MoveReplicaSuggester.java |  110 +
 .../solrj/cloud/autoscaling/NodeVariable.java   |   44 +
 .../solrj/cloud/autoscaling/NoneSuggester.java  |   39 +
 .../cloud/autoscaling/NotEmptyException.java    |   35 +
 .../client/solrj/cloud/autoscaling/Operand.java |  209 +
 .../client/solrj/cloud/autoscaling/Policy.java  |  651 +++
 .../solrj/cloud/autoscaling/PolicyHelper.java   |  565 ++
 .../solrj/cloud/autoscaling/Preference.java     |  148 +
 .../solrj/cloud/autoscaling/RangeVal.java       |   60 +
 .../solrj/cloud/autoscaling/ReplicaCount.java   |  111 +
 .../solrj/cloud/autoscaling/ReplicaInfo.java    |  182 +
 .../cloud/autoscaling/ReplicaVariable.java      |  150 +
 .../client/solrj/cloud/autoscaling/Row.java     |  270 +
 .../solrj/cloud/autoscaling/SealedClause.java   |   29 +
 .../cloud/autoscaling/SplitShardSuggester.java  |   49 +
 .../solrj/cloud/autoscaling/Suggester.java      |  489 ++
 .../solrj/cloud/autoscaling/Suggestion.java     |  116 +
 .../autoscaling/TriggerEventProcessorStage.java |   30 +
 .../cloud/autoscaling/TriggerEventType.java     |   33 +
 .../cloud/autoscaling/UnsupportedSuggester.java |   59 +
 .../solrj/cloud/autoscaling/Variable.java       |  401 ++
 .../solrj/cloud/autoscaling/VariableBase.java   |  202 +
 .../solrj/cloud/autoscaling/VersionedData.java  |   50 +
 .../solrj/cloud/autoscaling/Violation.java      |  181 +
 .../autoscaling/WithCollectionVariable.java     |  166 +
 .../solrj/cloud/autoscaling/package-info.java   |   23 +
 .../solr/client/solrj/cloud/package-info.java   |   23 +
 .../client/solrj/impl/BinaryRequestWriter.java  |   95 +
 .../client/solrj/impl/BinaryResponseParser.java |   71 +
 .../solr/client/solrj/impl/CloudSolrClient.java | 1605 ++++++
 .../client/solrj/impl/ClusterStateProvider.java |   85 +
 .../solrj/impl/ConcurrentUpdateSolrClient.java  |  890 +++
 .../impl/DelegationTokenHttpSolrClient.java     |  110 +
 .../solrj/impl/HttpClientBuilderFactory.java    |   41 +
 .../solr/client/solrj/impl/HttpClientUtil.java  |  516 ++
 .../solrj/impl/HttpClusterStateProvider.java    |  312 ++
 .../solr/client/solrj/impl/HttpSolrClient.java  |  968 ++++
 .../solrj/impl/InputStreamResponseParser.java   |   52 +
 .../solrj/impl/Krb5HttpClientBuilder.java       |  189 +
 .../client/solrj/impl/LBHttpSolrClient.java     |  975 ++++
 .../client/solrj/impl/NoOpResponseParser.java   |   82 +
 .../solr/client/solrj/impl/PreemptiveAuth.java  |   59 +
 ...PreemptiveBasicAuthClientBuilderFactory.java |  132 +
 .../client/solrj/impl/SolrClientBuilder.java    |   78 +
 .../solrj/impl/SolrClientCloudManager.java      |  182 +
 .../solrj/impl/SolrClientNodeStateProvider.java |  349 ++
 .../solrj/impl/SolrHttpClientBuilder.java       |   91 +
 .../impl/SolrHttpClientContextBuilder.java      |   98 +
 .../solrj/impl/SolrHttpRequestRetryHandler.java |  161 +
 .../impl/SolrPortAwareCookieSpecFactory.java    |  113 +
 .../impl/StreamingBinaryResponseParser.java     |   90 +
 .../client/solrj/impl/XMLResponseParser.java    |  484 ++
 .../impl/ZkClientClusterStateProvider.java      |  222 +
 .../solrj/impl/ZkDistribStateManager.java       |  197 +
 .../solr/client/solrj/impl/package-info.java    |   24 +
 .../solrj/io/ClassificationEvaluation.java      |   85 +
 .../org/apache/solr/client/solrj/io/Lang.java   |  330 ++
 .../apache/solr/client/solrj/io/ModelCache.java |  147 +
 .../solr/client/solrj/io/SolrClientCache.java   |   99 +
 .../org/apache/solr/client/solrj/io/Tuple.java  |  224 +
 .../client/solrj/io/comp/ComparatorLambda.java  |   28 +
 .../client/solrj/io/comp/ComparatorOrder.java   |   47 +
 .../client/solrj/io/comp/FieldComparator.java   |  174 +
 .../solr/client/solrj/io/comp/HashKey.java      |   70 +
 .../solrj/io/comp/MultipleFieldComparator.java  |  138 +
 .../solrj/io/comp/SingleValueComparator.java    |   65 +
 .../client/solrj/io/comp/StreamComparator.java  |   31 +
 .../solr/client/solrj/io/comp/package-info.java |   25 +
 .../solr/client/solrj/io/eq/Equalitor.java      |   30 +
 .../solr/client/solrj/io/eq/FieldEqualitor.java |  130 +
 .../solrj/io/eq/MultipleFieldEqualitor.java     |  124 +
 .../client/solrj/io/eq/StreamEqualitor.java     |   29 +
 .../solr/client/solrj/io/eq/package-info.java   |   21 +
 .../solrj/io/eval/AbsoluteValueEvaluator.java   |   50 +
 .../solr/client/solrj/io/eval/AddEvaluator.java |   76 +
 .../client/solrj/io/eval/AkimaEvaluator.java    |   71 +
 .../solr/client/solrj/io/eval/AndEvaluator.java |   52 +
 .../client/solrj/io/eval/AnovaEvaluator.java    |   64 +
 .../client/solrj/io/eval/AppendEvaluator.java   |   53 +
 .../solrj/io/eval/ArcCosineEvaluator.java       |   50 +
 .../client/solrj/io/eval/ArcSineEvaluator.java  |   50 +
 .../solrj/io/eval/ArcTangentEvaluator.java      |   51 +
 .../client/solrj/io/eval/ArrayEvaluator.java    |   69 +
 .../solr/client/solrj/io/eval/AscEvaluator.java |   67 +
 .../solr/client/solrj/io/eval/Attributes.java   |   26 +
 .../io/eval/BetaDistributionEvaluator.java      |   48 +
 .../solrj/io/eval/BicubicSplineEvaluator.java   |   74 +
 .../io/eval/BinomialCoefficientEvaluator.java   |   46 +
 .../io/eval/BinomialDistributionEvaluator.java  |   48 +
 .../client/solrj/io/eval/CanberraEvaluator.java |   49 +
 .../client/solrj/io/eval/CeilingEvaluator.java  |   50 +
 .../io/eval/ChebyshevDistanceEvaluator.java     |   56 +
 .../solrj/io/eval/ChebyshevEvaluator.java       |   49 +
 .../io/eval/ChiSquareDataSetEvaluator.java      |   65 +
 .../client/solrj/io/eval/CoalesceEvaluator.java |   46 +
 .../client/solrj/io/eval/ColumnAtEvaluator.java |   55 +
 .../solrj/io/eval/ColumnCountEvaluator.java     |   42 +
 .../client/solrj/io/eval/ColumnEvaluator.java   |   77 +
 .../io/eval/ConstantDistributionEvaluator.java  |   44 +
 .../solrj/io/eval/ConversionEvaluator.java      |  160 +
 .../solrj/io/eval/ConvexHullEvaluator.java      |   62 +
 .../solrj/io/eval/ConvolutionEvaluator.java     |   58 +
 .../client/solrj/io/eval/CopyOfEvaluator.java   |   69 +
 .../solrj/io/eval/CopyOfRangeEvaluator.java     |   89 +
 .../solrj/io/eval/CorrelationEvaluator.java     |  137 +
 .../eval/CorrelationSignificanceEvaluator.java  |   57 +
 .../client/solrj/io/eval/CosineEvaluator.java   |   50 +
 .../io/eval/CosineSimilarityEvaluator.java      |   66 +
 .../solrj/io/eval/CovarianceEvaluator.java      |   57 +
 .../solrj/io/eval/CubedRootEvaluator.java       |   50 +
 .../io/eval/CumulativeProbabilityEvaluator.java |   59 +
 .../client/solrj/io/eval/DensityEvaluator.java  |   55 +
 .../solrj/io/eval/DerivativeEvaluator.java      |   63 +
 .../client/solrj/io/eval/DescribeEvaluator.java |   68 +
 .../client/solrj/io/eval/DistanceEvaluator.java |  128 +
 .../client/solrj/io/eval/DivideEvaluator.java   |   75 +
 .../solrj/io/eval/DotProductEvaluator.java      |   57 +
 .../client/solrj/io/eval/EBEAddEvaluator.java   |   68 +
 .../solrj/io/eval/EBEDivideEvaluator.java       |   62 +
 .../solrj/io/eval/EBEMultiplyEvaluator.java     |   62 +
 .../solrj/io/eval/EBESubtractEvaluator.java     |   67 +
 .../solrj/io/eval/EarthMoversEvaluator.java     |   49 +
 .../io/eval/EmpiricalDistributionEvaluator.java |   53 +
 .../solrj/io/eval/EnclosingDiskEvaluator.java   |   64 +
 .../eval/EnumeratedDistributionEvaluator.java   |   53 +
 .../client/solrj/io/eval/EqualToEvaluator.java  |   73 +
 .../io/eval/EuclideanDistanceEvaluator.java     |   56 +
 .../solrj/io/eval/EuclideanEvaluator.java       |   49 +
 .../solrj/io/eval/EvaluatorException.java       |   32 +
 .../solrj/io/eval/ExclusiveOrEvaluator.java     |   72 +
 .../eval/ExponentialMovingAverageEvaluator.java |   99 +
 .../solr/client/solrj/io/eval/FFTEvaluator.java |   73 +
 .../solrj/io/eval/FactorialEvaluator.java       |   53 +
 .../solrj/io/eval/FeatureSelectEvaluator.java   |   93 +
 .../solrj/io/eval/FieldValueEvaluator.java      |  107 +
 .../solrj/io/eval/FindDelayEvaluator.java       |   75 +
 .../client/solrj/io/eval/FloorEvaluator.java    |   50 +
 .../solrj/io/eval/FrequencyTableEvaluator.java  |   84 +
 .../solrj/io/eval/FuzzyKmeansEvaluator.java     |  106 +
 .../solrj/io/eval/GTestDataSetEvaluator.java    |   64 +
 .../io/eval/GammaDistributionEvaluator.java     |   48 +
 .../client/solrj/io/eval/GaussFitEvaluator.java |   91 +
 .../io/eval/GeometricDistributionEvaluator.java |   44 +
 .../solrj/io/eval/GetAmplitudeEvaluator.java    |   42 +
 .../io/eval/GetAngularFrequencyEvaluator.java   |   42 +
 .../client/solrj/io/eval/GetAreaEvaluator.java  |   43 +
 .../solrj/io/eval/GetAttributeEvaluator.java    |   43 +
 .../solrj/io/eval/GetAttributesEvaluator.java   |   42 +
 .../solrj/io/eval/GetBaryCenterEvaluator.java   |   50 +
 .../solrj/io/eval/GetBoundarySizeEvaluator.java |   43 +
 .../client/solrj/io/eval/GetCacheEvaluator.java |   57 +
 .../solrj/io/eval/GetCenterEvaluator.java       |   52 +
 .../solrj/io/eval/GetCentroidsEvaluator.java    |   55 +
 .../solrj/io/eval/GetClusterEvaluator.java      |   64 +
 .../solrj/io/eval/GetColumnLabelsEvaluator.java |   42 +
 .../io/eval/GetMembershipMatrixEvaluator.java   |   42 +
 .../client/solrj/io/eval/GetPhaseEvaluator.java |   42 +
 .../solrj/io/eval/GetRadiusEvaluator.java       |   44 +
 .../solrj/io/eval/GetRowLabelsEvaluator.java    |   42 +
 .../io/eval/GetSupportPointsEvaluator.java      |   56 +
 .../client/solrj/io/eval/GetValueEvaluator.java |   50 +
 .../solrj/io/eval/GetVerticesEvaluator.java     |   51 +
 .../client/solrj/io/eval/GrandSumEvaluator.java |   54 +
 .../io/eval/GreaterThanEqualToEvaluator.java    |   61 +
 .../solrj/io/eval/GreaterThanEvaluator.java     |   61 +
 .../solrj/io/eval/HarmonicFitEvaluator.java     |   93 +
 .../solrj/io/eval/HistogramEvaluator.java       |   90 +
 .../io/eval/HyperbolicCosineEvaluator.java      |   50 +
 .../solrj/io/eval/HyperbolicSineEvaluator.java  |   50 +
 .../io/eval/HyperbolicTangentEvaluator.java     |   52 +
 .../client/solrj/io/eval/IFFTEvaluator.java     |   71 +
 .../solrj/io/eval/IfThenElseEvaluator.java      |   48 +
 .../client/solrj/io/eval/IndexOfEvaluator.java  |   51 +
 .../solrj/io/eval/IntegrateEvaluator.java       |   70 +
 .../client/solrj/io/eval/KmeansEvaluator.java   |  149 +
 .../solr/client/solrj/io/eval/KnnEvaluator.java |  155 +
 .../solrj/io/eval/KnnRegressionEvaluator.java   |  238 +
 .../io/eval/KolmogorovSmirnovEvaluator.java     |   73 +
 .../client/solrj/io/eval/L1NormEvaluator.java   |   56 +
 .../client/solrj/io/eval/LInfNormEvaluator.java |   56 +
 .../solrj/io/eval/LatLonVectorsEvaluator.java   |  115 +
 .../client/solrj/io/eval/LengthEvaluator.java   |   49 +
 .../client/solrj/io/eval/LerpEvaluator.java     |   71 +
 .../solrj/io/eval/LessThanEqualToEvaluator.java |   61 +
 .../client/solrj/io/eval/LessThanEvaluator.java |   61 +
 .../solrj/io/eval/ListCacheEvaluator.java       |   73 +
 .../client/solrj/io/eval/LoessEvaluator.java    |  105 +
 .../client/solrj/io/eval/Log10Evaluator.java    |   50 +
 .../io/eval/LogNormalDistributionEvaluator.java |   48 +
 .../solrj/io/eval/ManhattanEvaluator.java       |   49 +
 .../solrj/io/eval/MannWhitneyUEvaluator.java    |   62 +
 .../client/solrj/io/eval/ManyValueWorker.java   |   25 +
 .../solrj/io/eval/MarkovChainEvaluator.java     |  100 +
 .../solr/client/solrj/io/eval/Matrix.java       |  105 +
 .../client/solrj/io/eval/MatrixEvaluator.java   |   52 +
 .../solrj/io/eval/MatrixMultiplyEvaluator.java  |   65 +
 .../solrj/io/eval/MeanDifferenceEvaluator.java  |   54 +
 .../client/solrj/io/eval/MeanEvaluator.java     |   56 +
 .../client/solrj/io/eval/MemsetEvaluator.java   |  167 +
 .../solrj/io/eval/MinMaxScaleEvaluator.java     |  115 +
 .../client/solrj/io/eval/ModeEvaluator.java     |   64 +
 .../client/solrj/io/eval/ModuloEvaluator.java   |   75 +
 .../solrj/io/eval/MonteCarloEvaluator.java      |  140 +
 .../solrj/io/eval/MovingAverageEvaluator.java   |   66 +
 .../solrj/io/eval/MovingMedianEvaluator.java    |   68 +
 .../solrj/io/eval/MultiKmeansEvaluator.java     |  108 +
 ...MultiVariateNormalDistributionEvaluator.java |   54 +
 .../client/solrj/io/eval/MultiplyEvaluator.java |   76 +
 .../solrj/io/eval/NaturalLogEvaluator.java      |   50 +
 .../client/solrj/io/eval/NormEvaluator.java     |   56 +
 .../io/eval/NormalDistributionEvaluator.java    |   48 +
 .../solrj/io/eval/NormalizeEvaluator.java       |   61 +
 .../solrj/io/eval/NormalizeSumEvaluator.java    |   84 +
 .../solr/client/solrj/io/eval/NotEvaluator.java |   62 +
 .../solrj/io/eval/OLSRegressionEvaluator.java   |  123 +
 .../client/solrj/io/eval/OneValueWorker.java    |   34 +
 .../client/solrj/io/eval/OnesEvaluator.java     |   47 +
 .../solr/client/solrj/io/eval/OrEvaluator.java  |   52 +
 .../solrj/io/eval/OscillateEvaluator.java       |   57 +
 .../client/solrj/io/eval/OutliersEvaluator.java |  138 +
 .../client/solrj/io/eval/PairSortEvaluator.java |   93 +
 .../solrj/io/eval/PairedTTestEvaluator.java     |   75 +
 .../solrj/io/eval/PercentileEvaluator.java      |   54 +
 .../io/eval/PoissonDistributionEvaluator.java   |   44 +
 .../io/eval/PolyFitDerivativeEvaluator.java     |  103 +
 .../client/solrj/io/eval/PolyFitEvaluator.java  |  106 +
 .../client/solrj/io/eval/PowerEvaluator.java    |   92 +
 .../solrj/io/eval/PrecisionEvaluator.java       |   51 +
 .../client/solrj/io/eval/PredictEvaluator.java  |  160 +
 .../client/solrj/io/eval/PrimesEvaluator.java   |   58 +
 .../solrj/io/eval/ProbabilityEvaluator.java     |   86 +
 .../client/solrj/io/eval/PutCacheEvaluator.java |   61 +
 .../client/solrj/io/eval/RankEvaluator.java     |   53 +
 .../client/solrj/io/eval/RawValueEvaluator.java |   87 +
 .../io/eval/RecursiveBooleanEvaluator.java      |  112 +
 .../solrj/io/eval/RecursiveEvaluator.java       |  257 +
 .../io/eval/RecursiveNumericEvaluator.java      |   76 +
 .../io/eval/RecursiveNumericListEvaluator.java  |   67 +
 .../solrj/io/eval/RecursiveObjectEvaluator.java |   35 +
 .../io/eval/RecursiveTemporalEvaluator.java     |  114 +
 .../solrj/io/eval/RegressionEvaluator.java      |   92 +
 .../solrj/io/eval/RemoveCacheEvaluator.java     |   57 +
 .../client/solrj/io/eval/ReverseEvaluator.java  |   57 +
 .../client/solrj/io/eval/RoundEvaluator.java    |   50 +
 .../client/solrj/io/eval/RowAtEvaluator.java    |   56 +
 .../client/solrj/io/eval/RowCountEvaluator.java |   42 +
 .../client/solrj/io/eval/SampleEvaluator.java   |  101 +
 .../solrj/io/eval/ScalarAddEvaluator.java       |   74 +
 .../solrj/io/eval/ScalarDivideEvaluator.java    |   39 +
 .../solrj/io/eval/ScalarMultiplyEvaluator.java  |   39 +
 .../solrj/io/eval/ScalarSubtractEvaluator.java  |   39 +
 .../client/solrj/io/eval/ScaleEvaluator.java    |   62 +
 .../client/solrj/io/eval/SequenceEvaluator.java |   56 +
 .../solrj/io/eval/SetColumnLabelsEvaluator.java |   47 +
 .../solrj/io/eval/SetRowLabelsEvaluator.java    |   47 +
 .../client/solrj/io/eval/SetValueEvaluator.java |   58 +
 .../client/solrj/io/eval/SineEvaluator.java     |   50 +
 .../client/solrj/io/eval/SourceEvaluator.java   |   36 +
 .../client/solrj/io/eval/SplineEvaluator.java   |   71 +
 .../solrj/io/eval/SquareRootEvaluator.java      |   50 +
 .../client/solrj/io/eval/StreamEvaluator.java   |   53 +
 .../solrj/io/eval/StreamEvaluatorException.java |   32 +
 .../client/solrj/io/eval/SubtractEvaluator.java |   95 +
 .../solrj/io/eval/SumColumnsEvaluator.java      |   68 +
 .../solrj/io/eval/SumDifferenceEvaluator.java   |   54 +
 .../client/solrj/io/eval/SumRowsEvaluator.java  |   63 +
 .../client/solrj/io/eval/SumSqEvaluator.java    |   56 +
 .../client/solrj/io/eval/TTestEvaluator.java    |   95 +
 .../client/solrj/io/eval/TangentEvaluator.java  |   50 +
 .../solrj/io/eval/TemporalEvaluatorDay.java     |   44 +
 .../io/eval/TemporalEvaluatorDayOfQuarter.java  |   44 +
 .../io/eval/TemporalEvaluatorDayOfYear.java     |   44 +
 .../solrj/io/eval/TemporalEvaluatorEpoch.java   |   45 +
 .../solrj/io/eval/TemporalEvaluatorHour.java    |   44 +
 .../solrj/io/eval/TemporalEvaluatorMinute.java  |   44 +
 .../solrj/io/eval/TemporalEvaluatorMonth.java   |   44 +
 .../solrj/io/eval/TemporalEvaluatorQuarter.java |   44 +
 .../solrj/io/eval/TemporalEvaluatorSecond.java  |   44 +
 .../solrj/io/eval/TemporalEvaluatorWeek.java    |   44 +
 .../solrj/io/eval/TemporalEvaluatorYear.java    |   44 +
 .../solrj/io/eval/TermVectorsEvaluator.java     |  188 +
 .../io/eval/TimeDifferencingEvaluator.java      |   69 +
 .../solrj/io/eval/TopFeaturesEvaluator.java     |  112 +
 .../solrj/io/eval/TransposeEvaluator.java       |   56 +
 .../eval/TriangularDistributionEvaluator.java   |   46 +
 .../client/solrj/io/eval/TwoValueWorker.java    |   34 +
 .../io/eval/UniformDistributionEvaluator.java   |   48 +
 .../UniformIntegerDistributionEvaluator.java    |   48 +
 .../client/solrj/io/eval/UnitEvaluator.java     |   80 +
 .../client/solrj/io/eval/UuidEvaluator.java     |   54 +
 .../client/solrj/io/eval/ValueAtEvaluator.java  |   74 +
 .../solr/client/solrj/io/eval/ValueWorker.java  |   25 +
 .../client/solrj/io/eval/VectorFunction.java    |   55 +
 .../io/eval/WeibullDistributionEvaluator.java   |   48 +
 .../client/solrj/io/eval/ZerosEvaluator.java    |   47 +
 .../io/eval/ZipFDistributionEvaluator.java      |   48 +
 .../solr/client/solrj/io/eval/package-info.java |   21 +
 .../solrj/io/graph/GatherNodesStream.java       |  682 +++
 .../apache/solr/client/solrj/io/graph/Node.java |   90 +
 .../solrj/io/graph/ShortestPathStream.java      |  522 ++
 .../solr/client/solrj/io/graph/Traversal.java   |   96 +
 .../solrj/io/graph/TraversalIterator.java       |  120 +
 .../client/solrj/io/graph/package-info.java     |   22 +
 .../client/solrj/io/ops/ConcatOperation.java    |  110 +
 .../client/solrj/io/ops/DistinctOperation.java  |   75 +
 .../client/solrj/io/ops/GroupOperation.java     |  148 +
 .../client/solrj/io/ops/ReduceOperation.java    |   23 +
 .../client/solrj/io/ops/ReplaceOperation.java   |   89 +
 .../solrj/io/ops/ReplaceWithFieldOperation.java |  123 +
 .../solrj/io/ops/ReplaceWithValueOperation.java |  128 +
 .../client/solrj/io/ops/StreamOperation.java    |   29 +
 .../solr/client/solrj/io/ops/package-info.java  |   25 +
 .../solr/client/solrj/io/package-info.java      |   24 +
 .../client/solrj/io/sql/ConnectionImpl.java     |  393 ++
 .../solrj/io/sql/DatabaseMetaDataImpl.java      |  987 ++++
 .../solr/client/solrj/io/sql/DriverImpl.java    |  132 +
 .../solrj/io/sql/PreparedStatementImpl.java     |  394 ++
 .../solr/client/solrj/io/sql/ResultSetImpl.java | 1250 +++++
 .../solrj/io/sql/ResultSetMetaDataImpl.java     |  181 +
 .../solr/client/solrj/io/sql/StatementImpl.java |  377 ++
 .../solr/client/solrj/io/sql/package-info.java  |   51 +
 .../client/solrj/io/stream/BiJoinStream.java    |  162 +
 .../solrj/io/stream/CalculatorStream.java       |  112 +
 .../solrj/io/stream/CartesianProductStream.java |  310 ++
 .../solr/client/solrj/io/stream/CellStream.java |  155 +
 .../client/solrj/io/stream/CloudSolrStream.java |  518 ++
 .../client/solrj/io/stream/CommitStream.java    |  261 +
 .../solrj/io/stream/ComplementStream.java       |  207 +
 .../client/solrj/io/stream/DaemonStream.java    |  379 ++
 .../solr/client/solrj/io/stream/EchoStream.java |  122 +
 .../solr/client/solrj/io/stream/EvalStream.java |  141 +
 .../client/solrj/io/stream/ExceptionStream.java |  102 +
 .../client/solrj/io/stream/ExecutorStream.java  |  227 +
 .../client/solrj/io/stream/FacetStream.java     |  538 ++
 .../io/stream/FeaturesSelectionStream.java      |  442 ++
 .../client/solrj/io/stream/FetchStream.java     |  312 ++
 .../solr/client/solrj/io/stream/GetStream.java  |  124 +
 .../client/solrj/io/stream/HashJoinStream.java  |  287 +
 .../client/solrj/io/stream/HavingStream.java    |  173 +
 .../client/solrj/io/stream/InnerJoinStream.java |  109 +
 .../client/solrj/io/stream/IntersectStream.java |  202 +
 .../solr/client/solrj/io/stream/JDBCStream.java |  605 +++
 .../client/solrj/io/stream/JSONTupleStream.java |  187 +
 .../io/stream/JavabinTupleStreamParser.java     |  189 +
 .../solr/client/solrj/io/stream/JoinStream.java |  227 +
 .../solr/client/solrj/io/stream/KnnStream.java  |  259 +
 .../solrj/io/stream/LeftOuterJoinStream.java    |  110 +
 .../solr/client/solrj/io/stream/LetStream.java  |  221 +
 .../solr/client/solrj/io/stream/ListStream.java |  148 +
 .../client/solrj/io/stream/MergeStream.java     |  250 +
 .../client/solrj/io/stream/ModelStream.java     |  203 +
 .../solr/client/solrj/io/stream/NullStream.java |  156 +
 .../solrj/io/stream/OuterHashJoinStream.java    |  133 +
 .../client/solrj/io/stream/ParallelStream.java  |  279 +
 .../solr/client/solrj/io/stream/PlotStream.java |  224 +
 .../client/solrj/io/stream/PriorityStream.java  |  157 +
 .../client/solrj/io/stream/PushBackStream.java  |  102 +
 .../client/solrj/io/stream/RandomStream.java    |  243 +
 .../solr/client/solrj/io/stream/RankStream.java |  232 +
 .../client/solrj/io/stream/ReducerStream.java   |  248 +
 .../client/solrj/io/stream/RollupStream.java    |  276 +
 .../solrj/io/stream/ScoreNodesStream.java       |  282 +
 .../client/solrj/io/stream/SelectStream.java    |  302 ++
 .../client/solrj/io/stream/ShuffleStream.java   |  106 +
 .../solrj/io/stream/SignificantTermsStream.java |  407 ++
 .../solr/client/solrj/io/stream/SolrStream.java |  273 +
 .../solr/client/solrj/io/stream/SortStream.java |  200 +
 .../solr/client/solrj/io/stream/SqlStream.java  |  221 +
 .../client/solrj/io/stream/StatsStream.java     |  363 ++
 .../client/solrj/io/stream/StreamContext.java   |  104 +
 .../client/solrj/io/stream/TextLogitStream.java |  662 +++
 .../solrj/io/stream/TimeSeriesStream.java       |  421 ++
 .../client/solrj/io/stream/TopicStream.java     |  552 ++
 .../solr/client/solrj/io/stream/TupStream.java  |  220 +
 .../client/solrj/io/stream/TupleStream.java     |  166 +
 .../solrj/io/stream/TupleStreamParser.java      |   27 +
 .../client/solrj/io/stream/UniqueStream.java    |  173 +
 .../client/solrj/io/stream/UpdateStream.java    |  324 ++
 .../io/stream/expr/DefaultStreamFactory.java    |   33 +
 .../solrj/io/stream/expr/Explanation.java       |  162 +
 .../solrj/io/stream/expr/Expressible.java       |   37 +
 .../solrj/io/stream/expr/StreamExplanation.java |   71 +
 .../solrj/io/stream/expr/StreamExpression.java  |  126 +
 .../expr/StreamExpressionNamedParameter.java    |  108 +
 .../stream/expr/StreamExpressionParameter.java  |   24 +
 .../io/stream/expr/StreamExpressionParser.java  |  350 ++
 .../io/stream/expr/StreamExpressionValue.java   |   65 +
 .../solrj/io/stream/expr/StreamFactory.java     |  447 ++
 .../solrj/io/stream/expr/package-info.java      |   27 +
 .../client/solrj/io/stream/metrics/Bucket.java  |   44 +
 .../solrj/io/stream/metrics/CountMetric.java    |   85 +
 .../solrj/io/stream/metrics/MaxMetric.java      |   93 +
 .../solrj/io/stream/metrics/MeanMetric.java     |  112 +
 .../client/solrj/io/stream/metrics/Metric.java  |   75 +
 .../solrj/io/stream/metrics/MinMetric.java      |   94 +
 .../solrj/io/stream/metrics/SumMetric.java      |   89 +
 .../solrj/io/stream/metrics/package-info.java   |   24 +
 .../client/solrj/io/stream/package-info.java    |   27 +
 .../apache/solr/client/solrj/package-info.java  |   23 +
 .../solrj/request/AbstractUpdateRequest.java    |  144 +
 .../solrj/request/CollectionAdminRequest.java   | 2509 +++++++++
 .../solrj/request/CollectionApiMapping.java     |  474 ++
 .../solrj/request/ConfigSetAdminRequest.java    |  179 +
 .../request/ContentStreamUpdateRequest.java     |   96 +
 .../client/solrj/request/CoreAdminRequest.java  |  692 +++
 .../client/solrj/request/CoreApiMapping.java    |  117 +
 .../solr/client/solrj/request/CoreStatus.java   |   48 +
 .../solrj/request/DelegationTokenRequest.java   |  140 +
 .../client/solrj/request/DirectXmlRequest.java  |   62 +
 .../solrj/request/DocumentAnalysisRequest.java  |  212 +
 .../solrj/request/FieldAnalysisRequest.java     |  256 +
 .../solrj/request/GenericSolrRequest.java       |   54 +
 .../solrj/request/HealthCheckRequest.java       |   52 +
 .../client/solrj/request/IsUpdateRequest.java   |   24 +
 .../request/JavaBinUpdateRequestCodec.java      |  290 +
 .../solr/client/solrj/request/LukeRequest.java  |  113 +
 .../request/MultiContentWriterRequest.java      |  124 +
 .../solr/client/solrj/request/QueryRequest.java |   80 +
 .../client/solrj/request/RequestWriter.java     |  139 +
 .../solr/client/solrj/request/SolrPing.java     |  102 +
 .../solrj/request/StreamingUpdateRequest.java   |   76 +
 .../client/solrj/request/UpdateRequest.java     |  542 ++
 .../client/solrj/request/V1toV2ApiMapper.java   |  145 +
 .../solr/client/solrj/request/V2Request.java    |  167 +
 .../solr/client/solrj/request/package-info.java |   23 +
 .../request/schema/AbstractSchemaRequest.java   |   40 +
 .../request/schema/AnalyzerDefinition.java      |   62 +
 .../request/schema/FieldTypeDefinition.java     |   86 +
 .../solrj/request/schema/SchemaRequest.java     |  780 +++
 .../solrj/request/schema/package-info.java      |   23 +
 .../solrj/response/AnalysisResponseBase.java    |  270 +
 .../solr/client/solrj/response/Cluster.java     |  107 +
 .../solrj/response/ClusteringResponse.java      |   77 +
 .../solrj/response/CollectionAdminResponse.java |  101 +
 .../solrj/response/ConfigSetAdminResponse.java  |   37 +
 .../solrj/response/CoreAdminResponse.java       |   57 +
 .../solrj/response/DelegationTokenResponse.java |  109 +
 .../response/DocumentAnalysisResponse.java      |  254 +
 .../solr/client/solrj/response/FacetField.java  |  148 +
 .../solrj/response/FieldAnalysisResponse.java   |  200 +
 .../client/solrj/response/FieldStatsInfo.java   |  227 +
 .../solr/client/solrj/response/Group.java       |   68 +
 .../client/solrj/response/GroupCommand.java     |  124 +
 .../client/solrj/response/GroupResponse.java    |   55 +
 .../solrj/response/HealthCheckResponse.java     |   39 +
 .../client/solrj/response/IntervalFacet.java    |   85 +
 .../client/solrj/response/LukeResponse.java     |  309 ++
 .../solr/client/solrj/response/PivotField.java  |  106 +
 .../client/solrj/response/QueryResponse.java    |  641 +++
 .../solr/client/solrj/response/RangeFacet.java  |  131 +
 .../solrj/response/RequestStatusState.java      |   73 +
 .../solrj/response/SimpleSolrResponse.java      |   47 +
 .../client/solrj/response/SolrPingResponse.java |   27 +
 .../client/solrj/response/SolrResponseBase.java |   98 +
 .../solrj/response/SpellCheckResponse.java      |  272 +
 .../solrj/response/SuggesterResponse.java       |   86 +
 .../solr/client/solrj/response/Suggestion.java  |   63 +
 .../client/solrj/response/TermsResponse.java    |  116 +
 .../client/solrj/response/UpdateResponse.java   |   29 +
 .../solr/client/solrj/response/V2Response.java  |   22 +
 .../client/solrj/response/package-info.java     |   24 +
 .../schema/FieldTypeRepresentation.java         |   46 +
 .../response/schema/SchemaRepresentation.java   |  107 +
 .../solrj/response/schema/SchemaResponse.java   |  435 ++
 .../solrj/response/schema/package-info.java     |   23 +
 .../solr/client/solrj/util/ClientUtils.java     |  209 +
 .../solrj/util/SolrIdentifierValidator.java     |   72 +
 .../solr/client/solrj/util/package-info.java    |   23 +
 .../java/org/apache/solr/common/Callable.java   |   21 +
 .../solr/common/ConditionalMapWriter.java       |   82 +
 .../apache/solr/common/EmptyEntityResolver.java |   96 +
 .../org/apache/solr/common/EnumFieldValue.java  |  115 +
 .../org/apache/solr/common/IteratorWriter.java  |   93 +
 .../apache/solr/common/LinkedHashMapWriter.java |   60 +
 .../org/apache/solr/common/MapSerializable.java |   30 +
 .../java/org/apache/solr/common/MapWriter.java  |  195 +
 .../org/apache/solr/common/MapWriterMap.java    |   53 +
 .../solr/common/NonExistentCoreException.java   |   25 +
 .../java/org/apache/solr/common/PushWriter.java |   42 +
 .../org/apache/solr/common/SolrCloseable.java   |   30 +
 .../apache/solr/common/SolrCloseableLatch.java  |   65 +
 .../org/apache/solr/common/SolrDocument.java    |  406 ++
 .../apache/solr/common/SolrDocumentBase.java    |   73 +
 .../apache/solr/common/SolrDocumentList.java    |   67 +
 .../org/apache/solr/common/SolrException.java   |  258 +
 .../apache/solr/common/SolrInputDocument.java   |  276 +
 .../org/apache/solr/common/SolrInputField.java  |  213 +
 .../org/apache/solr/common/SpecProvider.java    |   25 +
 .../org/apache/solr/common/StringUtils.java     |   35 +
 .../solr/common/ToleratedUpdateError.java       |  199 +
 .../org/apache/solr/common/cloud/Aliases.java   |  288 +
 .../solr/common/cloud/BeforeReconnect.java      |   21 +
 .../common/cloud/CloudCollectionsListener.java  |   40 +
 .../solr/common/cloud/ClusterProperties.java    |  208 +
 .../apache/solr/common/cloud/ClusterState.java  |  411 ++
 .../solr/common/cloud/ClusterStateUtil.java     |  257 +
 .../solr/common/cloud/CollectionProperties.java |  117 +
 .../common/cloud/CollectionPropsWatcher.java    |   40 +
 .../common/cloud/CollectionStatePredicate.java  |   42 +
 .../common/cloud/CollectionStateWatcher.java    |   43 +
 .../solr/common/cloud/CompositeIdRouter.java    |  326 ++
 .../solr/common/cloud/ConnectionManager.java    |  261 +
 .../common/cloud/DefaultConnectionStrategy.java |   75 +
 .../solr/common/cloud/DefaultZkACLProvider.java |   44 +
 .../cloud/DefaultZkCredentialsProvider.java     |   40 +
 .../solr/common/cloud/DistributedQueue.java     |   42 +
 .../apache/solr/common/cloud/DocCollection.java |  418 ++
 .../org/apache/solr/common/cloud/DocRouter.java |  217 +
 .../solr/common/cloud/HashBasedRouter.java      |   83 +
 .../solr/common/cloud/ImplicitDocRouter.java    |   98 +
 .../solr/common/cloud/LiveNodesListener.java    |   38 +
 .../apache/solr/common/cloud/OnReconnect.java   |   28 +
 .../apache/solr/common/cloud/PlainIdRouter.java |   22 +
 .../org/apache/solr/common/cloud/Replica.java   |  188 +
 .../solr/common/cloud/ReplicaPosition.java      |   55 +
 .../apache/solr/common/cloud/RoutingRule.java   |   77 +
 .../solr/common/cloud/SaslZkACLProvider.java    |   51 +
 .../cloud/SecurityAwareZkACLProvider.java       |   79 +
 .../org/apache/solr/common/cloud/Slice.java     |  288 +
 .../apache/solr/common/cloud/SolrZkClient.java  |  831 +++
 .../apache/solr/common/cloud/SolrZooKeeper.java |  111 +
 ...ParamsAllAndReadonlyDigestZkACLProvider.java |  117 +
 ...tCredentialsDigestZkCredentialsProvider.java |   59 +
 .../apache/solr/common/cloud/ZkACLProvider.java |   27 +
 .../cloud/ZkClientConnectionStrategy.java       |  115 +
 .../apache/solr/common/cloud/ZkCmdExecutor.java |  114 +
 .../solr/common/cloud/ZkConfigManager.java      |  220 +
 .../solr/common/cloud/ZkCoreNodeProps.java      |   73 +
 .../common/cloud/ZkCredentialsProvider.java     |   44 +
 .../solr/common/cloud/ZkMaintenanceUtils.java   |  471 ++
 .../apache/solr/common/cloud/ZkNodeProps.java   |  160 +
 .../apache/solr/common/cloud/ZkOperation.java   |   33 +
 .../apache/solr/common/cloud/ZkStateReader.java | 1864 +++++++
 .../solr/common/cloud/ZooKeeperException.java   |   31 +
 .../apache/solr/common/cloud/package-info.java  |   23 +
 .../solr/common/cloud/rule/ImplicitSnitch.java  |  191 +
 .../solr/common/cloud/rule/RemoteCallback.java  |   23 +
 .../apache/solr/common/cloud/rule/Snitch.java   |   32 +
 .../solr/common/cloud/rule/SnitchContext.java   |  106 +
 .../solr/common/cloud/rule/package-info.java    |   23 +
 .../org/apache/solr/common/luke/FieldFlag.java  |   69 +
 .../apache/solr/common/luke/package-info.java   |   23 +
 .../org/apache/solr/common/package-info.java    |   23 +
 .../solr/common/params/AnalysisParams.java      |   59 +
 .../solr/common/params/AppendedSolrParams.java  |   54 +
 .../solr/common/params/AutoScalingParams.java   |   75 +
 .../common/params/CollectionAdminParams.java    |  112 +
 .../solr/common/params/CollectionParams.java    |  155 +
 .../solr/common/params/CommonAdminParams.java   |   32 +
 .../apache/solr/common/params/CommonParams.java |  303 ++
 .../solr/common/params/ConfigSetParams.java     |   52 +
 .../solr/common/params/CoreAdminParams.java     |  187 +
 .../solr/common/params/CursorMarkParams.java    |   47 +
 .../solr/common/params/DefaultSolrParams.java   |   67 +
 .../apache/solr/common/params/DisMaxParams.java |   82 +
 .../apache/solr/common/params/EventParams.java  |   27 +
 .../apache/solr/common/params/ExpandParams.java |   31 +
 .../apache/solr/common/params/FacetParams.java  |  492 ++
 .../apache/solr/common/params/GroupParams.java  |   70 +
 .../solr/common/params/HighlightParams.java     |   94 +
 .../solr/common/params/MapSolrParams.java       |   59 +
 .../common/params/ModifiableSolrParams.java     |  214 +
 .../solr/common/params/MoreLikeThisParams.java  |   76 +
 .../solr/common/params/MultiMapSolrParams.java  |  112 +
 .../common/params/QueryElevationParams.java     |   58 +
 .../solr/common/params/RequiredSolrParams.java  |  154 +
 .../apache/solr/common/params/ShardParams.java  |   97 +
 .../apache/solr/common/params/SimpleParams.java |   49 +
 .../apache/solr/common/params/SolrParams.java   |  636 +++
 .../solr/common/params/SpatialParams.java       |   39 +
 .../solr/common/params/SpellingParams.java      |  183 +
 .../apache/solr/common/params/StatsParams.java  |   27 +
 .../solr/common/params/TermVectorParams.java    |   69 +
 .../apache/solr/common/params/TermsParams.java  |  137 +
 .../apache/solr/common/params/UpdateParams.java |   71 +
 .../apache/solr/common/params/package-info.java |   22 +
 .../org/apache/solr/common/util/Base64.java     |  157 +
 .../org/apache/solr/common/util/ByteUtils.java  |  225 +
 .../java/org/apache/solr/common/util/Cache.java |   46 +
 .../solr/common/util/CommandOperation.java      |  389 ++
 .../apache/solr/common/util/ContentStream.java  |   80 +
 .../solr/common/util/ContentStreamBase.java     |  348 ++
 .../solr/common/util/DataInputInputStream.java  |   26 +
 .../apache/solr/common/util/ExecutorUtil.java   |  249 +
 .../solr/common/util/FastInputStream.java       |  267 +
 .../solr/common/util/FastOutputStream.java      |  232 +
 .../org/apache/solr/common/util/FastWriter.java |  157 +
 .../java/org/apache/solr/common/util/Hash.java  |  519 ++
 .../org/apache/solr/common/util/IOUtils.java    |   37 +
 .../apache/solr/common/util/JavaBinCodec.java   | 1204 +++++
 .../solr/common/util/JsonRecordReader.java      |  633 +++
 .../solr/common/util/JsonSchemaValidator.java   |  315 ++
 .../apache/solr/common/util/JsonTextWriter.java |  499 ++
 .../apache/solr/common/util/MapBackedCache.java |   57 +
 .../org/apache/solr/common/util/NamedList.java  |  852 +++
 .../apache/solr/common/util/ObjectCache.java    |   91 +
 .../solr/common/util/ObjectReleaseTracker.java  |  106 +
 .../java/org/apache/solr/common/util/Pair.java  |   73 +
 .../org/apache/solr/common/util/PathTrie.java   |  195 +
 .../org/apache/solr/common/util/RetryUtil.java  |   96 +
 .../solr/common/util/SimpleOrderedMap.java      |   75 +
 .../apache/solr/common/util/SolrJSONWriter.java |  114 +
 .../common/util/SolrjNamedThreadFactory.java    |   49 +
 .../org/apache/solr/common/util/StrUtils.java   |  320 ++
 .../solr/common/util/SuppressForbidden.java     |   33 +
 .../org/apache/solr/common/util/Template.java   |   66 +
 .../org/apache/solr/common/util/TextWriter.java |  231 +
 .../org/apache/solr/common/util/TimeSource.java |  256 +
 .../org/apache/solr/common/util/URLUtil.java    |   49 +
 .../java/org/apache/solr/common/util/Utils.java |  692 +++
 .../solr/common/util/ValidatingJsonMap.java     |  355 ++
 .../apache/solr/common/util/WriteableValue.java |   25 +
 .../java/org/apache/solr/common/util/XML.java   |  156 +
 .../apache/solr/common/util/XMLErrorLogger.java |   83 +
 .../apache/solr/common/util/package-info.java   |   23 +
 solr/solrj/src/main/java/overview.html          |   21 +
 .../resources/META-INF/services/java.sql.Driver |   16 +
 .../resources/apispec/autoscaling.Commands.json |  199 +
 .../resources/apispec/autoscaling.history.json  |   61 +
 .../resources/apispec/cluster.Commands.json     |  174 +
 .../main/resources/apispec/cluster.aliases.json |   12 +
 .../apispec/cluster.commandstatus.delete.json   |   10 +
 .../apispec/cluster.commandstatus.json          |   20 +
 .../apispec/cluster.configs.Commands.json       |   34 +
 .../apispec/cluster.configs.delete.json         |   12 +
 .../main/resources/apispec/cluster.configs.json |   12 +
 .../src/main/resources/apispec/cluster.json     |   14 +
 .../main/resources/apispec/cluster.nodes.json   |   12 +
 .../cluster.security.BasicAuth.Commands.json    |   23 +
 ...cluster.security.RuleBasedAuthorization.json |  129 +
 ...luster.security.authentication.Commands.json |   12 +
 .../cluster.security.authentication.json        |   12 +
 ...cluster.security.authorization.Commands.json |   13 +
 .../apispec/cluster.security.authorization.json |   13 +
 .../resources/apispec/collections.Commands.json |  298 +
 .../collections.collection.Commands.json        |  193 +
 .../collections.collection.Commands.modify.json |   43 +
 .../collections.collection.Commands.reload.json |   11 +
 .../apispec/collections.collection.delete.json  |   13 +
 .../apispec/collections.collection.json         |   19 +
 .../collections.collection.shards.Commands.json |  129 +
 ...ctions.collection.shards.shard.Commands.json |   24 +
 ...lections.collection.shards.shard.delete.json |   27 +
 ....collection.shards.shard.replica.delete.json |   39 +
 .../src/main/resources/apispec/collections.json |   13 +
 .../resources/apispec/core.RealtimeGet.json     |   26 +
 .../apispec/core.SchemaEdit.addCopyField.json   |   27 +
 .../apispec/core.SchemaEdit.addField.json       |   98 +
 .../core.SchemaEdit.addFieldType.analyzers.json |   51 +
 .../apispec/core.SchemaEdit.addFieldType.json   |   53 +
 .../core.SchemaEdit.deleteCopyField.json        |   19 +
 .../core.SchemaEdit.deleteDynamicField.json     |   12 +
 .../apispec/core.SchemaEdit.deleteField.json    |   12 +
 .../core.SchemaEdit.deleteFieldType.json        |   14 +
 .../main/resources/apispec/core.SchemaEdit.json |   47 +
 .../apispec/core.SchemaRead.copyFields.json     |   26 +
 ...ore.SchemaRead.dynamicFields_fieldTypes.json |   20 +
 .../apispec/core.SchemaRead.fields.json         |   34 +
 .../main/resources/apispec/core.SchemaRead.json |   17 +
 .../src/main/resources/apispec/core.Update.json |   17 +
 ...g.Commands.addRequestHandler.properties.json |   25 +
 .../apispec/core.config.Commands.generic.json   |   19 +
 .../resources/apispec/core.config.Commands.json |  215 +
 .../core.config.Commands.runtimeLib.json        |   23 +
 .../apispec/core.config.Params.Commands.json    |   31 +
 .../resources/apispec/core.config.Params.json   |   13 +
 .../src/main/resources/apispec/core.config.json |   18 +
 .../resources/apispec/core.system.blob.json     |   20 +
 .../apispec/core.system.blob.upload.json        |   12 +
 .../main/resources/apispec/cores.Commands.json  |   85 +
 .../main/resources/apispec/cores.Status.json    |   20 +
 .../resources/apispec/cores.core.Commands.json  |  136 +
 .../apispec/cores.core.Commands.split.json      |   34 +
 .../src/main/resources/apispec/emptySpec.json   |   11 +
 .../main/resources/apispec/metrics.history.json |   23 +
 .../main/resources/apispec/node.Commands.json   |   24 +
 .../src/main/resources/apispec/node.Info.json   |   11 +
 .../src/main/resources/apispec/node.invoke.json |   16 +
 .../resources/META-INF/services/java.sql.Driver |   16 -
 .../resources/apispec/autoscaling.Commands.json |  199 -
 .../resources/apispec/autoscaling.history.json  |   61 -
 .../src/resources/apispec/cluster.Commands.json |  174 -
 .../src/resources/apispec/cluster.aliases.json  |   12 -
 .../apispec/cluster.commandstatus.delete.json   |   10 -
 .../apispec/cluster.commandstatus.json          |   20 -
 .../apispec/cluster.configs.Commands.json       |   34 -
 .../apispec/cluster.configs.delete.json         |   12 -
 .../src/resources/apispec/cluster.configs.json  |   12 -
 solr/solrj/src/resources/apispec/cluster.json   |   14 -
 .../src/resources/apispec/cluster.nodes.json    |   12 -
 .../cluster.security.BasicAuth.Commands.json    |   23 -
 ...cluster.security.RuleBasedAuthorization.json |  129 -
 ...luster.security.authentication.Commands.json |   12 -
 .../cluster.security.authentication.json        |   12 -
 ...cluster.security.authorization.Commands.json |   13 -
 .../apispec/cluster.security.authorization.json |   13 -
 .../resources/apispec/collections.Commands.json |  298 -
 .../collections.collection.Commands.json        |  193 -
 .../collections.collection.Commands.modify.json |   43 -
 .../collections.collection.Commands.reload.json |   11 -
 .../apispec/collections.collection.delete.json  |   13 -
 .../apispec/collections.collection.json         |   19 -
 .../collections.collection.shards.Commands.json |  129 -
 ...ctions.collection.shards.shard.Commands.json |   24 -
 ...lections.collection.shards.shard.delete.json |   27 -
 ....collection.shards.shard.replica.delete.json |   39 -
 .../src/resources/apispec/collections.json      |   13 -
 .../src/resources/apispec/core.RealtimeGet.json |   26 -
 .../apispec/core.SchemaEdit.addCopyField.json   |   27 -
 .../apispec/core.SchemaEdit.addField.json       |   98 -
 .../core.SchemaEdit.addFieldType.analyzers.json |   51 -
 .../apispec/core.SchemaEdit.addFieldType.json   |   53 -
 .../core.SchemaEdit.deleteCopyField.json        |   19 -
 .../core.SchemaEdit.deleteDynamicField.json     |   12 -
 .../apispec/core.SchemaEdit.deleteField.json    |   12 -
 .../core.SchemaEdit.deleteFieldType.json        |   14 -
 .../src/resources/apispec/core.SchemaEdit.json  |   47 -
 .../apispec/core.SchemaRead.copyFields.json     |   26 -
 ...ore.SchemaRead.dynamicFields_fieldTypes.json |   20 -
 .../apispec/core.SchemaRead.fields.json         |   34 -
 .../src/resources/apispec/core.SchemaRead.json  |   17 -
 .../src/resources/apispec/core.Update.json      |   17 -
 ...g.Commands.addRequestHandler.properties.json |   25 -
 .../apispec/core.config.Commands.generic.json   |   19 -
 .../resources/apispec/core.config.Commands.json |  215 -
 .../core.config.Commands.runtimeLib.json        |   23 -
 .../apispec/core.config.Params.Commands.json    |   31 -
 .../resources/apispec/core.config.Params.json   |   13 -
 .../src/resources/apispec/core.config.json      |   18 -
 .../src/resources/apispec/core.system.blob.json |   20 -
 .../apispec/core.system.blob.upload.json        |   12 -
 .../src/resources/apispec/cores.Commands.json   |   85 -
 .../src/resources/apispec/cores.Status.json     |   20 -
 .../resources/apispec/cores.core.Commands.json  |  136 -
 .../apispec/cores.core.Commands.split.json      |   34 -
 solr/solrj/src/resources/apispec/emptySpec.json |   11 -
 .../src/resources/apispec/metrics.history.json  |   23 -
 .../src/resources/apispec/node.Commands.json    |   24 -
 solr/solrj/src/resources/apispec/node.Info.json |   11 -
 .../src/resources/apispec/node.invoke.json      |   16 -
 solr/solrj/src/test-files/solrj/README          |   21 -
 solr/solrj/src/test-files/solrj/books.csv       |   11 -
 solr/solrj/src/test-files/solrj/docs1.xml       |   56 -
 solr/solrj/src/test-files/solrj/docs2.xml       |   77 -
 .../src/test-files/solrj/javabin_backcompat.bin |  Bin 169 -> 0 bytes
 .../solrj/javabin_backcompat_child_docs.bin     |    1 -
 .../solrj/sampleClusteringResponse.xml          |  112 -
 .../test-files/solrj/sampleGroupResponse.xml    |  384 --
 .../solrj/sampleIntervalFacetsResponse.xml      |  206 -
 .../solrj/sampleRangeFacetResponse.xml          |   44 -
 .../solrj/sampleSimpleGroupResponse.xml         |  101 -
 .../solr/autoscaling/testAddMissingReplica.json |  123 -
 .../testAutoScalingHandlerFailure.json          |  141 -
 ...tAutoscalingPreferencesUsedWithNoPolicy.json |   53 -
 .../testComputePlanAfterNodeAdded.json          |   16 -
 .../solr/autoscaling/testCoresSuggestions.json  |   17 -
 .../testCreateCollectionWithEmptyPolicy.json    |   20 -
 .../solr/autoscaling/testDiskSpaceHint.json     |   16 -
 .../solr/autoscaling/testEqualOnNonNode.json    |   83 -
 .../solr/autoscaling/testFreeDiskDeviation.json |   35 -
 .../autoscaling/testFreeDiskSuggestions.json    |   27 -
 .../autoscaling/testFreediskPercentage.json     |   25 -
 .../autoscaling/testMoveReplicaSuggester.json   |   15 -
 .../testMoveReplicasInMultipleCollections.json  |   88 -
 .../solrj/solr/autoscaling/testPolicy.json      |   41 -
 .../solr/autoscaling/testPortSuggestions.json   |   22 -
 .../testReplicaCountSuggestions.json            |   15 -
 .../solr/autoscaling/testReplicaPercentage.json |   46 -
 .../autoscaling/testReplicaZonesPercentage.json |   15 -
 .../testScheduledTriggerFailure.json            |   52 -
 .../solrj/solr/autoscaling/testSortError.json   |  225 -
 .../autoscaling/testSuggestionsRebalance2.json  |  130 -
 .../testSuggestionsRebalanceOnly.json           |  105 -
 .../autoscaling/testSysPropSuggestions.json     |  119 -
 .../autoscaling/testSyspropSuggestions1.json    |   24 -
 .../autoscaling/testUtilizeNodeFailure.json     |   69 -
 .../autoscaling/testUtilizeNodeFailure2.json    |   66 -
 .../solr/autoscaling/testViolationOutput.json   |   22 -
 .../solr/autoscaling/testWithCollection.json    |   21 -
 .../testWithCollectionMoveReplica.json          |   28 -
 .../testWithCollectionMoveVsAddSuggestions.json |   49 -
 .../testWithCollectionSuggestions.json          |   21 -
 .../collection1/conf/schema-replication1.xml    |   39 -
 .../solrj/solr/collection1/conf/schema-sql.xml  |  623 ---
 .../solrj/solr/collection1/conf/schema.xml      |  593 --
 .../conf/solrconfig-managed-schema.xml          |   32 -
 .../solr/collection1/conf/solrconfig-slave1.xml |   57 -
 .../solr/collection1/conf/solrconfig-sql.xml    |   72 -
 .../solrj/solr/collection1/conf/solrconfig.xml  |   60 -
 .../configset-1/conf/schema-minimal.xml         |   21 -
 .../configset-1/conf/solrconfig-minimal.xml     |   57 -
 .../solr/configsets/configset-2/conf/schema.xml |   21 -
 .../configsets/configset-2/conf/solrconfig.xml  |   50 -
 .../solrj/solr/configsets/ml/conf/schema.xml    |   77 -
 .../solr/configsets/ml/conf/solrconfig.xml      |   51 -
 .../solr/configsets/shared/conf/schema.xml      |   57 -
 .../solr/configsets/shared/conf/solrconfig.xml  |   56 -
 .../configsets/shared/conf/stopwords-en.txt     |   16 -
 .../configsets/shared/conf/stopwords-fr.txt     |   16 -
 .../solr/configsets/streaming/conf/schema.xml   |  621 ---
 .../configsets/streaming/conf/solrconfig.xml    |   57 -
 .../solrj/solr/crazy-path-to-schema.xml         |   49 -
 .../test-files/solrj/solr/multicore/README.txt  |    7 -
 .../solrj/solr/multicore/core0/conf/schema.xml  |   33 -
 .../solr/multicore/core0/conf/solrconfig.xml    |   78 -
 .../solrj/solr/multicore/core0/core.properties  |    0
 .../solrj/solr/multicore/core1/conf/schema.xml  |   34 -
 .../solr/multicore/core1/conf/solrconfig.xml    |   78 -
 .../solrj/solr/multicore/core1/core.properties  |    0
 .../solr/multicore/exampledocs/ipod_other.xml   |   34 -
 .../solr/multicore/exampledocs/ipod_video.xml   |   22 -
 .../test-files/solrj/solr/multicore/solr.xml    |   22 -
 .../src/test-files/solrj/solr/multicore/zoo.cfg |   17 -
 .../solr/shared/collection1/core.properties     |    1 -
 .../solrj/solr/shared/core0/core.properties     |    8 -
 .../solrj/solr/shared/core1/core.properties     |    6 -
 .../src/test-files/solrj/solr/shared/solr.xml   |   42 -
 solr/solrj/src/test-files/solrj/solr/solr.xml   |   46 -
 .../src/test-files/solrj/updateReq_4_5.bin      |  Bin 290 -> 0 bytes
 .../UsingSolrJRefGuideExamplesTest.java         |  286 +
 .../ref_guide_examples/ZkConfigFilesTest.java   |   94 +
 ...ollectionAdminRequestRequiredParamsTest.java |  218 +
 .../apache/solr/client/solrj/GetByIdTest.java   |  114 +
 .../solr/client/solrj/LargeVolumeTestBase.java  |  132 +
 .../solrj/MergeIndexesExampleTestBase.java      |  181 +
 .../client/solrj/SolrExampleBinaryTest.java     |   56 +
 .../solr/client/solrj/SolrExampleTests.java     | 2182 ++++++++
 .../solr/client/solrj/SolrExampleTestsBase.java |  269 +
 .../solr/client/solrj/SolrExampleXMLTest.java   |   49 +
 .../solr/client/solrj/SolrExceptionTest.java    |   64 +
 .../apache/solr/client/solrj/SolrQueryTest.java |  459 ++
 .../client/solrj/SolrSchemalessExampleTest.java |  149 +
 .../solr/client/solrj/StartSolrJetty.java       |   71 +
 .../solr/client/solrj/TestBatchUpdate.java      |  137 +
 .../solr/client/solrj/TestLBHttpSolrClient.java |  333 ++
 .../client/solrj/TestSolrJErrorHandling.java    |  376 ++
 .../solrj/beans/TestDocumentObjectBinder.java   |  296 +
 .../solrj/cloud/autoscaling/TestPolicy.java     | 2691 ++++++++++
 .../solrj/cloud/autoscaling/TestPolicy2.java    |  410 ++
 .../AbstractEmbeddedSolrServerTestCase.java     |  110 +
 .../client/solrj/embedded/JettyWebappTest.java  |  120 +
 .../embedded/LargeVolumeBinaryJettyTest.java    |   33 +
 .../solrj/embedded/LargeVolumeEmbeddedTest.java |   27 +
 .../solrj/embedded/LargeVolumeJettyTest.java    |   30 +
 .../embedded/MergeIndexesEmbeddedTest.java      |   64 +
 .../solrj/embedded/SolrExampleEmbeddedTest.java |   34 +
 .../solrj/embedded/SolrExampleJettyTest.java    |  104 +
 .../SolrExampleStreamingBinaryTest.java         |   37 +
 .../embedded/SolrExampleStreamingTest.java      |  147 +
 .../solrj/embedded/TestEmbeddedSolrServer.java  |   71 +
 .../solrj/embedded/TestSolrProperties.java      |  132 +
 .../solrj/impl/BasicHttpSolrClientTest.java     |  835 +++
 .../solrj/impl/CloudSolrClientBadInputTest.java |   73 +
 .../solrj/impl/CloudSolrClientBuilderTest.java  |  106 +
 .../solrj/impl/CloudSolrClientCacheTest.java    |  190 +
 .../CloudSolrClientMultiConstructorTest.java    |  113 +
 .../solrj/impl/CloudSolrClientRetryTest.java    |   79 +
 .../client/solrj/impl/CloudSolrClientTest.java  |  977 ++++
 .../ConcurrentUpdateSolrClientBadInputTest.java |   91 +
 .../ConcurrentUpdateSolrClientBuilderTest.java  |   42 +
 ...rentUpdateSolrClientMultiCollectionTest.java |   94 +
 .../impl/ConcurrentUpdateSolrClientTest.java    |  349 ++
 .../client/solrj/impl/HttpClientUtilTest.java   |  112 +
 .../solrj/impl/HttpSolrClientBadInputTest.java  |   93 +
 .../solrj/impl/HttpSolrClientBuilderTest.java   |   76 +
 .../solrj/impl/HttpSolrClientConPoolTest.java   |  188 +
 .../impl/HttpSolrClientSSLAuthConPoolTest.java  |   40 +
 .../impl/LBHttpSolrClientBadInputTest.java      |   89 +
 .../solrj/impl/LBHttpSolrClientBuilderTest.java |   65 +
 .../client/solrj/impl/LBHttpSolrClientTest.java |   63 +
 .../solrj/impl/SolrPortAwareCookieSpecTest.java |  196 +
 .../impl/TestCloudSolrClientConnections.java    |   90 +
 .../apache/solr/client/solrj/io/TestLang.java   |   99 +
 .../solrj/io/graph/GraphExpressionTest.java     |  976 ++++
 .../solr/client/solrj/io/graph/GraphTest.java   |  257 +
 .../client/solrj/io/sql/JdbcDriverTest.java     |   84 +
 .../solr/client/solrj/io/sql/JdbcTest.java      |  900 ++++
 .../client/solrj/io/stream/JDBCStreamTest.java  |  692 +++
 .../solrj/io/stream/MathExpressionTest.java     | 5067 ++++++++++++++++++
 .../solrj/io/stream/RecordCountStream.java      |  130 +
 .../io/stream/SelectWithEvaluatorsTest.java     |  266 +
 .../solrj/io/stream/StreamDecoratorTest.java    | 4068 ++++++++++++++
 .../solrj/io/stream/StreamExpressionTest.java   | 2602 +++++++++
 .../stream/StreamExpressionToExpessionTest.java |  445 ++
 .../StreamExpressionToExplanationTest.java      |  271 +
 .../client/solrj/io/stream/StreamingTest.java   | 2524 +++++++++
 .../stream/eval/AbsoluteValueEvaluatorTest.java |  128 +
 .../solrj/io/stream/eval/AddEvaluatorTest.java  |  331 ++
 .../solrj/io/stream/eval/AndEvaluatorTest.java  |  123 +
 .../io/stream/eval/AppendEvaluatorTest.java     |   62 +
 .../io/stream/eval/ArcCosineEvaluatorTest.java  |   91 +
 .../io/stream/eval/ArcSineEvaluatorTest.java    |   92 +
 .../io/stream/eval/ArcTangentEvaluatorTest.java |   91 +
 .../io/stream/eval/ArrayEvaluatorTest.java      |  155 +
 .../solrj/io/stream/eval/AscEvaluatorTest.java  |  107 +
 .../io/stream/eval/CeilingEvaluatorTest.java    |   97 +
 .../io/stream/eval/CoalesceEvaluatorTest.java   |  112 +
 .../stream/eval/ConversionEvaluatorsTest.java   |  129 +
 .../stream/eval/CorrelationEvaluatorTest.java   |   55 +
 .../io/stream/eval/CosineEvaluatorTest.java     |   91 +
 .../io/stream/eval/CubedRootEvaluatorTest.java  |   91 +
 .../CumulativeProbabilityEvaluatorTest.java     |   56 +
 .../io/stream/eval/DivideEvaluatorTest.java     |  164 +
 .../EmpiricalDistributionEvaluatorTest.java     |   57 +
 .../io/stream/eval/EqualToEvaluatorTest.java    |  263 +
 .../stream/eval/ExclusiveOrEvaluatorTest.java   |  123 +
 .../io/stream/eval/FieldValueEvaluatorTest.java |  114 +
 .../io/stream/eval/FloorEvaluatorTest.java      |   94 +
 .../eval/GreaterThanEqualToEvaluatorTest.java   |  249 +
 .../stream/eval/GreaterThanEvaluatorTest.java   |  249 +
 .../eval/HyperbolicCosineEvaluatorTest.java     |   92 +
 .../eval/HyperbolicSineEvaluatorTest.java       |   92 +
 .../eval/HyperbolicTangentEvaluatorTest.java    |   92 +
 .../io/stream/eval/LengthEvaluatorTest.java     |  119 +
 .../eval/LessThanEqualToEvaluatorTest.java      |  256 +
 .../io/stream/eval/LessThanEvaluatorTest.java   |  249 +
 .../io/stream/eval/ModuloEvaluatorTest.java     |  164 +
 .../io/stream/eval/MultiplyEvaluatorTest.java   |  192 +
 .../io/stream/eval/NaturalLogEvaluatorTest.java |   99 +
 .../eval/NormalDistributionEvaluatorTest.java   |   54 +
 .../io/stream/eval/NormalizeEvaluatorTest.java  |   74 +
 .../solrj/io/stream/eval/NotEvaluatorTest.java  |   80 +
 .../solrj/io/stream/eval/OrEvaluatorTest.java   |  123 +
 .../io/stream/eval/PowerEvaluatorTest.java      |  119 +
 .../io/stream/eval/RawValueEvaluatorTest.java   |   69 +
 .../io/stream/eval/RecursiveEvaluatorTest.java  |   85 +
 .../io/stream/eval/RegressionEvaluatorTest.java |   63 +
 .../io/stream/eval/ReverseEvaluatorTest.java    |   57 +
 .../io/stream/eval/RoundEvaluatorTest.java      |   95 +
 .../solrj/io/stream/eval/SineEvaluatorTest.java |   92 +
 .../io/stream/eval/SquareRootEvaluatorTest.java |   92 +
 .../io/stream/eval/SubtractEvaluatorTest.java   |  188 +
 .../io/stream/eval/TangentEvaluatorTest.java    |   92 +
 .../io/stream/eval/TemporalEvaluatorsTest.java  |  302 ++
 .../eval/UniformDistributionEvaluatorTest.java  |   55 +
 .../solrj/io/stream/eval/UuidEvaluatorTest.java |   52 +
 .../stream/expr/StreamExpressionParserTest.java |  102 +
 .../io/stream/ops/ConcatOperationTest.java      |  289 +
 .../solrj/io/stream/ops/OperationsTest.java     |  250 +
 .../solr/client/solrj/request/SchemaTest.java   |  905 ++++
 .../solr/client/solrj/request/SolrPingTest.java |   84 +
 .../request/TestCollectionAdminRequest.java     |   91 +
 .../request/TestConfigSetAdminRequest.java      |   73 +
 .../client/solrj/request/TestCoreAdmin.java     |  353 ++
 .../request/TestDelegationTokenRequest.java     |   70 +
 .../client/solrj/request/TestUpdateRequest.java |   68 +
 .../solrj/request/TestUpdateRequestCodec.java   |  259 +
 .../solrj/request/TestV1toV2ApiMapper.java      |   72 +
 .../client/solrj/request/TestV2Request.java     |  102 +
 .../solrj/response/AnlysisResponseBaseTest.java |  133 +
 .../response/DocumentAnalysisResponseTest.java  |  151 +
 .../client/solrj/response/FacetFieldTest.java   |   32 +
 .../response/FieldAnalysisResponseTest.java     |  122 +
 .../solrj/response/NoOpResponseParserTest.java  |  129 +
 .../solrj/response/QueryResponseTest.java       |  302 ++
 .../solrj/response/TermsResponseTest.java       |   76 +
 .../solrj/response/TestClusteringResponse.java  |   72 +
 .../response/TestDelegationTokenResponse.java   |  140 +
 .../solrj/response/TestSpellCheckResponse.java  |  193 +
 .../solrj/response/TestSuggesterResponse.java   |  127 +
 .../solr/client/solrj/util/ClientUtilsTest.java |   36 +
 .../apache/solr/common/SolrDocumentTest.java    |  215 +
 .../solr/common/TestToleratedUpdateError.java   |  196 +
 .../solr/common/cloud/SolrZkClientTest.java     |  153 +
 .../cloud/TestCloudCollectionsListeners.java    |  310 ++
 .../cloud/TestCollectionStateWatchers.java      |  310 ++
 .../solr/common/cloud/TestZkConfigManager.java  |  219 +
 .../common/params/CommonAdminParamsTest.java    |   30 +
 .../solr/common/params/CommonParamsTest.java    |   36 +
 .../common/params/ModifiableSolrParamsTest.java |  157 +
 .../solr/common/params/ShardParamsTest.java     |   81 +
 .../solr/common/params/SolrParamTest.java       |  323 ++
 .../solr/common/util/ContentStreamTest.java     |  198 +
 .../solr/common/util/JsonValidatorTest.java     |  200 +
 .../apache/solr/common/util/NamedListTest.java  |  211 +
 .../solr/common/util/TestFastInputStream.java   |   93 +
 .../apache/solr/common/util/TestFastWriter.java |  125 +
 .../org/apache/solr/common/util/TestHash.java   |  100 +
 .../solr/common/util/TestJavaBinCodec.java      |  574 ++
 .../solr/common/util/TestJsonRecordReader.java  |  733 +++
 .../solr/common/util/TestNamedListCodec.java    |  285 +
 .../apache/solr/common/util/TestPathTrie.java   |   61 +
 .../apache/solr/common/util/TestRetryUtil.java  |   71 +
 .../solr/common/util/TestSolrJsonWriter.java    |   60 +
 .../apache/solr/common/util/TestTimeSource.java |   54 +
 .../solr/common/util/TestValidatingJsonMap.java |   52 +
 .../solr/common/util/TestXMLEscaping.java       |   67 +
 .../apache/solr/common/util/URLUtilTest.java    |   37 +
 .../UsingSolrJRefGuideExamplesTest.java         |  286 -
 .../ref_guide_examples/ZkConfigFilesTest.java   |   94 -
 ...ollectionAdminRequestRequiredParamsTest.java |  218 -
 .../apache/solr/client/solrj/GetByIdTest.java   |  114 -
 .../solr/client/solrj/LargeVolumeTestBase.java  |  132 -
 .../solrj/MergeIndexesExampleTestBase.java      |  181 -
 .../client/solrj/SolrExampleBinaryTest.java     |   56 -
 .../solr/client/solrj/SolrExampleTests.java     | 2182 --------
 .../solr/client/solrj/SolrExampleTestsBase.java |  269 -
 .../solr/client/solrj/SolrExampleXMLTest.java   |   49 -
 .../solr/client/solrj/SolrExceptionTest.java    |   64 -
 .../apache/solr/client/solrj/SolrQueryTest.java |  459 --
 .../client/solrj/SolrSchemalessExampleTest.java |  149 -
 .../solr/client/solrj/StartSolrJetty.java       |   71 -
 .../solr/client/solrj/TestBatchUpdate.java      |  137 -
 .../solr/client/solrj/TestLBHttpSolrClient.java |  333 --
 .../client/solrj/TestSolrJErrorHandling.java    |  376 --
 .../solrj/beans/TestDocumentObjectBinder.java   |  296 -
 .../solrj/cloud/autoscaling/TestPolicy.java     | 2691 ----------
 .../solrj/cloud/autoscaling/TestPolicy2.java    |  410 --
 .../AbstractEmbeddedSolrServerTestCase.java     |  110 -
 .../client/solrj/embedded/JettyWebappTest.java  |  120 -
 .../embedded/LargeVolumeBinaryJettyTest.java    |   33 -
 .../solrj/embedded/LargeVolumeEmbeddedTest.java |   27 -
 .../solrj/embedded/LargeVolumeJettyTest.java    |   30 -
 .../embedded/MergeIndexesEmbeddedTest.java      |   64 -
 .../solrj/embedded/SolrExampleEmbeddedTest.java |   34 -
 .../solrj/embedded/SolrExampleJettyTest.java    |  104 -
 .../SolrExampleStreamingBinaryTest.java         |   37 -
 .../embedded/SolrExampleStreamingTest.java      |  147 -
 .../solrj/embedded/TestEmbeddedSolrServer.java  |   71 -
 .../solrj/embedded/TestSolrProperties.java      |  132 -
 .../solrj/impl/BasicHttpSolrClientTest.java     |  835 ---
 .../solrj/impl/CloudSolrClientBadInputTest.java |   73 -
 .../solrj/impl/CloudSolrClientBuilderTest.java  |  106 -
 .../solrj/impl/CloudSolrClientCacheTest.java    |  190 -
 .../CloudSolrClientMultiConstructorTest.java    |  113 -
 .../solrj/impl/CloudSolrClientRetryTest.java    |   79 -
 .../client/solrj/impl/CloudSolrClientTest.java  |  977 ----
 .../ConcurrentUpdateSolrClientBadInputTest.java |   91 -
 .../ConcurrentUpdateSolrClientBuilderTest.java  |   42 -
 ...rentUpdateSolrClientMultiCollectionTest.java |   94 -
 .../impl/ConcurrentUpdateSolrClientTest.java    |  349 --
 .../client/solrj/impl/HttpClientUtilTest.java   |  112 -
 .../solrj/impl/HttpSolrClientBadInputTest.java  |   93 -
 .../solrj/impl/HttpSolrClientBuilderTest.java   |   76 -
 .../solrj/impl/HttpSolrClientConPoolTest.java   |  188 -
 .../impl/HttpSolrClientSSLAuthConPoolTest.java  |   40 -
 .../impl/LBHttpSolrClientBadInputTest.java      |   89 -
 .../solrj/impl/LBHttpSolrClientBuilderTest.java |   65 -
 .../client/solrj/impl/LBHttpSolrClientTest.java |   63 -
 .../solrj/impl/SolrPortAwareCookieSpecTest.java |  196 -
 .../impl/TestCloudSolrClientConnections.java    |   90 -
 .../apache/solr/client/solrj/io/TestLang.java   |   99 -
 .../solrj/io/graph/GraphExpressionTest.java     |  976 ----
 .../solr/client/solrj/io/graph/GraphTest.java   |  257 -
 .../client/solrj/io/sql/JdbcDriverTest.java     |   84 -
 .../solr/client/solrj/io/sql/JdbcTest.java      |  900 ----
 .../client/solrj/io/stream/JDBCStreamTest.java  |  692 ---
 .../solrj/io/stream/MathExpressionTest.java     | 5067 ------------------
 .../solrj/io/stream/RecordCountStream.java      |  130 -
 .../io/stream/SelectWithEvaluatorsTest.java     |  266 -
 .../solrj/io/stream/StreamDecoratorTest.java    | 4068 --------------
 .../solrj/io/stream/StreamExpressionTest.java   | 2602 ---------
 .../stream/StreamExpressionToExpessionTest.java |  445 --
 .../StreamExpressionToExplanationTest.java      |  271 -
 .../client/solrj/io/stream/StreamingTest.java   | 2524 ---------
 .../stream/eval/AbsoluteValueEvaluatorTest.java |  128 -
 .../solrj/io/stream/eval/AddEvaluatorTest.java  |  331 --
 .../solrj/io/stream/eval/AndEvaluatorTest.java  |  123 -
 .../io/stream/eval/AppendEvaluatorTest.java     |   62 -
 .../io/stream/eval/ArcCosineEvaluatorTest.java  |   91 -
 .../io/stream/eval/ArcSineEvaluatorTest.java    |   92 -
 .../io/stream/eval/ArcTangentEvaluatorTest.java |   91 -
 .../io/stream/eval/ArrayEvaluatorTest.java      |  155 -
 .../solrj/io/stream/eval/AscEvaluatorTest.java  |  107 -
 .../io/stream/eval/CeilingEvaluatorTest.java    |   97 -
 .../io/stream/eval/CoalesceEvaluatorTest.java   |  112 -
 .../stream/eval/ConversionEvaluatorsTest.java   |  129 -
 .../stream/eval/CorrelationEvaluatorTest.java   |   55 -
 .../io/stream/eval/CosineEvaluatorTest.java     |   91 -
 .../io/stream/eval/CubedRootEvaluatorTest.java  |   91 -
 .../CumulativeProbabilityEvaluatorTest.java     |   56 -
 .../io/stream/eval/DivideEvaluatorTest.java     |  164 -
 .../EmpiricalDistributionEvaluatorTest.java     |   57 -
 .../io/stream/eval/EqualToEvaluatorTest.java    |  263 -
 .../stream/eval/ExclusiveOrEvaluatorTest.java   |  123 -
 .../io/stream/eval/FieldValueEvaluatorTest.java |  114 -
 .../io/stream/eval/FloorEvaluatorTest.java      |   94 -
 .../eval/GreaterThanEqualToEvaluatorTest.java   |  249 -
 .../stream/eval/GreaterThanEvaluatorTest.java   |  249 -
 .../eval/HyperbolicCosineEvaluatorTest.java     |   92 -
 .../eval/HyperbolicSineEvaluatorTest.java       |   92 -
 .../eval/HyperbolicTangentEvaluatorTest.java    |   92 -
 .../io/stream/eval/LengthEvaluatorTest.java     |  119 -
 .../eval/LessThanEqualToEvaluatorTest.java      |  256 -
 .../io/stream/eval/LessThanEvaluatorTest.java   |  249 -
 .../io/stream/eval/ModuloEvaluatorTest.java     |  164 -
 .../io/stream/eval/MultiplyEvaluatorTest.java   |  192 -
 .../io/stream/eval/NaturalLogEvaluatorTest.java |   99 -
 .../eval/NormalDistributionEvaluatorTest.java   |   54 -
 .../io/stream/eval/NormalizeEvaluatorTest.java  |   74 -
 .../solrj/io/stream/eval/NotEvaluatorTest.java  |   80 -
 .../solrj/io/stream/eval/OrEvaluatorTest.java   |  123 -
 .../io/stream/eval/PowerEvaluatorTest.java      |  119 -
 .../io/stream/eval/RawValueEvaluatorTest.java   |   69 -
 .../io/stream/eval/RecursiveEvaluatorTest.java  |   85 -
 .../io/stream/eval/RegressionEvaluatorTest.java |   63 -
 .../io/stream/eval/ReverseEvaluatorTest.java    |   57 -
 .../io/stream/eval/RoundEvaluatorTest.java      |   95 -
 .../solrj/io/stream/eval/SineEvaluatorTest.java |   92 -
 .../io/stream/eval/SquareRootEvaluatorTest.java |   92 -
 .../io/stream/eval/SubtractEvaluatorTest.java   |  188 -
 .../io/stream/eval/TangentEvaluatorTest.java    |   92 -
 .../io/stream/eval/TemporalEvaluatorsTest.java  |  302 --
 .../eval/UniformDistributionEvaluatorTest.java  |   55 -
 .../solrj/io/stream/eval/UuidEvaluatorTest.java |   52 -
 .../stream/expr/StreamExpressionParserTest.java |  102 -
 .../io/stream/ops/ConcatOperationTest.java      |  289 -
 .../solrj/io/stream/ops/OperationsTest.java     |  250 -
 .../solr/client/solrj/request/SchemaTest.java   |  905 ----
 .../solr/client/solrj/request/SolrPingTest.java |   84 -
 .../request/TestCollectionAdminRequest.java     |   91 -
 .../request/TestConfigSetAdminRequest.java      |   73 -
 .../client/solrj/request/TestCoreAdmin.java     |  353 --
 .../request/TestDelegationTokenRequest.java     |   70 -
 .../client/solrj/request/TestUpdateRequest.java |   68 -
 .../solrj/request/TestUpdateRequestCodec.java   |  259 -
 .../solrj/request/TestV1toV2ApiMapper.java      |   72 -
 .../client/solrj/request/TestV2Request.java     |  102 -
 .../solrj/response/AnlysisResponseBaseTest.java |  133 -
 .../response/DocumentAnalysisResponseTest.java  |  151 -
 .../client/solrj/response/FacetFieldTest.java   |   32 -
 .../response/FieldAnalysisResponseTest.java     |  122 -
 .../solrj/response/NoOpResponseParserTest.java  |  129 -
 .../solrj/response/QueryResponseTest.java       |  302 --
 .../solrj/response/TermsResponseTest.java       |   76 -
 .../solrj/response/TestClusteringResponse.java  |   72 -
 .../response/TestDelegationTokenResponse.java   |  140 -
 .../solrj/response/TestSpellCheckResponse.java  |  193 -
 .../solrj/response/TestSuggesterResponse.java   |  127 -
 .../solr/client/solrj/util/ClientUtilsTest.java |   36 -
 .../apache/solr/common/SolrDocumentTest.java    |  215 -
 .../solr/common/TestToleratedUpdateError.java   |  196 -
 .../solr/common/cloud/SolrZkClientTest.java     |  153 -
 .../cloud/TestCloudCollectionsListeners.java    |  310 --
 .../cloud/TestCollectionStateWatchers.java      |  310 --
 .../solr/common/cloud/TestZkConfigManager.java  |  219 -
 .../common/params/CommonAdminParamsTest.java    |   30 -
 .../solr/common/params/CommonParamsTest.java    |   36 -
 .../common/params/ModifiableSolrParamsTest.java |  157 -
 .../solr/common/params/ShardParamsTest.java     |   81 -
 .../solr/common/params/SolrParamTest.java       |  323 --
 .../solr/common/util/ContentStreamTest.java     |  198 -
 .../solr/common/util/JsonValidatorTest.java     |  200 -
 .../apache/solr/common/util/NamedListTest.java  |  211 -
 .../solr/common/util/TestFastInputStream.java   |   93 -
 .../apache/solr/common/util/TestFastWriter.java |  125 -
 .../org/apache/solr/common/util/TestHash.java   |  100 -
 .../solr/common/util/TestJavaBinCodec.java      |  574 --
 .../solr/common/util/TestJsonRecordReader.java  |  733 ---
 .../solr/common/util/TestNamedListCodec.java    |  285 -
 .../apache/solr/common/util/TestPathTrie.java   |   61 -
 .../apache/solr/common/util/TestRetryUtil.java  |   71 -
 .../solr/common/util/TestSolrJsonWriter.java    |   60 -
 .../apache/solr/common/util/TestTimeSource.java |   54 -
 .../solr/common/util/TestValidatingJsonMap.java |   52 -
 .../solr/common/util/TestXMLEscaping.java       |   67 -
 .../apache/solr/common/util/URLUtilTest.java    |   37 -
 solr/solrj/src/test/resources/solrj/README      |   21 +
 solr/solrj/src/test/resources/solrj/books.csv   |   11 +
 solr/solrj/src/test/resources/solrj/docs1.xml   |   56 +
 solr/solrj/src/test/resources/solrj/docs2.xml   |   77 +
 .../test/resources/solrj/javabin_backcompat.bin |  Bin 0 -> 169 bytes
 .../solrj/javabin_backcompat_child_docs.bin     |    1 +
 .../solrj/sampleClusteringResponse.xml          |  112 +
 .../resources/solrj/sampleGroupResponse.xml     |  384 ++
 .../solrj/sampleIntervalFacetsResponse.xml      |  206 +
 .../solrj/sampleRangeFacetResponse.xml          |   44 +
 .../solrj/sampleSimpleGroupResponse.xml         |  101 +
 .../solr/autoscaling/testAddMissingReplica.json |  123 +
 .../testAutoScalingHandlerFailure.json          |  141 +
 ...tAutoscalingPreferencesUsedWithNoPolicy.json |   53 +
 .../testComputePlanAfterNodeAdded.json          |   16 +
 .../solr/autoscaling/testCoresSuggestions.json  |   17 +
 .../testCreateCollectionWithEmptyPolicy.json    |   20 +
 .../solr/autoscaling/testDiskSpaceHint.json     |   16 +
 .../solr/autoscaling/testEqualOnNonNode.json    |   83 +
 .../solr/autoscaling/testFreeDiskDeviation.json |   35 +
 .../autoscaling/testFreeDiskSuggestions.json    |   27 +
 .../autoscaling/testFreediskPercentage.json     |   25 +
 .../autoscaling/testMoveReplicaSuggester.json   |   15 +
 .../testMoveReplicasInMultipleCollections.json  |   88 +
 .../solrj/solr/autoscaling/testPolicy.json      |   41 +
 .../solr/autoscaling/testPortSuggestions.json   |   22 +
 .../testReplicaCountSuggestions.json            |   15 +
 .../solr/autoscaling/testReplicaPercentage.json |   46 +
 .../autoscaling/testReplicaZonesPercentage.json |   15 +
 .../testScheduledTriggerFailure.json            |   52 +
 .../solrj/solr/autoscaling/testSortError.json   |  225 +
 .../autoscaling/testSuggestionsRebalance2.json  |  130 +
 .../testSuggestionsRebalanceOnly.json           |  105 +
 .../autoscaling/testSysPropSuggestions.json     |  119 +
 .../autoscaling/testSyspropSuggestions1.json    |   24 +
 .../autoscaling/testUtilizeNodeFailure.json     |   69 +
 .../autoscaling/testUtilizeNodeFailure2.json    |   66 +
 .../solr/autoscaling/testViolationOutput.json   |   22 +
 .../solr/autoscaling/testWithCollection.json    |   21 +
 .../testWithCollectionMoveReplica.json          |   28 +
 .../testWithCollectionMoveVsAddSuggestions.json |   49 +
 .../testWithCollectionSuggestions.json          |   21 +
 .../collection1/conf/schema-replication1.xml    |   39 +
 .../solrj/solr/collection1/conf/schema-sql.xml  |  623 +++
 .../solrj/solr/collection1/conf/schema.xml      |  593 ++
 .../conf/solrconfig-managed-schema.xml          |   32 +
 .../solr/collection1/conf/solrconfig-slave1.xml |   57 +
 .../solr/collection1/conf/solrconfig-sql.xml    |   72 +
 .../solrj/solr/collection1/conf/solrconfig.xml  |   60 +
 .../configset-1/conf/schema-minimal.xml         |   21 +
 .../configset-1/conf/solrconfig-minimal.xml     |   57 +
 .../solr/configsets/configset-2/conf/schema.xml |   21 +
 .../configsets/configset-2/conf/solrconfig.xml  |   50 +
 .../solrj/solr/configsets/ml/conf/schema.xml    |   77 +
 .../solr/configsets/ml/conf/solrconfig.xml      |   51 +
 .../solr/configsets/shared/conf/schema.xml      |   57 +
 .../solr/configsets/shared/conf/solrconfig.xml  |   56 +
 .../configsets/shared/conf/stopwords-en.txt     |   16 +
 .../configsets/shared/conf/stopwords-fr.txt     |   16 +
 .../solr/configsets/streaming/conf/schema.xml   |  621 +++
 .../configsets/streaming/conf/solrconfig.xml    |   57 +
 .../solrj/solr/crazy-path-to-schema.xml         |   49 +
 .../resources/solrj/solr/multicore/README.txt   |    7 +
 .../solrj/solr/multicore/core0/conf/schema.xml  |   33 +
 .../solr/multicore/core0/conf/solrconfig.xml    |   78 +
 .../solrj/solr/multicore/core0/core.properties  |    0
 .../solrj/solr/multicore/core1/conf/schema.xml  |   34 +
 .../solr/multicore/core1/conf/solrconfig.xml    |   78 +
 .../solrj/solr/multicore/core1/core.properties  |    0
 .../solr/multicore/exampledocs/ipod_other.xml   |   34 +
 .../solr/multicore/exampledocs/ipod_video.xml   |   22 +
 .../resources/solrj/solr/multicore/solr.xml     |   22 +
 .../test/resources/solrj/solr/multicore/zoo.cfg |   17 +
 .../solr/shared/collection1/core.properties     |    1 +
 .../solrj/solr/shared/core0/core.properties     |    8 +
 .../solrj/solr/shared/core1/core.properties     |    6 +
 .../test/resources/solrj/solr/shared/solr.xml   |   42 +
 .../src/test/resources/solrj/solr/solr.xml      |   46 +
 .../src/test/resources/solrj/updateReq_4_5.bin  |  Bin 0 -> 290 bytes
 solr/test-framework/build.gradle                |   13 +
 .../solr/BaseDistributedSearchTestCase.java     | 1147 ----
 .../src/java/org/apache/solr/JSONTestUtil.java  |  449 --
 .../apache/solr/SolrIgnoredThreadsFilter.java   |   65 -
 .../java/org/apache/solr/SolrJettyTestBase.java |  230 -
 .../java/org/apache/solr/SolrTestCaseHS.java    |  571 --
 .../java/org/apache/solr/SolrTestCaseJ4.java    | 2838 ----------
 .../solr/analysis/MockCharFilterFactory.java    |   44 -
 .../solr/analysis/MockTokenFilterFactory.java   |   64 -
 .../solr/analysis/MockTokenizerFactory.java     |   58 -
 .../analysis/StringMockSolrResourceLoader.java  |   56 -
 .../java/org/apache/solr/analysis/package.html  |   23 -
 .../solr/cloud/AbstractDistribZkTestBase.java   |  336 --
 .../cloud/AbstractFullDistribZkTestBase.java    | 2279 --------
 .../apache/solr/cloud/AbstractZkTestCase.java   |  186 -
 .../java/org/apache/solr/cloud/ChaosMonkey.java |  735 ---
 .../org/apache/solr/cloud/CloudInspectUtil.java |  241 -
 .../org/apache/solr/cloud/ConfigRequest.java    |   55 -
 .../java/org/apache/solr/cloud/IpTables.java    |   76 -
 .../apache/solr/cloud/MiniSolrCloudCluster.java |  591 --
 .../org/apache/solr/cloud/MockSolrZkClient.java |   44 -
 .../apache/solr/cloud/MockZkStateReader.java    |   39 -
 .../solr/cloud/MultiSolrCloudTestCase.java      |  107 -
 .../java/org/apache/solr/cloud/SocketProxy.java |  460 --
 .../apache/solr/cloud/SolrCloudTestCase.java    |  423 --
 .../solr/cloud/StoppableCommitThread.java       |   69 -
 .../solr/cloud/StoppableIndexingThread.java     |  192 -
 .../solr/cloud/StoppableSearchThread.java       |   83 -
 .../org/apache/solr/cloud/ZkTestServer.java     |  657 ---
 .../src/java/org/apache/solr/cloud/package.html |   23 -
 .../solr/core/AbstractBadConfigTestBase.java    |   84 -
 .../solr/core/MockConcurrentMergeScheduler.java |   35 -
 .../apache/solr/core/MockDirectoryFactory.java  |   96 -
 .../solr/core/MockFSDirectoryFactory.java       |   73 -
 .../src/java/org/apache/solr/core/package.html  |   23 -
 .../component/TrackingShardHandlerFactory.java  |  290 -
 .../apache/solr/handler/component/package.html  |   24 -
 .../src/java/org/apache/solr/package.html       |   23 -
 .../processor/BufferingRequestProcessor.java    |   64 -
 .../processor/UpdateProcessorTestBase.java      |  168 -
 .../apache/solr/update/processor/package.html   |   23 -
 .../apache/solr/util/BadHdfsThreadsFilter.java  |   37 -
 .../solr/util/BadMrClusterThreadsFilter.java    |   37 -
 .../solr/util/BadZookeeperThreadsFilter.java    |   34 -
 .../org/apache/solr/util/BaseTestHarness.java   |  282 -
 .../org/apache/solr/util/DOMUtilTestBase.java   |   55 -
 .../org/apache/solr/util/ExternalPaths.java     |   78 -
 .../src/java/org/apache/solr/util/LogLevel.java |  102 -
 .../apache/solr/util/RESTfulServerProvider.java |   20 -
 .../solr/util/RandomForceMergePolicy.java       |   37 -
 .../util/RandomForceMergePolicyFactory.java     |   37 -
 .../org/apache/solr/util/RandomMergePolicy.java |   45 -
 .../solr/util/RandomMergePolicyFactory.java     |   35 -
 .../java/org/apache/solr/util/RandomizeSSL.java |  174 -
 .../apache/solr/util/ReadOnlyCoresLocator.java  |   50 -
 .../java/org/apache/solr/util/RestTestBase.java |  560 --
 .../org/apache/solr/util/RestTestHarness.java   |  226 -
 .../util/RevertDefaultThreadHandlerRule.java    |   54 -
 .../org/apache/solr/util/SSLTestConfig.java     |  351 --
 .../java/org/apache/solr/util/TestHarness.java  |  475 --
 .../src/java/org/apache/solr/util/package.html  |   23 -
 solr/test-framework/src/java/overview.html      |   21 -
 .../solr/BaseDistributedSearchTestCase.java     | 1147 ++++
 .../main/java/org/apache/solr/JSONTestUtil.java |  449 ++
 .../apache/solr/SolrIgnoredThreadsFilter.java   |   65 +
 .../java/org/apache/solr/SolrJettyTestBase.java |  230 +
 .../java/org/apache/solr/SolrTestCaseHS.java    |  571 ++
 .../java/org/apache/solr/SolrTestCaseJ4.java    | 2838 ++++++++++
 .../solr/analysis/MockCharFilterFactory.java    |   44 +
 .../solr/analysis/MockTokenFilterFactory.java   |   64 +
 .../solr/analysis/MockTokenizerFactory.java     |   58 +
 .../analysis/StringMockSolrResourceLoader.java  |   56 +
 .../java/org/apache/solr/analysis/package.html  |   23 +
 .../solr/cloud/AbstractDistribZkTestBase.java   |  336 ++
 .../cloud/AbstractFullDistribZkTestBase.java    | 2279 ++++++++
 .../apache/solr/cloud/AbstractZkTestCase.java   |  186 +
 .../java/org/apache/solr/cloud/ChaosMonkey.java |  735 +++
 .../org/apache/solr/cloud/CloudInspectUtil.java |  241 +
 .../org/apache/solr/cloud/ConfigRequest.java    |   55 +
 .../java/org/apache/solr/cloud/IpTables.java    |   76 +
 .../apache/solr/cloud/MiniSolrCloudCluster.java |  591 ++
 .../org/apache/solr/cloud/MockSolrZkClient.java |   44 +
 .../apache/solr/cloud/MockZkStateReader.java    |   39 +
 .../solr/cloud/MultiSolrCloudTestCase.java      |  107 +
 .../java/org/apache/solr/cloud/SocketProxy.java |  460 ++
 .../apache/solr/cloud/SolrCloudTestCase.java    |  423 ++
 .../solr/cloud/StoppableCommitThread.java       |   69 +
 .../solr/cloud/StoppableIndexingThread.java     |  192 +
 .../solr/cloud/StoppableSearchThread.java       |   83 +
 .../org/apache/solr/cloud/ZkTestServer.java     |  657 +++
 .../java/org/apache/solr/cloud/package.html     |   23 +
 .../solr/core/AbstractBadConfigTestBase.java    |   84 +
 .../solr/core/MockConcurrentMergeScheduler.java |   35 +
 .../apache/solr/core/MockDirectoryFactory.java  |   96 +
 .../solr/core/MockFSDirectoryFactory.java       |   73 +
 .../main/java/org/apache/solr/core/package.html |   23 +
 .../component/TrackingShardHandlerFactory.java  |  290 +
 .../apache/solr/handler/component/package.html  |   24 +
 .../src/main/java/org/apache/solr/package.html  |   23 +
 .../processor/BufferingRequestProcessor.java    |   64 +
 .../processor/UpdateProcessorTestBase.java      |  168 +
 .../apache/solr/update/processor/package.html   |   23 +
 .../apache/solr/util/BadHdfsThreadsFilter.java  |   37 +
 .../solr/util/BadMrClusterThreadsFilter.java    |   37 +
 .../solr/util/BadZookeeperThreadsFilter.java    |   34 +
 .../org/apache/solr/util/BaseTestHarness.java   |  282 +
 .../org/apache/solr/util/DOMUtilTestBase.java   |   55 +
 .../org/apache/solr/util/ExternalPaths.java     |   78 +
 .../java/org/apache/solr/util/LogLevel.java     |  102 +
 .../apache/solr/util/RESTfulServerProvider.java |   20 +
 .../solr/util/RandomForceMergePolicy.java       |   37 +
 .../util/RandomForceMergePolicyFactory.java     |   37 +
 .../org/apache/solr/util/RandomMergePolicy.java |   45 +
 .../solr/util/RandomMergePolicyFactory.java     |   35 +
 .../java/org/apache/solr/util/RandomizeSSL.java |  174 +
 .../apache/solr/util/ReadOnlyCoresLocator.java  |   50 +
 .../java/org/apache/solr/util/RestTestBase.java |  560 ++
 .../org/apache/solr/util/RestTestHarness.java   |  226 +
 .../util/RevertDefaultThreadHandlerRule.java    |   54 +
 .../org/apache/solr/util/SSLTestConfig.java     |  351 ++
 .../java/org/apache/solr/util/TestHarness.java  |  475 ++
 .../main/java/org/apache/solr/util/package.html |   23 +
 solr/test-framework/src/main/java/overview.html |   21 +
 ...estConfig.hostname-and-ip-missmatch.keystore |  Bin 0 -> 2246 bytes
 .../resources/SSLTestConfig.testing.keystore    |  Bin 0 -> 2207 bytes
 .../src/main/resources/create-keystores.sh      |   37 +
 ...estConfig.hostname-and-ip-missmatch.keystore |  Bin 2246 -> 0 bytes
 .../resources/SSLTestConfig.testing.keystore    |  Bin 2207 -> 0 bytes
 .../src/resources/create-keystores.sh           |   37 -
 .../apache/solr/TestLogLevelAnnotations.java    |   43 +
 .../apache/solr/cloud/JettySolrRunnerTest.java  |   66 +
 .../solr/cloud/MiniSolrCloudClusterTest.java    |  105 +
 .../apache/solr/TestLogLevelAnnotations.java    |   43 -
 .../apache/solr/cloud/JettySolrRunnerTest.java  |   66 -
 .../solr/cloud/MiniSolrCloudClusterTest.java    |  105 -
 7338 files changed, 735800 insertions(+), 735478 deletions(-)
----------------------------------------------------------------------



[38/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/TimeRoutedAlias.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/TimeRoutedAlias.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/TimeRoutedAlias.java
deleted file mode 100644
index 1fb3d9e..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/TimeRoutedAlias.java
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.api.collections;
-
-import java.text.ParseException;
-import java.time.Instant;
-import java.time.ZoneOffset;
-import java.time.format.DateTimeFormatter;
-import java.time.format.DateTimeFormatterBuilder;
-import java.time.temporal.ChronoField;
-import java.util.AbstractMap;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Date;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.TimeZone;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Predicate;
-import java.util.function.Supplier;
-
-import com.google.common.base.Objects;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.Aliases;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.MapSolrParams;
-import org.apache.solr.common.params.RequiredSolrParams;
-import org.apache.solr.util.DateMathParser;
-import org.apache.solr.util.TimeZoneUtils;
-
-import static org.apache.solr.common.SolrException.ErrorCode.BAD_REQUEST;
-import static org.apache.solr.common.params.CommonParams.TZ;
-
-/**
- * Holds configuration for a routed alias, and some common code and constants.
- *
- * @see CreateAliasCmd
- * @see MaintainRoutedAliasCmd
- * @see org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessor
- */
-public class TimeRoutedAlias {
-
-  // These are parameter names to routed alias creation, AND are stored as metadata with the alias.
-  public static final String ROUTER_PREFIX = "router.";
-  public static final String ROUTER_TYPE_NAME = ROUTER_PREFIX + "name";
-  public static final String ROUTER_FIELD = ROUTER_PREFIX + "field";
-  public static final String ROUTER_START = ROUTER_PREFIX + "start";
-  public static final String ROUTER_INTERVAL = ROUTER_PREFIX + "interval";
-  public static final String ROUTER_MAX_FUTURE = ROUTER_PREFIX + "maxFutureMs";
-  public static final String ROUTER_PREEMPTIVE_CREATE_MATH = ROUTER_PREFIX + "preemptiveCreateMath";
-  public static final String ROUTER_AUTO_DELETE_AGE = ROUTER_PREFIX + "autoDeleteAge";
-  public static final String CREATE_COLLECTION_PREFIX = "create-collection.";
-  // plus TZ and NAME
-
-  /**
-   * Parameters required for creating a routed alias
-   */
-  public static final List<String> REQUIRED_ROUTER_PARAMS = Collections.unmodifiableList(Arrays.asList(
-      CommonParams.NAME,
-      ROUTER_TYPE_NAME,
-      ROUTER_FIELD,
-      ROUTER_START,
-      ROUTER_INTERVAL));
-
-  /**
-   * Optional parameters for creating a routed alias excluding parameters for collection creation.
-   */
-  //TODO let's find a way to remove this, as it's harder to maintain than the required list
-  public static final List<String> OPTIONAL_ROUTER_PARAMS = Collections.unmodifiableList(Arrays.asList(
-      ROUTER_MAX_FUTURE,
-      ROUTER_AUTO_DELETE_AGE,
-      ROUTER_PREEMPTIVE_CREATE_MATH,
-      TZ)); // kinda special
-
-  static Predicate<String> PARAM_IS_PROP =
-      key -> key.equals(TZ) ||
-          (key.startsWith(ROUTER_PREFIX) && !key.equals(ROUTER_START)) || //TODO reconsider START special case
-          key.startsWith(CREATE_COLLECTION_PREFIX);
-
-  public static final String ROUTED_ALIAS_NAME_CORE_PROP = "routedAliasName"; // core prop
-
-  // This format must be compatible with collection name limitations
-  private static final DateTimeFormatter DATE_TIME_FORMATTER = new DateTimeFormatterBuilder()
-      .append(DateTimeFormatter.ISO_LOCAL_DATE).appendPattern("[_HH[_mm[_ss]]]") //brackets mean optional
-      .parseDefaulting(ChronoField.HOUR_OF_DAY, 0)
-      .parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0)
-      .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0)
-      .toFormatter(Locale.ROOT).withZone(ZoneOffset.UTC); // deliberate -- collection names disregard TZ
-
-  public static Instant parseInstantFromCollectionName(String aliasName, String collection) {
-    final String dateTimePart = collection.substring(aliasName.length() + 1);
-    return DATE_TIME_FORMATTER.parse(dateTimePart, Instant::from);
-  }
-
-  public static String formatCollectionNameFromInstant(String aliasName, Instant timestamp) {
-    String nextCollName = DATE_TIME_FORMATTER.format(timestamp);
-    for (int i = 0; i < 3; i++) { // chop off seconds, minutes, hours
-      if (nextCollName.endsWith("_00")) {
-        nextCollName = nextCollName.substring(0, nextCollName.length()-3);
-      }
-    }
-    assert DATE_TIME_FORMATTER.parse(nextCollName, Instant::from).equals(timestamp);
-    return aliasName + "_" + nextCollName;
-  }
-
-
-  //
-  // Instance data and methods
-  //
-
-  private final String aliasName;
-  private final String routeField;
-  private final String intervalMath; // ex: +1DAY
-  private final long maxFutureMs;
-  private final String preemptiveCreateMath;
-  private final String autoDeleteAgeMath; // ex: /DAY-30DAYS  *optional*
-  private final TimeZone timeZone;
-
-  public TimeRoutedAlias(String aliasName, Map<String, String> aliasMetadata) {
-    this.aliasName = aliasName;
-    final MapSolrParams params = new MapSolrParams(aliasMetadata); // for convenience
-    final RequiredSolrParams required = params.required();
-    if (!"time".equals(required.get(ROUTER_TYPE_NAME))) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Only 'time' routed aliases is supported right now.");
-    }
-    routeField = required.get(ROUTER_FIELD);
-    intervalMath = required.get(ROUTER_INTERVAL);
-
-    //optional:
-    maxFutureMs = params.getLong(ROUTER_MAX_FUTURE, TimeUnit.MINUTES.toMillis(10));
-    // the date math configured is an interval to be subtracted from the most recent collection's timestamp
-    String pcmTmp = params.get(ROUTER_PREEMPTIVE_CREATE_MATH);
-    preemptiveCreateMath = pcmTmp != null ? (pcmTmp.startsWith("-") ? pcmTmp : "-" + pcmTmp) : null;
-    autoDeleteAgeMath = params.get(ROUTER_AUTO_DELETE_AGE); // no default
-    timeZone = TimeZoneUtils.parseTimezone(aliasMetadata.get(CommonParams.TZ));
-
-    // More validation:
-
-    // check that the date math is valid
-    final Date now = new Date();
-    try {
-      final Date after = new DateMathParser(now, timeZone).parseMath(intervalMath);
-      if (!after.after(now)) {
-        throw new SolrException(BAD_REQUEST, "duration must add to produce a time in the future");
-      }
-    } catch (Exception e) {
-      throw new SolrException(BAD_REQUEST, "bad " + TimeRoutedAlias.ROUTER_INTERVAL + ", " + e, e);
-    }
-
-    if (autoDeleteAgeMath != null) {
-      try {
-        final Date before =  new DateMathParser(now, timeZone).parseMath(autoDeleteAgeMath);
-        if (now.before(before)) {
-          throw new SolrException(BAD_REQUEST, "duration must round or subtract to produce a time in the past");
-        }
-      } catch (Exception e) {
-        throw new SolrException(BAD_REQUEST, "bad " + TimeRoutedAlias.ROUTER_AUTO_DELETE_AGE + ", " + e, e);
-      }
-    }
-    if (preemptiveCreateMath != null) {
-      try {
-        new DateMathParser().parseMath(preemptiveCreateMath);
-      } catch (ParseException e) {
-        throw new SolrException(BAD_REQUEST, "Invalid date math for preemptiveCreateMath:" + preemptiveCreateMath);
-      }
-    }
-
-    if (maxFutureMs < 0) {
-      throw new SolrException(BAD_REQUEST, ROUTER_MAX_FUTURE + " must be >= 0");
-    }
-  }
-
-  public String getAliasName() {
-    return aliasName;
-  }
-
-  public String getRouteField() {
-    return routeField;
-  }
-
-  public String getIntervalMath() {
-    return intervalMath;
-  }
-
-  public long getMaxFutureMs() {
-    return maxFutureMs;
-  }
-
-  public String getPreemptiveCreateWindow() {
-    return preemptiveCreateMath;
-  }
-
-  public String getAutoDeleteAgeMath() {
-    return autoDeleteAgeMath;
-  }
-
-  public TimeZone getTimeZone() {
-    return timeZone;
-  }
-
-  @Override
-  public String toString() {
-    return Objects.toStringHelper(this)
-        .add("aliasName", aliasName)
-        .add("routeField", routeField)
-        .add("intervalMath", intervalMath)
-        .add("maxFutureMs", maxFutureMs)
-        .add("preemptiveCreateMath", preemptiveCreateMath)
-        .add("autoDeleteAgeMath", autoDeleteAgeMath)
-        .add("timeZone", timeZone)
-        .toString();
-  }
-
-  /** Parses the timestamps from the collection list and returns them in reverse sorted order (most recent first) */
-  public List<Map.Entry<Instant,String>> parseCollections(Aliases aliases, Supplier<SolrException> aliasNotExist) {
-    final List<String> collections = aliases.getCollectionAliasListMap().get(aliasName);
-    if (collections == null) {
-      throw aliasNotExist.get();
-    }
-    // note: I considered TreeMap but didn't like the log(N) just to grab the most recent when we use it later
-    List<Map.Entry<Instant,String>> result = new ArrayList<>(collections.size());
-    for (String collection : collections) {
-      Instant colStartTime = parseInstantFromCollectionName(aliasName, collection);
-      result.add(new AbstractMap.SimpleImmutableEntry<>(colStartTime, collection));
-    }
-    result.sort((e1, e2) -> e2.getKey().compareTo(e1.getKey())); // reverse sort by key
-    return result;
-  }
-
-  /** Computes the timestamp of the next collection given the timestamp of the one before. */
-  public Instant computeNextCollTimestamp(Instant fromTimestamp) {
-    final Instant nextCollTimestamp =
-        DateMathParser.parseMath(Date.from(fromTimestamp), "NOW" + intervalMath, timeZone).toInstant();
-    assert nextCollTimestamp.isAfter(fromTimestamp);
-    return nextCollTimestamp;
-  }
-}

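For reference, the naming scheme implemented by formatCollectionNameFromInstant and parseInstantFromCollectionName above can be exercised in isolation. A minimal standalone sketch, with an invented alias name and timestamp, reproducing the formatter and the trailing-zero trimming:

import java.time.Instant;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeFormatterBuilder;
import java.time.temporal.ChronoField;
import java.util.Locale;

public class RoutedAliasNameSketch {
  // Same pattern as DATE_TIME_FORMATTER above: ISO date plus optional _HH[_mm[_ss]].
  private static final DateTimeFormatter FMT = new DateTimeFormatterBuilder()
      .append(DateTimeFormatter.ISO_LOCAL_DATE).appendPattern("[_HH[_mm[_ss]]]")
      .parseDefaulting(ChronoField.HOUR_OF_DAY, 0)
      .parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0)
      .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0)
      .toFormatter(Locale.ROOT).withZone(ZoneOffset.UTC);

  public static void main(String[] args) {
    Instant start = Instant.parse("2018-10-23T07:00:00Z"); // illustrative timestamp
    String name = FMT.format(start);                        // 2018-10-23_07_00_00
    for (int i = 0; i < 3; i++) {                           // trim all-zero ss, mm, HH
      if (name.endsWith("_00")) name = name.substring(0, name.length() - 3);
    }
    System.out.println("myAlias_" + name);                  // myAlias_2018-10-23_07
    System.out.println(FMT.parse(name, Instant::from));     // 2018-10-23T07:00:00Z
  }
}
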
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/UtilizeNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/UtilizeNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/UtilizeNodeCmd.java
deleted file mode 100644
index 818b16f..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/UtilizeNodeCmd.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.api.collections;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
-import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
-import org.apache.solr.client.solrj.request.V2Request;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
-import static org.apache.solr.common.params.AutoScalingParams.NODE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOVEREPLICA;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-public class UtilizeNodeCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public UtilizeNodeCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    ocmh.checkRequired(message, NODE);
-    String nodeName = message.getStr(NODE);
-    String async = message.getStr(ASYNC);
-    AutoScalingConfig autoScalingConfig = ocmh.overseer.getSolrCloudManager().getDistribStateManager().getAutoScalingConfig();
-
-    // first look for any violation whose suggested fix targets this node
-    List<ZkNodeProps> requests = new ArrayList<>();
-    // gather policy suggestions, if any
-    List<Suggester.SuggestionInfo> suggestions = PolicyHelper.getSuggestions(autoScalingConfig, ocmh.overseer.getSolrCloudManager());
-    for (Suggester.SuggestionInfo suggestionInfo : suggestions) {
-      log.info("op: " + suggestionInfo.getOperation());
-      String coll = null;
-      List<String> pieces = StrUtils.splitSmart(suggestionInfo.getOperation().getPath(), '/');
-      if (pieces.size() > 2) { // guard index 2; a size of exactly 2 would throw IndexOutOfBoundsException
-        coll = pieces.get(2);
-      } else {
-        continue;
-      }
-      log.info("coll: " + coll);
-      if (suggestionInfo.getOperation() instanceof V2Request) {
-        String targetNode = (String) Utils.getObjectByPath(suggestionInfo.getOperation(), true, "command/move-replica/targetNode");
-        if (Objects.equals(targetNode, nodeName)) {
-          String replica = (String) Utils.getObjectByPath(suggestionInfo.getOperation(), true, "command/move-replica/replica");
-          requests.add(new ZkNodeProps(COLLECTION_PROP, coll,
-              CollectionParams.TARGET_NODE, targetNode,
-              ASYNC, async,
-              REPLICA_PROP, replica));
-        }
-      }
-    }
-    executeAll(requests);
-    PolicyHelper.SessionWrapper sessionWrapper = PolicyHelper.getSession(ocmh.overseer.getSolrCloudManager());
-    Policy.Session session = sessionWrapper.get();
-    Suggester initialsuggester = session.getSuggester(MOVEREPLICA)
-        .hint(Suggester.Hint.TARGET_NODE, nodeName);
-    for (;;) {
-      Suggester suggester = session.getSuggester(MOVEREPLICA)
-          .hint(Suggester.Hint.TARGET_NODE, nodeName);
-      SolrRequest request = suggester.getSuggestion();
-      if (requests.size() > 10) {
-        log.info("too_many_suggestions");
-        PolicyHelper.logState(ocmh.overseer.getSolrCloudManager(), initialsuggester);
-        break;
-      }
-      log.info("SUGGESTION: {}", request);
-      if (request == null) break;
-      session = suggester.getSession();
-      requests.add(new ZkNodeProps(COLLECTION_PROP, request.getParams().get(COLLECTION_PROP),
-          CollectionParams.TARGET_NODE, request.getParams().get(CollectionParams.TARGET_NODE),
-          REPLICA_PROP, request.getParams().get(REPLICA_PROP),
-          ASYNC, request.getParams().get(ASYNC)));
-    }
-    log.info("total_suggestions: {}", requests.size());
-    if (requests.isEmpty()) {
-      PolicyHelper.logState(ocmh.overseer.getSolrCloudManager(), initialsuggester);
-    }
-    sessionWrapper.returnSession(session);
-    try {
-      executeAll(requests);
-    } finally {
-      sessionWrapper.release();
-    }
-  }
-
-  private void executeAll(List<ZkNodeProps> requests) throws Exception {
-    if (requests.isEmpty()) return;
-    for (ZkNodeProps props : requests) {
-      NamedList result = new NamedList();
-      ocmh.commandMap.get(MOVEREPLICA)
-          .call(ocmh.overseer.getSolrCloudManager().getClusterStateProvider().getClusterState(),
-              props,
-              result);
-    }
-    requests.clear();
-  }
-
-}

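The second half of call() above is a fetch-until-empty loop against the policy session. A condensed sketch of just that pattern, assuming session and nodeName are initialized as in UtilizeNodeCmd and using only calls that appear in the deleted code (the async parameter is omitted for brevity; the cap of ten mirrors the original):

// Condensed, simplified sketch of the suggestion loop in UtilizeNodeCmd.call().
List<ZkNodeProps> collectMoveRequests(Policy.Session session, String nodeName) {
  List<ZkNodeProps> requests = new ArrayList<>();
  for (;;) {
    Suggester suggester = session.getSuggester(MOVEREPLICA)
        .hint(Suggester.Hint.TARGET_NODE, nodeName);
    SolrRequest request = suggester.getSuggestion();
    if (request == null || requests.size() > 10) break; // drained, or capped as above
    session = suggester.getSession(); // carry the updated session into the next round
    requests.add(new ZkNodeProps(
        COLLECTION_PROP, request.getParams().get(COLLECTION_PROP),
        CollectionParams.TARGET_NODE, request.getParams().get(CollectionParams.TARGET_NODE),
        REPLICA_PROP, request.getParams().get(REPLICA_PROP)));
  }
  return requests;
}
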
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/package-info.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/package-info.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/package-info.java
deleted file mode 100644
index 651d4fe..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- 
-/** 
- * Package containing internal implementations of the SolrCloud Collections API.
- */
-package org.apache.solr.cloud.api.collections;
-
-

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/ActionContext.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ActionContext.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/ActionContext.java
deleted file mode 100644
index 8487d3d..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ActionContext.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.common.MapWriter;
-
-/**
- * Provides additional context for a TriggerAction, such as the trigger instance on
- * which the action is being executed, as well as helper methods for passing computed
- * information along to the next action.
- */
-public class ActionContext implements MapWriter {
-
-  private final SolrCloudManager cloudManager;
-  private final AutoScaling.Trigger source;
-  private final Map<String, Object> properties;
-
-  public ActionContext(SolrCloudManager cloudManager, AutoScaling.Trigger source, Map<String, Object> properties) {
-    this.cloudManager = cloudManager;
-    this.source = source;
-    this.properties = properties;
-  }
-
-  public SolrCloudManager getCloudManager() {
-    return cloudManager;
-  }
-
-  public AutoScaling.Trigger getSource() {
-    return source;
-  }
-
-  public Map<String, Object> getProperties()  {
-    return properties;
-  }
-
-  public Object getProperty(String name)  {
-    return properties != null ? properties.get(name) : null;
-  }
-
-  @Override
-  public void writeMap(EntryWriter ew) throws IOException {
-    ew.put("source", source.getName());
-    if (properties != null) {
-      for (Map.Entry<String, Object> entry : properties.entrySet()) {
-        ew.put("properties." + entry.getKey(), entry.getValue());
-      }
-    }
-  }
-}

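The writeMap implementation above flattens the shared properties under a "properties." prefix alongside the trigger name. A standalone illustration of that flattening, with an invented trigger name and property:

import java.util.LinkedHashMap;
import java.util.Map;

public class ActionContextMapSketch {
  public static void main(String[] args) {
    // Invented values standing in for the trigger name and shared properties.
    String sourceName = "node_lost_trigger";
    Map<String, Object> properties = Map.of("operations", 2);

    Map<String, Object> out = new LinkedHashMap<>();
    out.put("source", sourceName);
    for (Map.Entry<String, Object> e : properties.entrySet()) {
      out.put("properties." + e.getKey(), e.getValue()); // prefix each shared property
    }
    System.out.println(out); // {source=node_lost_trigger, properties.operations=2}
  }
}
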
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoAddReplicasPlanAction.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoAddReplicasPlanAction.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoAddReplicasPlanAction.java
deleted file mode 100644
index fdd3474..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoAddReplicasPlanAction.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-
-import java.io.IOException;
-
-import org.apache.solr.client.solrj.cloud.autoscaling.NoneSuggester;
-import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
-import org.apache.solr.client.solrj.impl.ClusterStateProvider;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.ZkStateReader;
-
-public class AutoAddReplicasPlanAction extends ComputePlanAction {
-
-  @Override
-  protected Suggester getSuggester(Policy.Session session, TriggerEvent event, ActionContext context, SolrCloudManager cloudManager) throws IOException {
-    // for backward compatibility
-    ClusterStateProvider stateProvider = cloudManager.getClusterStateProvider();
-    String autoAddReplicas = stateProvider.getClusterProperty(ZkStateReader.AUTO_ADD_REPLICAS, (String) null);
-    if ("false".equals(autoAddReplicas)) {
-      return NoneSuggester.get(session);
-    }
-
-    Suggester suggester = super.getSuggester(session, event, context, cloudManager);
-    ClusterState clusterState;
-    try {
-      clusterState = stateProvider.getClusterState();
-    } catch (IOException e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Exception getting cluster state", e);
-    }
-
-    boolean anyCollections = false;
-    for (DocCollection collection: clusterState.getCollectionsMap().values()) {
-      if (collection.getAutoAddReplicas()) {
-        anyCollections = true;
-        suggester.hint(Suggester.Hint.COLL, collection.getName());
-      }
-    }
-
-    if (!anyCollections) return NoneSuggester.get(session);
-    return suggester;
-  }
-}

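The backward-compatibility check above means the whole action can be short-circuited by the autoAddReplicas cluster property. A sketch of flipping that property with SolrJ; the ZooKeeper address is an assumption for the example, and builder details vary between SolrJ versions:

import java.util.Collections;
import java.util.Optional;

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class DisableAutoAddReplicasSketch {
  public static void main(String[] args) throws Exception {
    // "localhost:9983" is an assumed ZooKeeper address for the example.
    try (CloudSolrClient client = new CloudSolrClient.Builder(
        Collections.singletonList("localhost:9983"), Optional.empty()).build()) {
      // With autoAddReplicas=false, getSuggester above returns a NoneSuggester.
      CollectionAdminRequest.setClusterProperty("autoAddReplicas", "false")
          .process(client);
    }
  }
}
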
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScaling.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScaling.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScaling.java
deleted file mode 100644
index 93f449a..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScaling.java
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-
-import org.apache.lucene.store.AlreadyClosedException;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.SolrResourceLoader;
-
-public class AutoScaling {
-
-  /**
-   * Implementations of this interface are used for processing events generated by a trigger.
-   */
-  public interface TriggerEventProcessor {
-
-    /**
-     * This method is executed for events produced by {@link Trigger#run()}.
-     *
-     * @param event a subclass of {@link TriggerEvent}
-     * @return true if the processor was ready to perform actions on the event, false
-     * otherwise. If false was returned then callers should assume the event was discarded.
-     */
-    boolean process(TriggerEvent event);
-  }
-
-  /**
-   * Interface for a Solr trigger. Each trigger implements the Runnable and Closeable interfaces. A trigger
-   * is scheduled using a {@link java.util.concurrent.ScheduledExecutorService} so it is executed on
-   * a configured schedule to check whether the trigger is ready to fire. The {@link AutoScaling.Trigger#setProcessor(AutoScaling.TriggerEventProcessor)}
-   * method should be used to set the processor that the implementation invokes whenever the trigger is
-   * ready.
-   * <p>
-   * As per the guarantees made by the {@link java.util.concurrent.ScheduledExecutorService}, a trigger
-   * implementation is only ever called sequentially and therefore need not be thread safe. However, it
-   * is encouraged that implementations be immutable, with the exception of the associated listener,
-   * which may be get and set from a different thread than the one executing the trigger. Therefore, implementations
-   * should use appropriate synchronization around the listener.
-   * <p>
-   * When a trigger is ready to fire, it calls the {@link TriggerEventProcessor#process(TriggerEvent)} method
-   * with the proper trigger event object. If that method returns false then it should be interpreted to mean
-   * that Solr is not ready to process this trigger event and therefore we should retain the state and fire
-   * at the next invocation of the run() method.
-   */
-  public interface Trigger extends Closeable, Runnable {
-    /**
-     * Trigger name.
-     */
-    String getName();
-
-    /**
-     * Event type generated by this trigger.
-     */
-    TriggerEventType getEventType();
-
-    /** Returns true if this trigger is enabled. */
-    boolean isEnabled();
-
-    /** Trigger properties. */
-    Map<String, Object> getProperties();
-
-    /** Number of seconds to wait between fired events ("waitFor" property). */
-    int getWaitForSecond();
-
-    /** Actions to execute when event is fired. */
-    List<TriggerAction> getActions();
-
-    /** Set event processor to call when event is fired. */
-    void setProcessor(TriggerEventProcessor processor);
-
-    /** Get event processor. */
-    TriggerEventProcessor getProcessor();
-
-    /** Returns true when this trigger is closed and cannot be used. */
-    boolean isClosed();
-
-    /** Set internal state of this trigger from another instance. */
-    void restoreState(Trigger old);
-
-    /** Save internal state of this trigger in ZooKeeper. */
-    void saveState();
-
-    /** Restore internal state of this trigger from ZooKeeper. */
-    void restoreState();
-
-    /**
-     * Called when the trigger is created, but before it's initialized or scheduled for use.
-     * This method should also verify that the trigger configuration parameters are correct. It may
-     * be called multiple times.
-     * @param properties configuration properties
-     * @throws TriggerValidationException contains details of invalid configuration parameters.
-     */
-    void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException;
-
-    /**
-     * Called before a trigger is scheduled. Any heavy object creation or initialization should
-     * be done in this method instead of the Trigger's constructor.
-     */
-    void init() throws Exception;
-  }
-
-  /**
-   * Factory to produce instances of {@link Trigger}.
-   */
-  public static abstract class TriggerFactory implements Closeable {
-    protected boolean isClosed = false;
-
-    public abstract Trigger create(TriggerEventType type, String name, Map<String, Object> props) throws TriggerValidationException;
-
-    @Override
-    public void close() throws IOException {
-      synchronized (this) {
-        isClosed = true;
-      }
-    }
-  }
-
-  /**
-   * Default implementation of {@link TriggerFactory}.
-   */
-  public static class TriggerFactoryImpl extends TriggerFactory {
-
-    private final SolrCloudManager cloudManager;
-    private final SolrResourceLoader loader;
-
-    public TriggerFactoryImpl(SolrResourceLoader loader, SolrCloudManager cloudManager) {
-      Objects.requireNonNull(cloudManager);
-      Objects.requireNonNull(loader);
-      this.cloudManager = cloudManager;
-      this.loader = loader;
-    }
-
-    @Override
-    public synchronized Trigger create(TriggerEventType type, String name, Map<String, Object> props) throws TriggerValidationException {
-      if (isClosed) {
-        throw new AlreadyClosedException("TriggerFactory has already been closed, cannot create new triggers");
-      }
-      if (type == null) {
-        throw new IllegalArgumentException("Trigger type must not be null");
-      }
-      if (name == null || name.isEmpty()) {
-        throw new IllegalArgumentException("Trigger name must not be empty");
-      }
-      Trigger t;
-      switch (type) {
-        case NODEADDED:
-          t = new NodeAddedTrigger(name);
-          break;
-        case NODELOST:
-          t = new NodeLostTrigger(name);
-          break;
-        case SEARCHRATE:
-          t = new SearchRateTrigger(name);
-          break;
-        case METRIC:
-          t = new MetricTrigger(name);
-          break;
-        case SCHEDULED:
-          t = new ScheduledTrigger(name);
-          break;
-        case INDEXSIZE:
-          t = new IndexSizeTrigger(name);
-          break;
-        default:
-          throw new IllegalArgumentException("Unknown event type: " + type + " in trigger: " + name);
-      }
-      t.configure(loader, cloudManager, props);
-      return t;
-    }
-
-  }
-
-  public static final String AUTO_ADD_REPLICAS_TRIGGER_NAME = ".auto_add_replicas";
-
-  public static final String AUTO_ADD_REPLICAS_TRIGGER_DSL =
-      "    {" +
-      "        'name' : '" + AUTO_ADD_REPLICAS_TRIGGER_NAME + "'," +
-      "        'event' : 'nodeLost'," +
-      "        'waitFor' : -1," +
-      "        'enabled' : true," +
-      "        'actions' : [" +
-      "            {" +
-      "                'name':'auto_add_replicas_plan'," +
-      "                'class':'solr.AutoAddReplicasPlanAction'" +
-      "            }," +
-      "            {" +
-      "                'name':'execute_plan'," +
-      "                'class':'solr.ExecutePlanAction'" +
-      "            }" +
-      "        ]" +
-      "    }";
-
-  public static final Map<String, Object> AUTO_ADD_REPLICAS_TRIGGER_PROPS = (Map) Utils.fromJSONString(AUTO_ADD_REPLICAS_TRIGGER_DSL);
-
-  public static final String SCHEDULED_MAINTENANCE_TRIGGER_NAME = ".scheduled_maintenance";
-
-  public static final String SCHEDULED_MAINTENANCE_TRIGGER_DSL =
-          "    {" +
-          "        'name' : '" + SCHEDULED_MAINTENANCE_TRIGGER_NAME + "'," +
-          "        'event' : 'scheduled'," +
-          "        'startTime' : 'NOW'," +
-          "        'every' : '+1DAY'," +
-          "        'enabled' : true," +
-          "        'actions' : [" +
-          "            {" +
-          "                'name':'inactive_shard_plan'," +
-          "                'class':'solr.InactiveShardPlanAction'" +
-          "            }," +
-          "            {" +
-          "                'name':'execute_plan'," +
-          "                'class':'solr.ExecutePlanAction'" +
-          "            }" +
-          "        ]" +
-          "    }";
-
-  public static final Map<String, Object> SCHEDULED_MAINTENANCE_TRIGGER_PROPS = (Map) Utils.fromJSONString(SCHEDULED_MAINTENANCE_TRIGGER_DSL);
-
-}

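The trigger DSL constants above are single-quoted JSON parsed by Utils.fromJSONString into the property maps that TriggerFactoryImpl consumes. A minimal round trip on a trimmed-down, invented trigger config:

import java.util.Map;

import org.apache.solr.common.util.Utils;

public class TriggerDslSketch {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    // Trimmed-down, invented trigger config in the same single-quoted style.
    String dsl = "{'name':'.auto_add_replicas','event':'nodeLost','waitFor':-1,'enabled':true}";
    Map<String, Object> props = (Map<String, Object>) Utils.fromJSONString(dsl);
    System.out.println(props.get("event"));   // nodeLost
    System.out.println(props.get("waitFor")); // -1
  }
}
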
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScalingHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScalingHandler.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScalingHandler.java
deleted file mode 100644
index 899c5cd..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScalingHandler.java
+++ /dev/null
@@ -1,698 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import org.apache.solr.api.Api;
-import org.apache.solr.api.ApiBag;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
-import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
-import org.apache.solr.client.solrj.cloud.autoscaling.Clause;
-import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.autoscaling.Preference;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventProcessorStage;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
-import org.apache.solr.common.MapWriter;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.AutoScalingParams;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.util.CommandOperation;
-import org.apache.solr.common.util.IOUtils;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.handler.RequestHandlerUtils;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.security.AuthorizationContext;
-import org.apache.solr.security.PermissionNameProvider;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static java.util.stream.Collectors.collectingAndThen;
-import static java.util.stream.Collectors.toSet;
-import static org.apache.solr.common.cloud.ZkStateReader.SOLR_AUTOSCALING_CONF_PATH;
-import static org.apache.solr.common.params.AutoScalingParams.*;
-import static org.apache.solr.common.params.CommonParams.JSON;
-
-/**
- * Handler for /cluster/autoscaling
- */
-public class AutoScalingHandler extends RequestHandlerBase implements PermissionNameProvider {
-  public static final String HANDLER_PATH = "/admin/autoscaling";
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  protected final SolrCloudManager cloudManager;
-  protected final SolrResourceLoader loader;
-  protected final AutoScaling.TriggerFactory triggerFactory;
-  private final List<Map<String, String>> DEFAULT_ACTIONS = new ArrayList<>(3);
-  private static final Set<String> singletonCommands = Stream.of("set-cluster-preferences", "set-cluster-policy")
-      .collect(collectingAndThen(toSet(), Collections::unmodifiableSet));
-
-  private final TimeSource timeSource;
-
-  public AutoScalingHandler(SolrCloudManager cloudManager, SolrResourceLoader loader) {
-    this.cloudManager = cloudManager;
-    this.loader = loader;
-    this.triggerFactory = new AutoScaling.TriggerFactoryImpl(loader, cloudManager);
-    this.timeSource = cloudManager.getTimeSource();
-    Map<String, String> map = new HashMap<>(2);
-    map.put(NAME, "compute_plan");
-    map.put(CLASS, "solr.ComputePlanAction");
-    DEFAULT_ACTIONS.add(map);
-    map = new HashMap<>(2);
-    map.put(NAME, "execute_plan");
-    map.put(CLASS, "solr.ExecutePlanAction");
-    DEFAULT_ACTIONS.add(map);
-  }
-
-  @Override
-  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    try {
-      String httpMethod = (String) req.getContext().get("httpMethod");
-      RequestHandlerUtils.setWt(req, JSON);
-
-      if ("GET".equals(httpMethod)) {
-        String path = (String) req.getContext().get("path");
-        if (path == null) path = "/cluster/autoscaling";
-        List<String> parts = StrUtils.splitSmart(path, '/');
-        if (parts.get(0).isEmpty()) parts.remove(0);
-
-        if (parts.size() < 2 || parts.size() > 3) {
-          // invalid
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown path: " + path);
-        }
-
-        AutoScalingConfig autoScalingConf = cloudManager.getDistribStateManager().getAutoScalingConfig();
-        if (parts.size() == 2)  {
-          autoScalingConf.writeMap(new MapWriter.EntryWriter() {
-
-            @Override
-            public MapWriter.EntryWriter put(String k, Object v) throws IOException {
-              rsp.getValues().add(k, v);
-              return this;
-            }
-          });
-        } else if (parts.size() == 3) {
-          if (DIAGNOSTICS.equals(parts.get(2))) {
-            handleDiagnostics(rsp, autoScalingConf);
-          } else if (SUGGESTIONS.equals(parts.get(2))) {
-            handleSuggestions(rsp, autoScalingConf);
-          }
-        }
-      } else {
-        if (req.getContentStreams() == null) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No commands specified for autoscaling");
-        }
-        List<CommandOperation> ops = CommandOperation.readCommands(req.getContentStreams(), rsp.getValues(), singletonCommands);
-        if (ops == null) {
-          // errors have already been added to the response so there's nothing left to do
-          return;
-        }
-        processOps(req, rsp, ops);
-      }
-    } catch (Exception e) {
-      rsp.getValues().add("result", "failure");
-      throw e;
-    } finally {
-      RequestHandlerUtils.addExperimentalFormatWarning(rsp);
-    }
-  }
-
-
-  private void handleSuggestions(SolrQueryResponse rsp, AutoScalingConfig autoScalingConf) throws IOException {
-    rsp.getValues().add("suggestions",
-        PolicyHelper.getSuggestions(autoScalingConf, cloudManager));
-  }
-
-  public void processOps(SolrQueryRequest req, SolrQueryResponse rsp, List<CommandOperation> ops)
-      throws KeeperException, InterruptedException, IOException {
-    while (true) {
-      AutoScalingConfig initialConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
-      AutoScalingConfig currentConfig = initialConfig;
-      for (CommandOperation op : ops) {
-        switch (op.name) {
-          case CMD_SET_TRIGGER:
-            currentConfig = handleSetTrigger(req, rsp, op, currentConfig);
-            break;
-          case CMD_REMOVE_TRIGGER:
-            currentConfig = handleRemoveTrigger(req, rsp, op, currentConfig);
-            break;
-          case CMD_SET_LISTENER:
-            currentConfig = handleSetListener(req, rsp, op, currentConfig);
-            break;
-          case CMD_REMOVE_LISTENER:
-            currentConfig = handleRemoveListener(req, rsp, op, currentConfig);
-            break;
-          case CMD_SUSPEND_TRIGGER:
-            currentConfig = handleSuspendTrigger(req, rsp, op, currentConfig);
-            break;
-          case CMD_RESUME_TRIGGER:
-            currentConfig = handleResumeTrigger(req, rsp, op, currentConfig);
-            break;
-          case CMD_SET_POLICY:
-            currentConfig = handleSetPolicies(req, rsp, op, currentConfig);
-            break;
-          case CMD_REMOVE_POLICY:
-            currentConfig = handleRemovePolicy(req, rsp, op, currentConfig);
-            break;
-          case CMD_SET_CLUSTER_PREFERENCES:
-            currentConfig = handleSetClusterPreferences(req, rsp, op, currentConfig);
-            break;
-          case CMD_SET_CLUSTER_POLICY:
-            currentConfig = handleSetClusterPolicy(req, rsp, op, currentConfig);
-            break;
-          case CMD_SET_PROPERTIES:
-            currentConfig = handleSetProperties(req, rsp, op, currentConfig);
-            break;
-          default:
-            op.addError("Unknown command: " + op.name);
-        }
-      }
-      List<Map<String, Object>> errs = CommandOperation.captureErrors(ops);
-      if (!errs.isEmpty()) {
-        throw new ApiBag.ExceptionWithErrObject(SolrException.ErrorCode.BAD_REQUEST, "Error in command payload", errs);
-      }
-
-      if (!currentConfig.equals(initialConfig)) {
-        // update in ZK
-        if (setAutoScalingConfig(currentConfig)) {
-          break;
-        } else {
-          // someone else updated the config, get the latest one and re-apply our ops
-          rsp.getValues().add("retry", "initialVersion=" + initialConfig.getZkVersion());
-          continue;
-        }
-      } else {
-        // no changes
-        break;
-      }
-    }
-    rsp.getValues().add("result", "success");
-  }
-
-  private AutoScalingConfig handleSetProperties(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op, AutoScalingConfig currentConfig) {
-    Map<String, Object> map = op.getDataMap() == null ? Collections.emptyMap() : op.getDataMap();
-    Map<String, Object> configProps = new HashMap<>(currentConfig.getProperties());
-    configProps.putAll(map);
-    // remove a key which is set to null
-    map.forEach((k, v) -> {
-      if (v == null)  configProps.remove(k);
-    });
-    return currentConfig.withProperties(configProps);
-  }
-
-  private void handleDiagnostics(SolrQueryResponse rsp, AutoScalingConfig autoScalingConf) throws IOException {
-    Policy policy = autoScalingConf.getPolicy();
-    rsp.getValues().add("diagnostics", PolicyHelper.getDiagnostics(policy, cloudManager));
-  }
-
-  private AutoScalingConfig handleSetClusterPolicy(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op,
-                                                   AutoScalingConfig currentConfig) throws KeeperException, InterruptedException, IOException {
-    Object commandData = op.getCommandData();
-    if (!(commandData instanceof List)) { // also rejects null; check before casting
-      op.addError("set-cluster-policy expects an array of objects");
-      return currentConfig;
-    }
-    List<Map<String, Object>> clusterPolicy = (List<Map<String, Object>>) commandData;
-    List<Clause> cp = null;
-    try {
-      cp = clusterPolicy.stream().map(Clause::create).collect(Collectors.toList());
-    } catch (Exception e) {
-      op.addError(e.getMessage());
-      return currentConfig;
-    }
-    Policy p = currentConfig.getPolicy().withClusterPolicy(cp);
-    currentConfig = currentConfig.withPolicy(p);
-    return currentConfig;
-  }
-
-  private AutoScalingConfig handleSetClusterPreferences(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op,
-                                                        AutoScalingConfig currentConfig) throws KeeperException, InterruptedException, IOException {
-    Object commandData = op.getCommandData();
-    if (!(commandData instanceof List)) { // also rejects null; check before casting
-      op.addError("A list of cluster preferences was not found");
-      return currentConfig;
-    }
-    List<Map<String, Object>> preferences = (List<Map<String, Object>>) commandData;
-    List<Preference> prefs = null;
-    try {
-      prefs = preferences.stream().map(Preference::new).collect(Collectors.toList());
-    } catch (Exception e) {
-      op.addError(e.getMessage());
-      return currentConfig;
-    }
-    Policy p = currentConfig.getPolicy().withClusterPreferences(prefs);
-    currentConfig = currentConfig.withPolicy(p);
-    return currentConfig;
-  }
-
-  private AutoScalingConfig handleRemovePolicy(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op,
-                                               AutoScalingConfig currentConfig) throws KeeperException, InterruptedException, IOException {
-    String policyName = (String) op.getVal("");
-
-    if (op.hasError()) return currentConfig;
-
-    Map<String, List<Clause>> policies = currentConfig.getPolicy().getPolicies();
-    if (policies == null || !policies.containsKey(policyName)) {
-      op.addError("No policy exists with name: " + policyName);
-      return currentConfig;
-    }
-
-    cloudManager.getClusterStateProvider().getClusterState().forEachCollection(coll -> {
-      if (policyName.equals(coll.getPolicyName()))
-        op.addError(StrUtils.formatString("policy : {0} is being used by collection {1}", policyName, coll.getName()));
-    });
-    if (op.hasError()) return currentConfig;
-    policies = new HashMap<>(policies);
-    policies.remove(policyName);
-    Policy p = currentConfig.getPolicy().withPolicies(policies);
-    currentConfig = currentConfig.withPolicy(p);
-    return currentConfig;
-  }
-
-  private AutoScalingConfig handleSetPolicies(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op,
-                                              AutoScalingConfig currentConfig) throws KeeperException, InterruptedException, IOException {
-    Map<String, Object> policiesMap = op.getDataMap();
-    for (Map.Entry<String, Object> policy : policiesMap.entrySet()) {
-      String policyName = policy.getKey();
-      if (policyName == null || policyName.trim().length() == 0) {
-        op.addError("The policy name cannot be null or empty");
-        return currentConfig;
-      }
-    }
-    Map<String, List<Clause>> currentClauses = new HashMap<>(currentConfig.getPolicy().getPolicies());
-    Map<String, List<Clause>> newClauses = null;
-    try {
-      newClauses = Policy.clausesFromMap((Map<String, List<Map<String, Object>>>) op.getCommandData(),
-          new ArrayList<>());
-    } catch (Exception e) {
-      op.addError(e.getMessage());
-      return currentConfig;
-    }
-    currentClauses.putAll(newClauses);
-    Policy p = currentConfig.getPolicy().withPolicies(currentClauses);
-    currentConfig = currentConfig.withPolicy(p);
-    return currentConfig;
-  }
-
-  private AutoScalingConfig handleResumeTrigger(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op,
-                                                AutoScalingConfig currentConfig) throws KeeperException, InterruptedException {
-    String triggerName = op.getStr(NAME);
-    if (op.hasError()) return currentConfig;
-    Map<String, AutoScalingConfig.TriggerConfig> triggers = currentConfig.getTriggerConfigs();
-    Set<String> changed = new HashSet<>();
-    if (!Policy.EACH.equals(triggerName) && !triggers.containsKey(triggerName)) {
-      op.addError("No trigger exists with name: " + triggerName);
-      return currentConfig;
-    }
-    Map<String, AutoScalingConfig.TriggerConfig> newTriggers = new HashMap<>();
-    for (Map.Entry<String, AutoScalingConfig.TriggerConfig> entry : triggers.entrySet()) {
-      if (Policy.EACH.equals(triggerName) || triggerName.equals(entry.getKey())) {
-        AutoScalingConfig.TriggerConfig trigger = entry.getValue();
-        if (!trigger.enabled) {
-          trigger = trigger.withEnabled(true);
-          newTriggers.put(entry.getKey(), trigger);
-          changed.add(entry.getKey());
-        } else {
-          newTriggers.put(entry.getKey(), entry.getValue());
-        }
-      } else {
-        newTriggers.put(entry.getKey(), entry.getValue());
-      }
-    }
-    rsp.getValues().add("changed", changed);
-    if (!changed.isEmpty()) {
-      currentConfig = currentConfig.withTriggerConfigs(newTriggers);
-    }
-    return currentConfig;
-  }
-
-  private AutoScalingConfig handleSuspendTrigger(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op,
-                                                 AutoScalingConfig currentConfig) throws KeeperException, InterruptedException {
-    String triggerName = op.getStr(NAME);
-    if (op.hasError()) return currentConfig;
-    String timeout = op.getStr(TIMEOUT, null);
-    Date resumeTime = null;
-    if (timeout != null) {
-      try {
-        int timeoutSeconds = parseHumanTime(timeout);
-        resumeTime = new Date(TimeUnit.MILLISECONDS.convert(timeSource.getTimeNs(), TimeUnit.NANOSECONDS)
-            + TimeUnit.MILLISECONDS.convert(timeoutSeconds, TimeUnit.SECONDS));
-      } catch (IllegalArgumentException e) {
-        op.addError("Invalid 'timeout' value for suspend trigger: " + triggerName);
-        return currentConfig;
-      }
-    }
-
-    Map<String, AutoScalingConfig.TriggerConfig> triggers = currentConfig.getTriggerConfigs();
-    Set<String> changed = new HashSet<>();
-
-    if (!Policy.EACH.equals(triggerName) && !triggers.containsKey(triggerName)) {
-      op.addError("No trigger exists with name: " + triggerName);
-      return currentConfig;
-    }
-    Map<String, AutoScalingConfig.TriggerConfig> newTriggers = new HashMap<>();
-    for (Map.Entry<String, AutoScalingConfig.TriggerConfig> entry : triggers.entrySet()) {
-      if (Policy.EACH.equals(triggerName) || triggerName.equals(entry.getKey())) {
-        AutoScalingConfig.TriggerConfig trigger = entry.getValue();
-        if (trigger.enabled) {
-          trigger = trigger.withEnabled(false);
-          if (resumeTime != null) {
-            trigger = trigger.withProperty(RESUME_AT, resumeTime.getTime());
-          }
-          newTriggers.put(entry.getKey(), trigger);
-          changed.add(trigger.name);
-        } else {
-          newTriggers.put(entry.getKey(), entry.getValue());
-        }
-      } else {
-        newTriggers.put(entry.getKey(), entry.getValue());
-      }
-    }
-    rsp.getValues().add("changed", changed);
-    if (!changed.isEmpty()) {
-      currentConfig = currentConfig.withTriggerConfigs(newTriggers);
-    }
-    return currentConfig;
-  }
-
-  private AutoScalingConfig handleRemoveListener(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op,
-                                    AutoScalingConfig currentConfig) throws KeeperException, InterruptedException {
-    String listenerName = op.getStr(NAME);
-
-    if (op.hasError()) return currentConfig;
-    Map<String, AutoScalingConfig.TriggerListenerConfig> listeners = currentConfig.getTriggerListenerConfigs();
-    if (listeners == null || !listeners.containsKey(listenerName)) {
-      op.addError("No listener exists with name: " + listenerName);
-      return currentConfig;
-    }
-    currentConfig = currentConfig.withoutTriggerListenerConfig(listenerName);
-    return currentConfig;
-  }
-
-  private AutoScalingConfig handleSetListener(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op,
-                                 AutoScalingConfig currentConfig) throws KeeperException, InterruptedException {
-    String listenerName = op.getStr(NAME);
-    String triggerName = op.getStr(TRIGGER);
-    List<String> stageNames = op.getStrs(STAGE, Collections.emptyList());
-    String listenerClass = op.getStr(CLASS);
-    List<String> beforeActions = op.getStrs(BEFORE_ACTION, Collections.emptyList());
-    List<String> afterActions = op.getStrs(AFTER_ACTION, Collections.emptyList());
-
-    if (op.hasError()) return currentConfig;
-
-    Map<String, AutoScalingConfig.TriggerConfig> triggers = currentConfig.getTriggerConfigs();
-    if (triggers == null || !triggers.containsKey(triggerName)) {
-      op.addError("A trigger with the name " + triggerName + " does not exist");
-      return currentConfig;
-    }
-    AutoScalingConfig.TriggerConfig triggerConfig = triggers.get(triggerName);
-
-    if (stageNames.isEmpty() && beforeActions.isEmpty() && afterActions.isEmpty()) {
-      op.addError("Either 'stage' or 'beforeAction' or 'afterAction' must be specified");
-      return currentConfig;
-    }
-
-    for (String stage : stageNames) {
-      try {
-        TriggerEventProcessorStage.valueOf(stage);
-      } catch (IllegalArgumentException e) {
-        op.addError("Invalid stage name: " + stage);
-      }
-    }
-    if (op.hasError()) return currentConfig;
-
-    AutoScalingConfig.TriggerListenerConfig listenerConfig = new AutoScalingConfig.TriggerListenerConfig(listenerName, op.getValuesExcluding("name"));
-
-    // validate that we can load the listener class
-    // todo allow creation from blobstore
-    TriggerListener listener = null;
-    try {
-      listener = loader.newInstance(listenerClass, TriggerListener.class);
-      listener.configure(loader, cloudManager, listenerConfig);
-    } catch (TriggerValidationException e) {
-      log.warn("invalid listener configuration", e);
-      op.addError("invalid listener configuration: " + e.toString());
-      return currentConfig;
-    } catch (Exception e) {
-      log.warn("error loading listener class ", e);
-      op.addError("Listener not found: " + listenerClass + ". error message:" + e.getMessage());
-      return currentConfig;
-    } finally {
-      if (listener != null) {
-        IOUtils.closeQuietly(listener);
-      }
-    }
-
-    Set<String> actionNames = new HashSet<>();
-    actionNames.addAll(beforeActions);
-    actionNames.addAll(afterActions);
-    for (AutoScalingConfig.ActionConfig action : triggerConfig.actions) {
-      actionNames.remove(action.name);
-    }
-    if (!actionNames.isEmpty()) {
-      op.addError("The trigger '" + triggerName + "' does not have actions named: " + actionNames);
-      return currentConfig;
-    }
-    // todo - handle races between competing set-trigger and set-listener invocations
-    currentConfig = currentConfig.withTriggerListenerConfig(listenerConfig);
-    return currentConfig;
-  }
-
-  private AutoScalingConfig handleSetTrigger(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op,
-                                             AutoScalingConfig currentConfig) throws KeeperException, InterruptedException {
-    // we're going to modify the op - use a copy
-    String triggerName = op.getStr(NAME);
-    String eventTypeStr = op.getStr(EVENT);
-
-    if (op.hasError()) return currentConfig;
-    try { // validate the event type early so a bad value yields a request error, not a server error
-      TriggerEventType.valueOf(eventTypeStr.trim().toUpperCase(Locale.ROOT));
-    } catch (IllegalArgumentException e) {
-      op.addError("Invalid event type: " + eventTypeStr);
-      return currentConfig;
-    }
-
-    String waitForStr = op.getStr(WAIT_FOR, null);
-
-    CommandOperation opCopy = new CommandOperation(op.name, Utils.getDeepCopy((Map) op.getCommandData(), 10));
-
-    if (waitForStr != null) {
-      int seconds = 0;
-      try {
-        seconds = parseHumanTime(waitForStr);
-      } catch (IllegalArgumentException e) {
-        op.addError("Invalid 'waitFor' value '" + waitForStr + "' in trigger: " + triggerName);
-        return currentConfig;
-      }
-      opCopy.getDataMap().put(WAIT_FOR, seconds);
-    }
-
-    Integer lowerBound = op.getInt(LOWER_BOUND, null);
-    Integer upperBound = op.getInt(UPPER_BOUND, null);
-
-    List<Map<String, String>> actions = (List<Map<String, String>>) op.getVal(ACTIONS);
-    if (actions == null) {
-      actions = DEFAULT_ACTIONS;
-      opCopy.getDataMap().put(ACTIONS, actions);
-    }
-
-    // validate that we can load all the actions
-    // todo allow creation from blobstore
-    for (Map<String, String> action : actions) {
-      if (!action.containsKey(NAME) || !action.containsKey(CLASS)) {
-        op.addError("No 'name' or 'class' specified for action: " + action);
-        return currentConfig;
-      }
-      String klass = action.get(CLASS);
-      try {
-        loader.findClass(klass, TriggerAction.class);
-      } catch (Exception e) {
-        log.warn("Could not load class : ", e);
-        op.addError("Action not found: " + klass + " " + e.getMessage());
-        return currentConfig;
-      }
-    }
-    AutoScalingConfig.TriggerConfig trigger = new AutoScalingConfig.TriggerConfig(triggerName, opCopy.getValuesExcluding("name"));
-    // validate trigger config
-    AutoScaling.Trigger t = null;
-    try {
-      t = triggerFactory.create(trigger.event, trigger.name, trigger.properties);
-    } catch (Exception e) {
-      op.addError("Error validating trigger config " + trigger.name + ": " + e.toString());
-      return currentConfig;
-    } finally {
-      if (t != null) {
-        IOUtils.closeQuietly(t);
-      }
-    }
-    currentConfig = currentConfig.withTriggerConfig(trigger);
-    // check that there's a default SystemLogListener, unless user specified another one
-    return withSystemLogListener(currentConfig, triggerName);
-  }
-
-  private static String fullName = SystemLogListener.class.getName();
-  private static String solrName = "solr." + SystemLogListener.class.getSimpleName();
-
-  static AutoScalingConfig withSystemLogListener(AutoScalingConfig autoScalingConfig, String triggerName) {
-    Map<String, AutoScalingConfig.TriggerListenerConfig> configs = autoScalingConfig.getTriggerListenerConfigs();
-    for (AutoScalingConfig.TriggerListenerConfig cfg : configs.values()) {
-      if (triggerName.equals(cfg.trigger)) {
-        // already has some listener config
-        return autoScalingConfig;
-      }
-    }
-    // need to add
-    Map<String, Object> properties = new HashMap<>();
-    properties.put(AutoScalingParams.CLASS, SystemLogListener.class.getName());
-    properties.put(AutoScalingParams.TRIGGER, triggerName);
-    properties.put(AutoScalingParams.STAGE, EnumSet.allOf(TriggerEventProcessorStage.class));
-    AutoScalingConfig.TriggerListenerConfig listener =
-        new AutoScalingConfig.TriggerListenerConfig(triggerName + CollectionAdminParams.SYSTEM_COLL, properties);
-    autoScalingConfig = autoScalingConfig.withTriggerListenerConfig(listener);
-    return autoScalingConfig;
-  }
-
-  private int parseHumanTime(String timeStr) {
-    char c = timeStr.charAt(timeStr.length() - 1);
-    long timeValue = Long.parseLong(timeStr.substring(0, timeStr.length() - 1));
-    int seconds;
-    switch (c) {
-      case 'h':
-        seconds = (int) TimeUnit.HOURS.toSeconds(timeValue);
-        break;
-      case 'm':
-        seconds = (int) TimeUnit.MINUTES.toSeconds(timeValue);
-        break;
-      case 's':
-        seconds = (int) timeValue;
-        break;
-      default:
-        throw new IllegalArgumentException("Invalid time value: " + timeStr);
-    }
-    return seconds;
-  }
-
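  // For illustration, parseHumanTime maps a trailing unit character onto whole seconds:
  //   parseHumanTime("2h")  -> 7200
  //   parseHumanTime("10m") -> 600
  //   parseHumanTime("30s") -> 30
  // Any other suffix is rejected with an IllegalArgumentException.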
-  private AutoScalingConfig handleRemoveTrigger(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op,
-                                   AutoScalingConfig currentConfig) throws KeeperException, InterruptedException {
-    String triggerName = op.getStr(NAME);
-    boolean removeListeners = op.getBoolean(REMOVE_LISTENERS, false);
-
-    if (op.hasError()) return currentConfig;
-    Map<String, AutoScalingConfig.TriggerConfig> triggerConfigs = currentConfig.getTriggerConfigs();
-    if (!triggerConfigs.containsKey(triggerName)) {
-      op.addError("No trigger exists with name: " + triggerName);
-      return currentConfig;
-    }
-    triggerConfigs = new HashMap<>(triggerConfigs);
-    Set<String> activeListeners = new HashSet<>();
-    Map<String, AutoScalingConfig.TriggerListenerConfig> listeners = currentConfig.getTriggerListenerConfigs();
-    for (AutoScalingConfig.TriggerListenerConfig listener : listeners.values()) {
-      if (triggerName.equals(listener.trigger)) {
-        activeListeners.add(listener.name);
-      }
-    }
-    if (!activeListeners.isEmpty()) {
-      boolean onlySystemLog = false;
-      if (activeListeners.size() == 1) {
-        AutoScalingConfig.TriggerListenerConfig cfg = listeners.get(activeListeners.iterator().next());
-        if (fullName.equals(cfg.listenerClass) || solrName.equals(cfg.listenerClass)) {
-          onlySystemLog = true;
-        }
-      }
-      if (removeListeners || onlySystemLog) {
-        listeners = new HashMap<>(listeners);
-        listeners.keySet().removeAll(activeListeners);
-      } else {
-        op.addError("Cannot remove trigger: " + triggerName + " because it has active listeners: " + activeListeners);
-        return currentConfig;
-      }
-    }
-    triggerConfigs.remove(triggerName);
-    currentConfig = currentConfig.withTriggerConfigs(triggerConfigs).withTriggerListenerConfigs(listeners);
-    return currentConfig;
-  }
-
-
-  private boolean setAutoScalingConfig(AutoScalingConfig currentConfig) throws KeeperException, InterruptedException, IOException {
-    verifyAutoScalingConf(currentConfig);
-    try {
-      cloudManager.getDistribStateManager().setData(SOLR_AUTOSCALING_CONF_PATH, Utils.toJSON(currentConfig), currentConfig.getZkVersion());
-    } catch (BadVersionException bve) {
-      // somebody else has changed the configuration so we must retry
-      return false;
-    }
-    //log.debug("-- saved version " + currentConfig.getZkVersion() + ": " + currentConfig);
-    return true;
-  }
-
-  private void verifyAutoScalingConf(AutoScalingConfig autoScalingConf) throws IOException {
-    Policy.Session session = autoScalingConf.getPolicy()
-        .createSession(cloudManager);
-    log.debug("Verified autoscaling configuration");
-  }
-
-  @Override
-  public String getDescription() {
-    return "A handler for autoscaling configuration";
-  }
-
-  @Override
-  public Name getPermissionName(AuthorizationContext request) {
-    switch (request.getHttpMethod()) {
-      case "GET":
-        return Name.AUTOSCALING_READ_PERM;
-      case "POST":
-        return Name.AUTOSCALING_WRITE_PERM;
-      default:
-        return null;
-    }
-  }
-
-  @Override
-  public Collection<Api> getApis() {
-    return ApiBag.wrapRequestHandlers(this, "autoscaling.Commands");
-  }
-
-  @Override
-  public Boolean registerV2() {
-    return Boolean.TRUE;
-  }
-
-  @Override
-  public SolrRequestHandler getSubHandler(String path) {
-    if (path.equals("/diagnostics") || path.equals("/suggestions")) return this;
-    return null;
-  }
-}

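The BadVersionException handling in setAutoScalingConfig() above implies an optimistic read-modify-write loop: read the config together with its ZK version, apply the command, write conditionally, and start over on a conflict. A minimal sketch of that pattern; VersionedStore and ConfigUpdater are hypothetical stand-ins, not Solr APIs:

    import java.io.IOException;
    import java.util.function.UnaryOperator;

    // Hypothetical versioned store standing in for the ZK-backed config node.
    interface VersionedStore {
      final class Entry {
        final String data;
        final int version;
        Entry(String data, int version) { this.data = data; this.version = version; }
      }
      Entry read() throws IOException;
      // Conditional write, like ZK setData(path, data, expectedVersion):
      // returns false instead of writing when the version no longer matches.
      boolean writeIfVersion(String data, int expectedVersion) throws IOException;
    }

    final class ConfigUpdater {
      // Re-reads and re-applies the edit until the conditional write succeeds.
      static void update(VersionedStore store, UnaryOperator<String> edit) throws IOException {
        while (true) {
          VersionedStore.Entry current = store.read();
          if (store.writeIfVersion(edit.apply(current.data), current.version)) {
            return; // saved against the exact version we read
          }
          // somebody else changed the configuration first - loop and retry
        }
      }
    }
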
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java
deleted file mode 100644
index 7103bf5..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
-import org.apache.solr.client.solrj.cloud.autoscaling.NoneSuggester;
-import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
-import org.apache.solr.client.solrj.cloud.autoscaling.UnsupportedSuggester;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.params.AutoScalingParams;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.util.Pair;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.core.SolrResourceLoader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.autoscaling.TriggerEvent.NODE_NAMES;
-
-/**
- * This class is responsible for using the configured policy and preferences
- * with the hints provided by the trigger event to compute the required cluster operations.
- * <p>
- * The cluster operations computed here are put into the {@link ActionContext}'s properties
- * with the key name "operations". The value is a List of SolrRequest objects.
- */
-public class ComputePlanAction extends TriggerActionBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  Set<String> collections = new HashSet<>();
-
-  public ComputePlanAction() {
-    super();
-    TriggerUtils.validProperties(validProperties, "collections");
-  }
-
-
-  @Override
-  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException {
-    super.configure(loader, cloudManager, properties);
-    String colString = (String) properties.get("collections");
-    if (colString != null && !colString.isEmpty()) {
-      collections.addAll(StrUtils.splitSmart(colString, ','));
-    }
-  }
-
-  @Override
-  public void process(TriggerEvent event, ActionContext context) throws Exception {
-    log.debug("-- processing event: {} with context properties: {}", event, context.getProperties());
-    SolrCloudManager cloudManager = context.getCloudManager();
-    try {
-      AutoScalingConfig autoScalingConf = cloudManager.getDistribStateManager().getAutoScalingConfig();
-      if (autoScalingConf.isEmpty()) {
-        throw new Exception("Action: " + getName() + " executed but no policy is configured");
-      }
-      PolicyHelper.SessionWrapper sessionWrapper = PolicyHelper.getSession(cloudManager);
-      Policy.Session session = sessionWrapper.get();
-      ClusterState clusterState = cloudManager.getClusterStateProvider().getClusterState();
-      if (log.isTraceEnabled()) {
-        log.trace("-- session: {}", session);
-        log.trace("-- state: {}", clusterState);
-      }
-      try {
-        Suggester suggester = getSuggester(session, event, context, cloudManager);
-        int maxOperations = getMaxNumOps(event, autoScalingConf, clusterState);
-        int requestedOperations = getRequestedNumOps(event);
-        if (requestedOperations > maxOperations) {
-          log.warn("Requested number of operations {} higher than maximum {}, adjusting...",
-              requestedOperations, maxOperations);
-        }
-        int opCount = 0;
-        int opLimit = maxOperations;
-        if (requestedOperations > 0) {
-          opLimit = requestedOperations;
-        }
-        do {
-          // computing changes in large clusters may take a long time
-          if (Thread.currentThread().isInterrupted()) {
-            throw new InterruptedException("stopping - thread was interrupted");
-          }
-          SolrRequest operation = suggester.getSuggestion();
-          opCount++;
-          // prepare suggester for the next iteration
-          if (suggester.getSession() != null) {
-            session = suggester.getSession();
-          }
-          suggester = getSuggester(session, event, context, cloudManager);
-
-          // break on first null op
-          // unless a specific number of ops was requested
-          // uncomment the following to log too many operations
-          /*if (opCount > 10) {
-            PolicyHelper.logState(cloudManager, initialSuggester);
-          }*/
-
-          if (operation == null) {
-            if (requestedOperations < 0) {
-              //uncomment the following to log zero operations
-//              PolicyHelper.logState(cloudManager, initialSuggester);
-              break;
-            } else {
-              log.info("Computed plan empty, remained " + (opCount - opLimit) + " requested ops to try.");
-              continue;
-            }
-          }
-          log.debug("Computed Plan: {}", operation.getParams());
-          if (!collections.isEmpty()) {
-            String coll = operation.getParams().get(CoreAdminParams.COLLECTION);
-            if (coll != null && !collections.contains(coll)) {
-              // discard an op that doesn't affect our collections
-              log.debug("-- discarding due to collection={} not in {}", coll, collections);
-              continue;
-            }
-          }
-          Map<String, Object> props = context.getProperties();
-          props.compute("operations", (k, v) -> {
-            List<SolrRequest> operations = (List<SolrRequest>) v;
-            if (operations == null) operations = new ArrayList<>();
-            operations.add(operation);
-            return operations;
-          });
-        } while (opCount < opLimit);
-      } finally {
-        releasePolicySession(sessionWrapper, session);
-      }
-    } catch (Exception e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Unexpected exception while processing event: " + event, e);
-    }
-  }
-
-  private void releasePolicySession(PolicyHelper.SessionWrapper sessionWrapper, Policy.Session session) {
-    sessionWrapper.returnSession(session);
-    sessionWrapper.release();
-  }
-
-  protected int getMaxNumOps(TriggerEvent event, AutoScalingConfig autoScalingConfig, ClusterState clusterState) {
-    // estimate a maximum default limit that should be sufficient for most purposes:
-    // number of nodes * total number of replicas * 3
-    AtomicInteger totalRF = new AtomicInteger();
-    clusterState.forEachCollection(coll -> {
-      Integer rf = coll.getReplicationFactor();
-      if (rf == null) {
-        if (coll.getSlices().isEmpty()) {
-          rf = 1; // no slices yet - fall back to a replication factor of 1
-        } else {
-          rf = coll.getReplicas().size() / coll.getSlices().size();
-        }
-      }
-      totalRF.addAndGet(rf * coll.getSlices().size());
-    });
-    int totalMax = clusterState.getLiveNodes().size() * totalRF.get() * 3;
-    int maxOp = (Integer) autoScalingConfig.getProperties().getOrDefault(AutoScalingParams.MAX_COMPUTE_OPERATIONS, totalMax);
-    Object o = event.getProperty(AutoScalingParams.MAX_COMPUTE_OPERATIONS, maxOp);
-    try {
-      return Integer.parseInt(String.valueOf(o));
-    } catch (Exception e) {
-      log.warn("Invalid '" + AutoScalingParams.MAX_COMPUTE_OPERATIONS + "' event property: " + o + ", using default " + maxOp);
-      return maxOp;
-    }
-  }
-
-  protected int getRequestedNumOps(TriggerEvent event) {
-    Collection<TriggerEvent.Op> ops = (Collection<TriggerEvent.Op>) event.getProperty(TriggerEvent.REQUESTED_OPS, Collections.emptyList());
-    if (ops.isEmpty()) {
-      return -1;
-    } else {
-      return ops.size();
-    }
-  }
-
-  private static final String START = "__start__";
-
-  protected Suggester getSuggester(Policy.Session session, TriggerEvent event, ActionContext context, SolrCloudManager cloudManager) throws IOException {
-    Suggester suggester;
-    switch (event.getEventType()) {
-      case NODEADDED:
-        suggester = getNodeAddedSuggester(cloudManager, session, event);
-        break;
-      case NODELOST:
-        String preferredOp = (String) event.getProperty(AutoScalingParams.PREFERRED_OP, CollectionParams.CollectionAction.MOVEREPLICA.toLower());
-        CollectionParams.CollectionAction action = CollectionParams.CollectionAction.get(preferredOp);
-        switch (action) {
-          case MOVEREPLICA:
-            suggester = session.getSuggester(action)
-                .hint(Suggester.Hint.SRC_NODE, event.getProperty(NODE_NAMES));
-            break;
-          case DELETENODE:
-            int start = (Integer)event.getProperty(START, 0);
-            List<String> srcNodes = (List<String>) event.getProperty(NODE_NAMES);
-            if (srcNodes.isEmpty() || start >= srcNodes.size()) {
-              return NoneSuggester.get(session);
-            }
-            String sourceNode = srcNodes.get(start);
-            suggester = session.getSuggester(action)
-                .hint(Suggester.Hint.SRC_NODE, Collections.singletonList(sourceNode));
-            event.getProperties().put(START, ++start);
-            break;
-          case NONE:
-            return NoneSuggester.get(session);
-          default:
-            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unsupported preferredOperation: " + action.toLower() + " specified for node lost trigger");
-        }
-        break;
-      case SEARCHRATE:
-      case METRIC:
-      case INDEXSIZE:
-        List<TriggerEvent.Op> ops = (List<TriggerEvent.Op>)event.getProperty(TriggerEvent.REQUESTED_OPS, Collections.emptyList());
-        int start = (Integer)event.getProperty(START, 0);
-        if (ops.isEmpty() || start >= ops.size()) {
-          return NoneSuggester.get(session);
-        }
-        TriggerEvent.Op op = ops.get(start);
-        suggester = session.getSuggester(op.getAction());
-        if (suggester instanceof UnsupportedSuggester) {
-          List<TriggerEvent.Op> unsupportedOps = (List<TriggerEvent.Op>)context.getProperties().computeIfAbsent(TriggerEvent.UNSUPPORTED_OPS, k -> new ArrayList<TriggerEvent.Op>());
-          unsupportedOps.add(op);
-        }
-        for (Map.Entry<Suggester.Hint, Object> e : op.getHints().entrySet()) {
-          suggester = suggester.hint(e.getKey(), e.getValue());
-        }
-        suggester = suggester.forceOperation(true);
-        event.getProperties().put(START, ++start);
-        break;
-      case SCHEDULED:
-        preferredOp = (String) event.getProperty(AutoScalingParams.PREFERRED_OP, CollectionParams.CollectionAction.MOVEREPLICA.toLower());
-        action = CollectionParams.CollectionAction.get(preferredOp);
-        suggester = session.getSuggester(action);
-        break;
-      default:
-        throw new UnsupportedOperationException("No support for events other than nodeAdded, nodeLost, searchRate, metric, scheduled and indexSize. Received: " + event.getEventType());
-    }
-    return suggester;
-  }
-
-  private Suggester getNodeAddedSuggester(SolrCloudManager cloudManager, Policy.Session session, TriggerEvent event) throws IOException {
-    String preferredOp = (String) event.getProperty(AutoScalingParams.PREFERRED_OP, CollectionParams.CollectionAction.MOVEREPLICA.toLower());
-    CollectionParams.CollectionAction action = CollectionParams.CollectionAction.get(preferredOp);
-
-    Suggester suggester = session.getSuggester(action)
-        .hint(Suggester.Hint.TARGET_NODE, event.getProperty(NODE_NAMES));
-    switch (action) {
-      case ADDREPLICA:
-        // add all collection/shard pairs and let policy engine figure out which one
-        // to place on the target node
-        // todo in future we can prune ineligible collection/shard pairs
-        ClusterState clusterState = cloudManager.getClusterStateProvider().getClusterState();
-        Set<Pair<String, String>> collShards = new HashSet<>();
-        clusterState.getCollectionStates().forEach((collectionName, collectionRef) -> {
-          DocCollection docCollection = collectionRef.get();
-          if (docCollection != null)  {
-            docCollection.getActiveSlices().stream()
-                .map(slice -> new Pair<>(collectionName, slice.getName()))
-                .forEach(collShards::add);
-          }
-        });
-        suggester.hint(Suggester.Hint.COLL_SHARD, collShards);
-        break;
-      case MOVEREPLICA:
-      case NONE:
-        break;
-      default:
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "Unsupported preferredOperation=" + preferredOp + " for node added event");
-    }
-    return suggester;
-  }
-}


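As its class comment says, ComputePlanAction hands its results to later actions through the ActionContext properties under the key "operations", where each element is a SolrRequest describing one cluster change. A rough sketch of how a downstream consumer reads that list; the executing side is elided, and the names here are illustrative rather than the real ExecutePlanAction:

    import java.util.Collections;
    import java.util.List;
    import java.util.Map;

    final class PlanConsumerSketch {
      // Reads the plan left behind by the compute step; elements are typed as
      // Object here only to keep the sketch self-contained.
      @SuppressWarnings("unchecked")
      static void consume(Map<String, Object> contextProperties) {
        List<Object> operations = (List<Object>)
            contextProperties.getOrDefault("operations", Collections.emptyList());
        for (Object operation : operations) {
          System.out.println("would execute: " + operation);
        }
      }
    }
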
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/ZkDistributedQueue.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkDistributedQueue.java b/solr/core/src/java/org/apache/solr/cloud/ZkDistributedQueue.java
deleted file mode 100644
index 7acdfef..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/ZkDistributedQueue.java
+++ /dev/null
@@ -1,587 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.TreeSet;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.function.Predicate;
-
-import com.codahale.metrics.Timer;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.solr.client.solrj.cloud.DistributedQueue;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkCmdExecutor;
-import org.apache.solr.common.util.Pair;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.Op;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A ZK-based distributed queue. Optimized for single-consumer,
- * multiple-producer: if there are multiple consumers on the same ZK queue,
- * the results are still correct, but the access pattern is inefficient.
- */
-public class ZkDistributedQueue implements DistributedQueue {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  static final String PREFIX = "qn-";
-
-  /**
-   * Theory of operation:
-   * <p>
-   * Under ordinary circumstances we neither watch nor poll for children in ZK.
-   * Instead we keep an in-memory list of known child names.  When the in-memory
-   * list is exhausted, we then fetch from ZK.
-   * <p>
-   * We only bother setting a child watcher when the queue has no children in ZK.
-   */
-  private static final Object _IMPLEMENTATION_NOTES = null;
-
-  final String dir;
-
-  final SolrZkClient zookeeper;
-
-  final Stats stats;
-
-  /**
-   * A lock that guards all of the mutable state that follows.
-   */
-  private final ReentrantLock updateLock = new ReentrantLock();
-
-  /**
-   * Contains the last set of children fetched from ZK. Elements are removed from the head of
-   * this in-memory set as they are consumed from the queue.  Due to the distributed nature
-   * of the queue, elements may appear in this set whose underlying nodes have been consumed in ZK.
-   * Therefore, methods like {@link #peek()} have to double-check actual node existence, and methods
-   * like {@link #poll()} must resolve any races by attempting to delete the underlying node.
-   */
-  private TreeSet<String> knownChildren = new TreeSet<>();
-
-  /**
-   * Used to wait on ZK changes to the child list; you must hold {@link #updateLock} before waiting on this condition.
-   */
-  private final Condition changed = updateLock.newCondition();
-
-  private boolean isDirty = true;
-
-  private int watcherCount = 0;
-
-  private final int maxQueueSize;
-
-  /**
-   * If {@link #maxQueueSize} is set, the number of items we can queue without rechecking the server.
-   */
-  private final AtomicInteger offerPermits = new AtomicInteger(0);
-
-  public ZkDistributedQueue(SolrZkClient zookeeper, String dir) {
-    this(zookeeper, dir, new Stats());
-  }
-
-  public ZkDistributedQueue(SolrZkClient zookeeper, String dir, Stats stats) {
-    this(zookeeper, dir, stats, 0);
-  }
-
-  public ZkDistributedQueue(SolrZkClient zookeeper, String dir, Stats stats, int maxQueueSize) {
-    this.dir = dir;
-
-    ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zookeeper.getZkClientTimeout());
-    try {
-      cmdExecutor.ensureExists(dir, zookeeper);
-    } catch (KeeperException e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, e);
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      throw new SolrException(ErrorCode.SERVER_ERROR, e);
-    }
-
-    this.zookeeper = zookeeper;
-    this.stats = stats;
-    this.maxQueueSize = maxQueueSize;
-  }
-
-  /**
-   * Returns the data at the first element of the queue, or null if the queue is
-   * empty.
-   *
-   * @return data at the first element of the queue, or null.
-   */
-  @Override
-  public byte[] peek() throws KeeperException, InterruptedException {
-    Timer.Context time = stats.time(dir + "_peek");
-    try {
-      return firstElement();
-    } finally {
-      time.stop();
-    }
-  }
-
-  /**
-   * Returns the data at the first element of the queue, or null if the queue is
-   * empty and block is false.
-   *
-   * @param block if true, blocks until an element enters the queue
-   * @return data at the first element of the queue, or null.
-   */
-  @Override
-  public byte[] peek(boolean block) throws KeeperException, InterruptedException {
-    return block ? peek(Long.MAX_VALUE) : peek();
-  }
-
-  /**
-   * Returns the data at the first element of the queue, or null if the queue is
-   * empty after wait ms.
-   *
-   * @param wait max wait time in ms.
-   * @return data at the first element of the queue, or null.
-   */
-  @Override
-  public byte[] peek(long wait) throws KeeperException, InterruptedException {
-    Preconditions.checkArgument(wait > 0);
-    Timer.Context time;
-    if (wait == Long.MAX_VALUE) {
-      time = stats.time(dir + "_peek_wait_forever");
-    } else {
-      time = stats.time(dir + "_peek_wait" + wait);
-    }
-    updateLock.lockInterruptibly();
-    try {
-      long waitNanos = TimeUnit.MILLISECONDS.toNanos(wait);
-      while (waitNanos > 0) {
-        byte[] result = firstElement();
-        if (result != null) {
-          return result;
-        }
-        waitNanos = changed.awaitNanos(waitNanos);
-      }
-      return null;
-    } finally {
-      updateLock.unlock();
-      time.stop();
-    }
-  }
-
-  /**
-   * Attempts to remove the head of the queue and return it. Returns null if the
-   * queue is empty.
-   *
-   * @return Head of the queue or null.
-   */
-  @Override
-  public byte[] poll() throws KeeperException, InterruptedException {
-    Timer.Context time = stats.time(dir + "_poll");
-    try {
-      return removeFirst();
-    } finally {
-      time.stop();
-    }
-  }
-
-  /**
-   * Attempts to remove the head of the queue and return it.
-   *
-   * @return The former head of the queue
-   */
-  @Override
-  public byte[] remove() throws NoSuchElementException, KeeperException, InterruptedException {
-    Timer.Context time = stats.time(dir + "_remove");
-    try {
-      byte[] result = removeFirst();
-      if (result == null) {
-        throw new NoSuchElementException();
-      }
-      return result;
-    } finally {
-      time.stop();
-    }
-  }
-
-  public void remove(Collection<String> paths) throws KeeperException, InterruptedException {
-    if (paths.isEmpty()) return;
-    List<Op> ops = new ArrayList<>();
-    for (String path : paths) {
-      ops.add(Op.delete(dir + "/" + path, -1));
-    }
-    for (int from = 0; from < ops.size(); from += 1000) {
-      int to = Math.min(from + 1000, ops.size());
-      if (from < to) {
-        try {
-          zookeeper.multi(ops.subList(from, to), true);
-        } catch (KeeperException.NoNodeException e) {
-          // we don't know which nodes no longer exist, so try deleting them one by one
-          for (int j = from; j < to; j++) {
-            try {
-              zookeeper.delete(ops.get(j).getPath(), -1, true);
-            } catch (KeeperException.NoNodeException e2) {
-              log.debug("Can not remove node which is not exist : " + ops.get(j).getPath());
-            }
-          }
-        }
-      }
-    }
-
-    int cacheSizeBefore = knownChildren.size();
-    knownChildren.removeAll(paths);
-    if (cacheSizeBefore - paths.size() == knownChildren.size() && knownChildren.size() != 0) {
-      stats.setQueueLength(knownChildren.size());
-    } else {
-      // Some deleted elements were not present in the cache,
-      // so the cache no longer appears valid
-      knownChildren.clear();
-      isDirty = true;
-    }
-  }
-
-  /**
-   * Removes the head of the queue and returns it, blocks until it succeeds.
-   *
-   * @return The former head of the queue
-   */
-  @Override
-  public byte[] take() throws KeeperException, InterruptedException {
-    // Same as for element. Should refactor this.
-    Timer.Context timer = stats.time(dir + "_take");
-    updateLock.lockInterruptibly();
-    try {
-      while (true) {
-        byte[] result = removeFirst();
-        if (result != null) {
-          return result;
-        }
-        changed.await();
-      }
-    } finally {
-      updateLock.unlock();
-      timer.stop();
-    }
-  }
-
-  /**
-   * Inserts data into the queue.  If there are no other queue consumers, the offered element
-   * will be immediately visible when this method returns.
-   */
-  @Override
-  public void offer(byte[] data) throws KeeperException, InterruptedException {
-    Timer.Context time = stats.time(dir + "_offer");
-    try {
-      while (true) {
-        try {
-          if (maxQueueSize > 0) {
-            if (offerPermits.get() <= 0 || offerPermits.getAndDecrement() <= 0) {
-              // If a max queue size is set, check it before creating a new queue item.
-              Stat stat = zookeeper.exists(dir, null, true);
-              if (stat == null) {
-                // jump to the code below, which tries to create dir if it doesn't exist
-                throw new KeeperException.NoNodeException();
-              }
-              int remainingCapacity = maxQueueSize - stat.getNumChildren();
-              if (remainingCapacity <= 0) {
-                throw new IllegalStateException("queue is full");
-              }
-
-              // Allow this client to push up to 1% of the remaining queue capacity without rechecking.
-              offerPermits.set(remainingCapacity / 100);
-            }
-          }
-
-          // Explicitly set isDirty here so that synchronous same-thread calls behave as expected.
-          // This will get set again when the watcher actually fires, but that's ok.
-          zookeeper.create(dir + "/" + PREFIX, data, CreateMode.PERSISTENT_SEQUENTIAL, true);
-          isDirty = true;
-          return;
-        } catch (KeeperException.NoNodeException e) {
-          try {
-            zookeeper.create(dir, new byte[0], CreateMode.PERSISTENT, true);
-          } catch (KeeperException.NodeExistsException ne) {
-            // someone created it
-          }
-        }
-      }
-    } finally {
-      time.stop();
-    }
-  }
-
-  public Stats getZkStats() {
-    return stats;
-  }
-
-  @Override
-  public Map<String, Object> getStats() {
-    if (stats == null) {
-      return Collections.emptyMap();
-    }
-    Map<String, Object> res = new HashMap<>();
-    res.put("queueLength", stats.getQueueLength());
-    final Map<String, Object> statsMap = new HashMap<>();
-    res.put("stats", statsMap);
-    stats.getStats().forEach((op, stat) -> {
-      final Map<String, Object> statMap = new HashMap<>();
-      statMap.put("success", stat.success.get());
-      statMap.put("errors", stat.errors.get());
-      final List<Map<String, Object>> failed = new ArrayList<>(stat.failureDetails.size());
-      statMap.put("failureDetails", failed);
-      stat.failureDetails.forEach(failedOp -> {
-        Map<String, Object> fo = new HashMap<>();
-        fo.put("req", failedOp.req);
-        fo.put("resp", failedOp.resp);
-      });
-      statsMap.put(op, statMap);
-    });
-    return res;
-  }
-
-  /**
-   * Returns the name of the first known child node, or {@code null} if the queue is empty.
-   * This is the only place {@link #knownChildren} is ever updated!
-   * The caller must double check that the actual node still exists, since the in-memory
-   * list is inherently stale.
-   */
-  private String firstChild(boolean remove, boolean refetchIfDirty) throws KeeperException, InterruptedException {
-    updateLock.lockInterruptibly();
-    try {
-      // We always return from the cache first; the cache is cleared if the node does not exist
-      if (!knownChildren.isEmpty() && !(isDirty && refetchIfDirty)) {
-        return remove ? knownChildren.pollFirst() : knownChildren.first();
-      }
-
-      if (!isDirty && knownChildren.isEmpty()) {
-        return null;
-      }
-
-      // Dirty, try to fetch an updated list of children from ZK.
-      // Only set a new watcher if there isn't already a watcher.
-      ChildWatcher newWatcher = (watcherCount == 0) ? new ChildWatcher() : null;
-      knownChildren = fetchZkChildren(newWatcher);
-      if (newWatcher != null) {
-        watcherCount++; // watcher was successfully set
-      }
-      isDirty = false;
-      if (knownChildren.isEmpty()) {
-        return null;
-      }
-      changed.signalAll();
-      return remove ? knownChildren.pollFirst() : knownChildren.first();
-    } finally {
-      updateLock.unlock();
-    }
-  }
-
-  /**
-   * Return the current set of children from ZK; does not change internal state.
-   */
-  TreeSet<String> fetchZkChildren(Watcher watcher) throws InterruptedException, KeeperException {
-    while (true) {
-      try {
-        TreeSet<String> orderedChildren = new TreeSet<>();
-
-        List<String> childNames = zookeeper.getChildren(dir, watcher, true);
-        stats.setQueueLength(childNames.size());
-        for (String childName : childNames) {
-          // Check format
-          if (!childName.regionMatches(0, PREFIX, 0, PREFIX.length())) {
-            log.debug("Found child node with improper name: " + childName);
-            continue;
-          }
-          orderedChildren.add(childName);
-        }
-        return orderedChildren;
-      } catch (KeeperException.NoNodeException e) {
-        zookeeper.makePath(dir, false, true);
-        // go back to the loop and try again
-      }
-    }
-  }
-
-  /**
-   * Return the currently-known set of elements, using child names from memory. If no children are found, or no
-   * children pass {@code acceptFilter}, waits up to {@code waitMillis} for at least one child to become available.
-   * <p>
-   * Originally package-private to support {@link OverseerTaskQueue}; now exposed via the {@link DistributedQueue} interface.</p>
-   */
-  @Override
-  public Collection<Pair<String, byte[]>> peekElements(int max, long waitMillis, Predicate<String> acceptFilter) throws KeeperException, InterruptedException {
-    List<String> foundChildren = new ArrayList<>();
-    long waitNanos = TimeUnit.MILLISECONDS.toNanos(waitMillis);
-    boolean first = true;
-    while (true) {
-      // Trigger a refresh, but only force it if this is not the first iteration.
-      firstChild(false, !first);
-
-      updateLock.lockInterruptibly();
-      try {
-        for (String child : knownChildren) {
-          if (acceptFilter.test(child)) {
-            foundChildren.add(child);
-          }
-        }
-        if (!foundChildren.isEmpty()) {
-          break;
-        }
-        if (waitNanos <= 0) {
-          break;
-        }
-
-        // If this is our first time through, force a refresh before waiting.
-        if (first) {
-          first = false;
-          continue;
-        }
-
-        waitNanos = changed.awaitNanos(waitNanos);
-      } finally {
-        updateLock.unlock();
-      }
-
-      if (!foundChildren.isEmpty()) {
-        break;
-      }
-    }
-
-    // Technically we could restart the method if we fail to actually obtain any valid children
-    // from ZK, but this is a super rare case, and the latency of the ZK fetches would require
-    // much more sophisticated waitNanos tracking.
-    List<Pair<String, byte[]>> result = new ArrayList<>();
-    for (String child : foundChildren) {
-      if (result.size() >= max) {
-        break;
-      }
-      try {
-        byte[] data = zookeeper.getData(dir + "/" + child, null, null, true);
-        result.add(new Pair<>(child, data));
-      } catch (KeeperException.NoNodeException e) {
-        // Another client deleted the node first; remove the in-memory entry and continue.
-        updateLock.lockInterruptibly();
-        try {
-          knownChildren.remove(child);
-        } finally {
-          updateLock.unlock();
-        }
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Return the head of the queue without modifying the queue.
-   *
-   * @return the data at the head of the queue.
-   */
-  private byte[] firstElement() throws KeeperException, InterruptedException {
-    while (true) {
-      String firstChild = firstChild(false, false);
-      if (firstChild == null) {
-        return null;
-      }
-      try {
-        return zookeeper.getData(dir + "/" + firstChild, null, null, true);
-      } catch (KeeperException.NoNodeException e) {
-        // Another client deleted the node first; clear the in-memory cache and retry.
-        updateLock.lockInterruptibly();
-        try {
-          // Efficient only for single-consumer
-          knownChildren.clear();
-          isDirty = true;
-        } finally {
-          updateLock.unlock();
-        }
-      }
-    }
-  }
-
-  private byte[] removeFirst() throws KeeperException, InterruptedException {
-    while (true) {
-      String firstChild = firstChild(true, false);
-      if (firstChild == null) {
-        return null;
-      }
-      try {
-        String path = dir + "/" + firstChild;
-        byte[] result = zookeeper.getData(path, null, null, true);
-        zookeeper.delete(path, -1, true);
-        stats.setQueueLength(knownChildren.size());
-        return result;
-      } catch (KeeperException.NoNodeException e) {
-        // Another client deleted the node first; clear the in-memory cache and retry.
-        updateLock.lockInterruptibly();
-        try {
-          // Efficient only for single-consumer
-          knownChildren.clear();
-          isDirty = true;
-        } finally {
-          updateLock.unlock();
-        }
-      }
-    }
-  }
-
-  @VisibleForTesting int watcherCount() throws InterruptedException {
-    updateLock.lockInterruptibly();
-    try {
-      return watcherCount;
-    } finally {
-      updateLock.unlock();
-    }
-  }
-
-  @VisibleForTesting boolean isDirty() throws InterruptedException {
-    updateLock.lockInterruptibly();
-    try {
-      return isDirty;
-    } finally {
-      updateLock.unlock();
-    }
-  }
-
-  @VisibleForTesting class ChildWatcher implements Watcher {
-
-    @Override
-    public void process(WatchedEvent event) {
-      // session events are not change events, and do not remove the watcher; except for Expired
-      if (Event.EventType.None.equals(event.getType()) && !Event.KeeperState.Expired.equals(event.getState())) {
-        return;
-      }
-      updateLock.lock();
-      try {
-        isDirty = true;
-        watcherCount--;
-        // optimistically signal any waiters that the queue may not be empty now, so they can wake up and retry
-        changed.signalAll();
-      } finally {
-        updateLock.unlock();
-      }
-    }
-  }
-}

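Basic producer/consumer usage of the queue defined above, grounded in the constructors and methods shown in the diff; the ZooKeeper address and queue path are illustrative:

    import java.nio.charset.StandardCharsets;
    import org.apache.solr.cloud.ZkDistributedQueue;
    import org.apache.solr.common.cloud.SolrZkClient;

    public class QueueDemo {
      public static void main(String[] args) throws Exception {
        SolrZkClient zkClient = new SolrZkClient("localhost:2181", 30000);
        try {
          ZkDistributedQueue queue = new ZkDistributedQueue(zkClient, "/demo/queue");
          queue.offer("hello".getBytes(StandardCharsets.UTF_8)); // producer side
          byte[] head = queue.peek(5000); // wait up to 5s for an element
          if (head != null) {
            System.out.println(new String(head, StandardCharsets.UTF_8)); // -> hello
            queue.poll(); // consume it
          }
        } finally {
          zkClient.close();
        }
      }
    }
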
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/ZkDistributedQueueFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkDistributedQueueFactory.java b/solr/core/src/java/org/apache/solr/cloud/ZkDistributedQueueFactory.java
deleted file mode 100644
index 1cf79e5..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/ZkDistributedQueueFactory.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-
-import org.apache.solr.client.solrj.cloud.DistributedQueue;
-import org.apache.solr.client.solrj.cloud.DistributedQueueFactory;
-import org.apache.solr.common.cloud.SolrZkClient;
-
-/**
- * Implementation of {@link DistributedQueueFactory} that uses ZooKeeper.
- */
-public class ZkDistributedQueueFactory implements DistributedQueueFactory {
-  private final SolrZkClient zkClient;
-
-  public ZkDistributedQueueFactory(SolrZkClient zkClient) {
-    this.zkClient = zkClient;
-  }
-  @Override
-  public DistributedQueue makeQueue(String path) throws IOException {
-    return new ZkDistributedQueue(zkClient, path);
-  }
-
-  @Override
-  public void removeQueue(String path) throws IOException {
-
-  }
-}

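The factory itself is a thin wrapper, so usage amounts to a single call; the ZK address and path are again illustrative:

    import org.apache.solr.client.solrj.cloud.DistributedQueue;
    import org.apache.solr.client.solrj.cloud.DistributedQueueFactory;
    import org.apache.solr.cloud.ZkDistributedQueueFactory;
    import org.apache.solr.common.cloud.SolrZkClient;

    public class FactoryDemo {
      public static void main(String[] args) throws Exception {
        SolrZkClient zkClient = new SolrZkClient("localhost:2181", 30000);
        try {
          DistributedQueueFactory factory = new ZkDistributedQueueFactory(zkClient);
          DistributedQueue queue = factory.makeQueue("/demo/factory-queue");
          queue.offer(new byte[0]); // hands back a ZkDistributedQueue under the hood
        } finally {
          zkClient.close();
        }
      }
    }
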
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/ZkShardTerms.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkShardTerms.java b/solr/core/src/java/org/apache/solr/cloud/ZkShardTerms.java
deleted file mode 100644
index bcbb347..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/ZkShardTerms.java
+++ /dev/null
@@ -1,627 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Set;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.ObjectReleaseTracker;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.CoreDescriptor;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Class used to interact with a ZK term node.
- * Each ZK term node relates to a shard of a collection and has this format (in JSON):
- * <p>
- * <code>
- * {
- *   "replicaNodeName1" : 1,
- *   "replicaNodeName2" : 2,
- *   ..
- * }
- * </code>
- * <p>
- * The values corresponding to replicas are called terms.
- * Only replicas with the highest term value are considered up to date and able to become leader and serve queries.
- * <p>
- * Terms can only be updated in two strict ways:
- * <ul>
- * <li>A replica sets its term equal to the leader's term
- * <li>The leader increases its term and the terms of some other replicas by 1
- * </ul>
- * This class should not be reused after a {@link org.apache.zookeeper.Watcher.Event.KeeperState#Expired} event
- */
-public class ZkShardTerms implements AutoCloseable {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final Object writingLock = new Object();
-  private final String collection;
-  private final String shard;
-  private final String znodePath;
-  private final SolrZkClient zkClient;
-  private final Set<CoreTermWatcher> listeners = new HashSet<>();
-  private final AtomicBoolean isClosed = new AtomicBoolean(false);
-
-  private Terms terms;
-
-  // Listener of a core for shard's term change events
-  interface CoreTermWatcher {
-    // return true if the listener wants to be triggered again next time
-    boolean onTermChanged(Terms terms);
-  }
-
-  public ZkShardTerms(String collection, String shard, SolrZkClient zkClient) {
-    this.znodePath = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection + "/terms/" + shard;
-    this.collection = collection;
-    this.shard = shard;
-    this.zkClient = zkClient;
-    ensureTermNodeExist();
-    refreshTerms();
-    retryRegisterWatcher();
-    ObjectReleaseTracker.track(this);
-  }
-
-  /**
-   * Ensure that the leader's term is higher than the terms of some replicas
-   * @param leader coreNodeName of the leader
-   * @param replicasNeedingRecovery set of replicas whose terms should be lower than the leader's term
-   */
-  public void ensureTermsIsHigher(String leader, Set<String> replicasNeedingRecovery) {
-    if (replicasNeedingRecovery.isEmpty()) return;
-
-    Terms newTerms;
-    while( (newTerms = terms.increaseTerms(leader, replicasNeedingRecovery)) != null) {
-      if (forceSaveTerms(newTerms)) return;
-    }
-  }
-
-  /**
-   * Can this replica become leader?
-   * @param coreNodeName of the replica
-   * @return true if this replica can become leader, false otherwise
-   */
-  public boolean canBecomeLeader(String coreNodeName) {
-    return terms.canBecomeLeader(coreNodeName);
-  }
-
-  /**
-   * Should the leader skip sending updates to this replica?
-   * @param coreNodeName of the replica
-   * @return true if the replica does not have the highest term (so the leader should skip it), false otherwise
-   */
-  public boolean skipSendingUpdatesTo(String coreNodeName) {
-    return !terms.haveHighestTermValue(coreNodeName);
-  }
-
-  /**
-   * Has this replica registered its term?
-   * @param coreNodeName of the replica
-   * @return true if this replica has registered its term, false otherwise
-   */
-  public boolean registered(String coreNodeName) {
-    return terms.getTerm(coreNodeName) != null;
-  }
-
-  public void close() {
-    // no watcher will be registered
-    isClosed.set(true);
-    synchronized (listeners) {
-      listeners.clear();
-    }
-    ObjectReleaseTracker.release(this);
-  }
-
-  // package private for testing, only used by tests
-  Map<String, Long> getTerms() {
-    synchronized (writingLock) {
-      return new HashMap<>(terms.values);
-    }
-  }
-
-  /**
-   * Add a listener so that the next time the shard's terms are updated, the listeners will be called
-   */
-  void addListener(CoreTermWatcher listener) {
-    synchronized (listeners) {
-      listeners.add(listener);
-    }
-  }
-
-  /**
-   * Remove the coreNodeName from the terms map and also remove any expired listeners
-   * @return true if this object should not be reused
-   */
-  boolean removeTerm(CoreDescriptor cd) {
-    int numListeners;
-    synchronized (listeners) {
-      // drop listeners whose SolrCore has already been closed
-      listeners.removeIf(coreTermWatcher -> !coreTermWatcher.onTermChanged(terms));
-      numListeners = listeners.size();
-    }
-    return removeTerm(cd.getCloudDescriptor().getCoreNodeName()) || numListeners == 0;
-  }
-
-  // package private for testing, only used by tests
-  // return true if this object should not be reused
-  boolean removeTerm(String coreNodeName) {
-    Terms newTerms;
-    while ( (newTerms = terms.removeTerm(coreNodeName)) != null) {
-      try {
-        if (saveTerms(newTerms)) return false;
-      } catch (KeeperException.NoNodeException e) {
-        return true;
-      }
-    }
-    return true;
-  }
-
-  /**
-   * Register a replica's term (term value will be 0).
-   * If a term is already associated with this replica, do nothing
-   * @param coreNodeName of the replica
-   */
-  void registerTerm(String coreNodeName) {
-    Terms newTerms;
-    while ( (newTerms = terms.registerTerm(coreNodeName)) != null) {
-      if (forceSaveTerms(newTerms)) break;
-    }
-  }
-
-  /**
-   * Set a replica's term equal to the leader's term, and remove the replica's recovering flag.
-   * This call should only be used by {@link org.apache.solr.common.params.CollectionParams.CollectionAction#FORCELEADER}
-   * @param coreNodeName of the replica
-   */
-  public void setTermEqualsToLeader(String coreNodeName) {
-    Terms newTerms;
-    while ( (newTerms = terms.setTermEqualsToLeader(coreNodeName)) != null) {
-      if (forceSaveTerms(newTerms)) break;
-    }
-  }
-
-  public void setTermToZero(String coreNodeName) {
-    Terms newTerms;
-    while ( (newTerms = terms.setTermToZero(coreNodeName)) != null) {
-      if (forceSaveTerms(newTerms)) break;
-    }
-  }
-
-  /**
-   * Mark {@code coreNodeName} as recovering
-   */
-  public void startRecovering(String coreNodeName) {
-    Terms newTerms;
-    while ( (newTerms = terms.startRecovering(coreNodeName)) != null) {
-      if (forceSaveTerms(newTerms)) break;
-    }
-  }
-
-  /**
-   * Mark {@code coreNodeName} as finished recovering
-   */
-  public void doneRecovering(String coreNodeName) {
-    Terms newTerms;
-    while ( (newTerms = terms.doneRecovering(coreNodeName)) != null) {
-      if (forceSaveTerms(newTerms)) break;
-    }
-  }
-
-  public boolean isRecovering(String name) {
-    return terms.values.containsKey(name + "_recovering");
-  }
-
-
-  /**
-   * When the first updates come in, all replicas now have some data,
-   * so we must switch from term 0 (registered) to term 1 (has some data)
-   */
-  public void ensureHighestTermsAreNotZero() {
-    Terms newTerms;
-    while ( (newTerms = terms.ensureHighestTermsAreNotZero()) != null) {
-      if (forceSaveTerms(newTerms)) break;
-    }
-  }
-
-  public long getHighestTerm() {
-    return terms.getMaxTerm();
-  }
-
-  public long getTerm(String coreNodeName) {
-    Long term = terms.getTerm(coreNodeName);
-    return term == null? -1 : term;
-  }
-
-  // package private for testing, only used by tests
-  int getNumListeners() {
-    synchronized (listeners) {
-      return listeners.size();
-    }
-  }
-
-  /**
-   * Set new terms to ZK.
-   * If the corresponding ZK term node has not been created yet, create it
-   * @param newTerms to be set
-   * @return true if the terms are saved successfully to ZK, false otherwise
-   */
-  private boolean forceSaveTerms(Terms newTerms) {
-    try {
-      return saveTerms(newTerms);
-    } catch (KeeperException.NoNodeException e) {
-      ensureTermNodeExist();
-      return false;
-    }
-  }
-
-  /**
-   * Set new terms to ZK; the version of the new terms must match the version of the current ZK term node
-   * @param newTerms to be set
-   * @return true if the terms are saved successfully to ZK, false otherwise
-   * @throws KeeperException.NoNodeException if the corresponding ZK term node does not exist
-   */
-  private boolean saveTerms(Terms newTerms) throws KeeperException.NoNodeException {
-    byte[] znodeData = Utils.toJSON(newTerms.values);
-    try {
-      Stat stat = zkClient.setData(znodePath, znodeData, newTerms.version, true);
-      setNewTerms(new Terms(newTerms.values, stat.getVersion()));
-      log.info("Successful update of terms at {} to {}", znodePath, newTerms);
-      return true;
-    } catch (KeeperException.BadVersionException e) {
-      log.info("Failed to save terms, version is not a match, retrying");
-      refreshTerms();
-    } catch (KeeperException.NoNodeException e) {
-      throw e;
-    } catch (Exception e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error while saving shard term for collection: " + collection, e);
-    }
-    return false;
-  }
-
-  /**
-   * Create the corresponding ZK term node
-   */
-  private void ensureTermNodeExist() {
-    String path = "/collections/"+collection+ "/terms";
-    try {
-      if (!zkClient.exists(path, true)) {
-        try {
-          zkClient.makePath(path, true);
-        } catch (KeeperException.NodeExistsException e) {
-          // it's okay if another thread beats us to creating the node
-        }
-      }
-      path += "/"+shard;
-      if (!zkClient.exists(path, true)) {
-        try {
-          Map<String, Long> initialTerms = new HashMap<>();
-          zkClient.create(path, Utils.toJSON(initialTerms), CreateMode.PERSISTENT, true);
-        } catch (KeeperException.NodeExistsException e) {
-          // it's okay if another thread beats us to creating the node
-        }
-      }
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt(); // restore the interrupt status instead of clearing it
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error creating shard term node in Zookeeper for collection: " + collection, e);
-    } catch (KeeperException e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error creating shard term node in Zookeeper for collection: " + collection, e);
-    }
-  }
-
-  /**
-   * Fetch latest terms from ZK
-   */
-  public void refreshTerms() {
-    Terms newTerms;
-    try {
-      Stat stat = new Stat();
-      byte[] data = zkClient.getData(znodePath, null, stat, true);
-      newTerms = new Terms((Map<String, Long>) Utils.fromJSON(data), stat.getVersion());
-    } catch (KeeperException e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error updating shard term for collection: " + collection, e);
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt(); // restore the interrupt status; Thread.interrupted() would clear it
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error updating shard term for collection: " + collection, e);
-    }
-
-    setNewTerms(newTerms);
-  }
-
-  /**
-   * Retry registering a watcher on the corresponding ZK term node
-   */
-  private void retryRegisterWatcher() {
-    while (!isClosed.get()) {
-      try {
-        registerWatcher();
-        return;
-      } catch (KeeperException.SessionExpiredException | KeeperException.AuthFailedException e) {
-        isClosed.set(true);
-        log.error("Failed watching shard term for collection: {} due to unrecoverable exception", collection, e);
-        return;
-      } catch (KeeperException e) {
-        log.warn("Failed watching shard term for collection: {}, retrying!", collection, e);
-        try {
-          zkClient.getConnectionManager().waitForConnected(zkClient.getZkClientTimeout());
-        } catch (TimeoutException te) {
-          if (Thread.interrupted()) {
-            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error watching shard term for collection: " + collection, te);
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Register a watcher on the corresponding ZK term node
-   */
-  private void registerWatcher() throws KeeperException {
-    Watcher watcher = event -> {
-      // session events are not change events, and do not remove the watcher
-      if (Watcher.Event.EventType.None == event.getType()) {
-        return;
-      }
-      retryRegisterWatcher();
-      // Some events may be missed while re-registering the watcher, so it is safer to refresh terms afterwards
-      refreshTerms();
-    };
-    try {
-      // exists operation is faster than getData operation
-      zkClient.exists(znodePath, watcher, true);
-    } catch (InterruptedException e) {
-      Thread.interrupted();
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error watching shard term for collection: " + collection, e);
-    }
-  }
-
-
-  /**
-   * Atomically update {@link ZkShardTerms#terms} and call listeners
-   * @param newTerms the new terms to set
-   */
-  private void setNewTerms(Terms newTerms) {
-    boolean isChanged = false;
-    synchronized (writingLock) {
-      if (terms == null || newTerms.version > terms.version) {
-        terms = newTerms;
-        isChanged = true;
-      }
-    }
-    if (isChanged) onTermUpdates(newTerms);
-  }
-
-  private void onTermUpdates(Terms newTerms) {
-    synchronized (listeners) {
-      listeners.removeIf(coreTermWatcher -> !coreTermWatcher.onTermChanged(newTerms));
-    }
-  }
-
-  /**
-   * Holds the values of terms; this class is immutable
-   */
-  static class Terms {
-    private final Map<String, Long> values;
-    private final long maxTerm;
-    // ZK node version
-    private final int version;
-
-    public Terms () {
-      this(new HashMap<>(), 0);
-    }
-
-    public Terms(Map<String, Long> values, int version) {
-      this.values = values;
-      this.version = version;
-      if (values.isEmpty()) this.maxTerm = 0;
-      else this.maxTerm = Collections.max(values.values());
-    }
-
-    /**
-     * Can {@code coreNodeName} become leader?
-     * @param coreNodeName of the replica
-     * @return true if {@code coreNodeName} can become leader, false otherwise
-     */
-    boolean canBecomeLeader(String coreNodeName) {
-      return haveHighestTermValue(coreNodeName) && !values.containsKey(coreNodeName + "_recovering");
-    }
-
-    /**
-     * Is {@code coreNodeName}'s term highest?
-     * @param coreNodeName of the replica
-     * @return true if the term of {@code coreNodeName} is the highest
-     */
-    boolean haveHighestTermValue(String coreNodeName) {
-      if (values.isEmpty()) return true;
-      long maxTerm = Collections.max(values.values());
-      return values.getOrDefault(coreNodeName, 0L) == maxTerm;
-    }
-
-    Long getTerm(String coreNodeName) {
-      return values.get(coreNodeName);
-    }
-
-    /**
-     * Return a new {@link Terms} in which the term of {@code leader} is higher than the terms of {@code replicasNeedingRecovery}
-     * @param leader coreNodeName of the leader
-     * @param replicasNeedingRecovery set of replicas whose terms should end up lower than the leader's term
-     * @return null if the term of {@code leader} is already higher than the terms of all {@code replicasNeedingRecovery}
-     */
-    Terms increaseTerms(String leader, Set<String> replicasNeedingRecovery) {
-      if (!values.containsKey(leader)) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Can not find leader's term " + leader);
-      }
-
-      boolean changed = false;
-      boolean foundReplicasInLowerTerms = false;
-
-      HashMap<String, Long> newValues = new HashMap<>(values);
-      long leaderTerm = newValues.get(leader);
-      for (String key : newValues.keySet()) {
-        if (replicasNeedingRecovery.contains(key)) foundReplicasInLowerTerms = true;
-        if (Objects.equals(newValues.get(key), leaderTerm)) {
-          if (skipIncreaseTermOf(key, replicasNeedingRecovery)) {
-            changed = true;
-          } else {
-            newValues.put(key, leaderTerm+1);
-          }
-        }
-      }
-
-      // Only short-circuit (return null) if at least one of replicasNeedingRecovery is present
-      // in the local terms; their absence may indicate that the local value is stale
-      if (!changed && foundReplicasInLowerTerms) return null;
-      return new Terms(newValues, version);
-    }
-
-    private boolean skipIncreaseTermOf(String key, Set<String> replicasNeedingRecovery) {
-      if (key.endsWith("_recovering")) {
-        key = key.substring(0, key.length() - "_recovering".length());
-        return replicasNeedingRecovery.contains(key);
-      }
-      return replicasNeedingRecovery.contains(key);
-    }
-
-    /**
-     * Return a new {@link Terms} in which the highest term is not zero
-     * @return null if the highest term is already larger than zero
-     */
-    Terms ensureHighestTermsAreNotZero() {
-      if (maxTerm > 0) return null;
-      else {
-        HashMap<String, Long> newValues = new HashMap<>(values);
-        for (String replica : values.keySet()) {
-          newValues.put(replica, 1L);
-        }
-        return new Terms(newValues, version);
-      }
-    }
-
-    /**
-     * Return a new {@link Terms} in which term of {@code coreNodeName} is removed
-     * @param coreNodeName of the replica
-     * @return null if no term exists for {@code coreNodeName}
-     */
-    Terms removeTerm(String coreNodeName) {
-      if (!values.containsKey(coreNodeName)) return null;
-
-      HashMap<String, Long> newValues = new HashMap<>(values);
-      newValues.remove(coreNodeName);
-      return new Terms(newValues, version);
-    }
-
-    /**
-     * Return a new {@link Terms} in which {@code coreNodeName} has an associated term
-     * @param coreNodeName of the replica
-     * @return null if a term for {@code coreNodeName} already exists
-     */
-    Terms registerTerm(String coreNodeName) {
-      if (values.containsKey(coreNodeName)) return null;
-
-      HashMap<String, Long> newValues = new HashMap<>(values);
-      newValues.put(coreNodeName, 0L);
-      return new Terms(newValues, version);
-    }
-
-    Terms setTermToZero(String coreNodeName) {
-      if (values.getOrDefault(coreNodeName, -1L) == 0) {
-        return null;
-      }
-      HashMap<String, Long> newValues = new HashMap<>(values);
-      newValues.put(coreNodeName, 0L);
-      return new Terms(newValues, version);
-    }
-
-    /**
-     * Return a new {@link Terms} in which the term of {@code coreNodeName} is set to the maximum
-     * @param coreNodeName of the replica
-     * @return null if the term of {@code coreNodeName} is already the maximum
-     */
-    Terms setTermEqualsToLeader(String coreNodeName) {
-      long maxTerm = getMaxTerm();
-      if (values.get(coreNodeName) == maxTerm) return null;
-
-      HashMap<String, Long> newValues = new HashMap<>(values);
-      newValues.put(coreNodeName, maxTerm);
-      newValues.remove(coreNodeName+"_recovering");
-      return new Terms(newValues, version);
-    }
-
-    long getMaxTerm() {
-      return maxTerm;
-    }
-
-    /**
-     * Mark {@code coreNodeName} as recovering
-     * @param coreNodeName of the replica
-     * @return null if {@code coreNodeName} already has the highest term, so no recovery bookkeeping is needed
-     */
-    Terms startRecovering(String coreNodeName) {
-      long maxTerm = getMaxTerm();
-      if (values.get(coreNodeName) == maxTerm)
-        return null;
-
-      HashMap<String, Long> newValues = new HashMap<>(values);
-      if (!newValues.containsKey(coreNodeName+"_recovering")) {
-        long currentTerm = newValues.getOrDefault(coreNodeName, 0L);
-        // by keeping old term, we will have more information in leader election
-        newValues.put(coreNodeName+"_recovering", currentTerm);
-      }
-      newValues.put(coreNodeName, maxTerm);
-      return new Terms(newValues, version);
-    }
-
-    /**
-     * Mark {@code coreNodeName} as finished recovering
-     * @param coreNodeName of the replica
-     * @return null if {@code coreNodeName} is not marked as recovering
-     */
-    Terms doneRecovering(String coreNodeName) {
-      if (!values.containsKey(coreNodeName+"_recovering")) {
-        return null;
-      }
-
-      HashMap<String, Long> newValues = new HashMap<>(values);
-      newValues.remove(coreNodeName+"_recovering");
-      return new Terms(newValues, version);
-    }
-
-    @Override
-    public String toString() {
-      return "Terms{" +
-          "values=" + values +
-          ", version=" + version +
-          '}';
-    }
-  }
-}
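
Each transition on the immutable Terms class above returns a new instance, or
null when the requested change is already in effect; the caller's
compare-and-set loop then has nothing to write. Below is a minimal sketch, not
part of the removed file, of how the transitions compose during a recovery
cycle. Terms is package-private, so this would live in org.apache.solr.cloud,
and the core node names are placeholders:

    // Illustrative only; composes the package-private Terms transitions above.
    Terms terms = new Terms();                     // empty values, version 0
    terms = terms.registerTerm("core_node1");      // replicas register at term 0
    terms = terms.registerTerm("core_node2");

    // The leader (core_node1) marks core_node2 as needing recovery: every
    // replica at the current max term is bumped, except those in the set.
    terms = terms.increaseTerms("core_node1", Collections.singleton("core_node2"));
    assert terms.getTerm("core_node1") == 1L;
    assert terms.getTerm("core_node2") == 0L;
    assert !terms.canBecomeLeader("core_node2");   // no longer the highest term

    // core_node2 catches up; the "_recovering" marker keeps the old term for
    // leader election and blocks leadership until recovery completes.
    terms = terms.startRecovering("core_node2");
    assert !terms.canBecomeLeader("core_node2");
    terms = terms.doneRecovering("core_node2");
    assert terms.canBecomeLeader("core_node2");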

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/ZkSolrResourceLoader.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkSolrResourceLoader.java b/solr/core/src/java/org/apache/solr/cloud/ZkSolrResourceLoader.java
deleted file mode 100644
index 5f32ef2..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/ZkSolrResourceLoader.java
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.ByteArrayInputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.lang.invoke.MethodHandles;
-import java.nio.file.Path;
-import java.util.List;
-import java.util.Properties;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ZkConfigManager;
-import org.apache.solr.common.cloud.ZooKeeperException;
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.solr.core.SolrResourceNotFoundException;
-import org.apache.solr.schema.ZkIndexSchemaReader;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * ResourceLoader that works with ZooKeeper.
- *
- */
-public class ZkSolrResourceLoader extends SolrResourceLoader {
-
-  private final String configSetZkPath;
-  private ZkController zkController;
-  private ZkIndexSchemaReader zkIndexSchemaReader;
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public ZkSolrResourceLoader(Path instanceDir, String configSet, ZkController zooKeeperController) {
-    super(instanceDir);
-    this.zkController = zooKeeperController;
-    configSetZkPath = ZkConfigManager.CONFIGS_ZKNODE + "/" + configSet;
-  }
-
-  /**
-   * <p>
-   * This loader will first attempt to load resources from ZooKeeper, but if not found
-   * will delegate to the context classloader when possible,
-   * otherwise it will attempt to resolve resources using any jar files found in
-   * the "lib/" directory in the specified instance directory.
-   */
-  public ZkSolrResourceLoader(Path instanceDir, String configSet, ClassLoader parent,
-      Properties coreProperties, ZkController zooKeeperController) {
-    super(instanceDir, parent, coreProperties);
-    this.zkController = zooKeeperController;
-    configSetZkPath = ZkConfigManager.CONFIGS_ZKNODE + "/" + configSet;
-  }
-
-  /**
-   * Opens any resource by its name. By default, this looks for the resource at
-   * $configDir/$resource in ZooKeeper. If it cannot be found there, it looks in
-   * any jar accessible through the class loader.
-   * Override this method to customize loading resources.
-   * 
-   * @return the stream for the named resource
-   */
-  @Override
-  public InputStream openResource(String resource) throws IOException {
-    InputStream is;
-    String file = (".".equals(resource)) ? configSetZkPath : configSetZkPath + "/" + resource;
-    int maxTries = 10;
-    Exception exception = null;
-    while (maxTries-- > 0) {
-      try {
-        if (zkController.pathExists(file)) {
-          Stat stat = new Stat();
-          byte[] bytes = zkController.getZkClient().getData(file, null, stat, true);
-          return new ZkByteArrayInputStream(bytes, stat);
-        } else {
-          // Path does not exist; we only retry on session-expired exceptions.
-          break;
-        }
-      } catch (KeeperException.SessionExpiredException e) {
-        exception = e;
-        if (!zkController.getCoreContainer().isShutDown()) {
-          // Retry in case of session expiry
-          try {
-            log.debug("Sleeping for 1s before retrying fetching resource={}", resource);
-            Thread.sleep(1000);
-          } catch (InterruptedException ie) {
-            Thread.currentThread().interrupt();
-            throw new IOException("Could not load resource=" + resource, ie);
-          }
-        }
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        throw new IOException("Error opening " + file, e);
-      } catch (Exception e) {
-        throw new IOException("Error opening " + file, e);
-      }
-    }
-
-    if (exception != null) {
-      throw new IOException("We re-tried 10 times but was still unable to fetch resource=" + resource + " from ZK", exception);
-    }
-
-    try {
-      // delegate to the class loader (looking into $INSTANCE_DIR/lib jars)
-      is = classLoader.getResourceAsStream(resource.replace(File.separatorChar, '/'));
-    } catch (Exception e) {
-      throw new IOException("Error opening " + resource, e);
-    }
-    if (is == null) {
-      throw new SolrResourceNotFoundException("Can't find resource '" + resource
-          + "' in classpath or '" + configSetZkPath + "', cwd="
-          + System.getProperty("user.dir"));
-    }
-    return is;
-  }
-
-  public static class ZkByteArrayInputStream extends ByteArrayInputStream {
-
-    private final Stat stat;
-
-    public ZkByteArrayInputStream(byte[] buf, Stat stat) {
-      super(buf);
-      this.stat = stat;
-    }
-
-    public Stat getStat() {
-      return stat;
-    }
-  }
-
-  @Override
-  public String getConfigDir() {
-    throw new ZooKeeperException(
-        ErrorCode.SERVER_ERROR,
-        "ZkSolrResourceLoader does not support getConfigDir() - likely, what you are trying to do is not supported in ZooKeeper mode");
-  }
-  
-  @Override
-  public String[] listConfigDir() {
-    List<String> list;
-    try {
-      list = zkController.getZkClient().getChildren(configSetZkPath, null, true);
-    } catch (InterruptedException e) {
-      // Restore the interrupted status
-      Thread.currentThread().interrupt();
-      log.error("", e);
-      throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
-          "", e);
-    } catch (KeeperException e) {
-      log.error("", e);
-      throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
-          "", e);
-    }
-    return list.toArray(new String[0]);
-  }
-
-  public String getConfigSetZkPath() {
-    return configSetZkPath;
-  }
-  
-  public ZkController getZkController() {
-    return zkController;
-  }
-
-  public void setZkIndexSchemaReader(ZkIndexSchemaReader zkIndexSchemaReader) {
-    this.zkIndexSchemaReader = zkIndexSchemaReader;
-  }
-
-  public ZkIndexSchemaReader getZkIndexSchemaReader() { return zkIndexSchemaReader; }
-}
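
For orientation, a hedged sketch, not part of the removed file, of how a
caller consumes this loader and the znode version it surfaces; the configset
name, instance dir, and ZkController instance are assumed context:

    // Illustrative sketch; instanceDir, zkController, and the configset are assumed.
    ZkSolrResourceLoader loader =
        new ZkSolrResourceLoader(instanceDir, "myConfigSet", zkController);
    try (InputStream in = loader.openResource("solrconfig.xml")) {
      if (in instanceof ZkSolrResourceLoader.ZkByteArrayInputStream) {
        // Config (removed below in this commit) reads this version for
        // optimistic concurrency against the config znode.
        int zkVersion =
            ((ZkSolrResourceLoader.ZkByteArrayInputStream) in).getStat().getVersion();
      }
    }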

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
deleted file mode 100644
index 8b72cdf..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
+++ /dev/null
@@ -1,409 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.api.collections;
-
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.stream.Collectors;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.cloud.ActiveReplicaWatcher;
-import org.apache.solr.cloud.Overseer;
-import org.apache.solr.common.SolrCloseableLatch;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ReplicaPosition;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CommonAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.component.ShardHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SKIP_CREATE_REPLICA_IN_CLUSTER_STATE;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
-import static org.apache.solr.common.params.CollectionAdminParams.COLL_CONF;
-import static org.apache.solr.common.params.CollectionAdminParams.WITH_COLLECTION;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonAdminParams.TIMEOUT;
-import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
-
-public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  /**
-   * When AddReplica is called with this set to true, we do not try to compute node assignments
-   * for the add replica API; instead, a valid "node" must be specified.
-   */
-  public static final String SKIP_NODE_ASSIGNMENT = "skipNodeAssignment";
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public AddReplicaCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    addReplica(state, message, results, null);
-  }
-
-  List<ZkNodeProps> addReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
-      throws IOException, InterruptedException {
-    log.debug("addReplica() : {}", Utils.toJSONString(message));
-
-    String collectionName = message.getStr(COLLECTION_PROP);
-    String shard = message.getStr(SHARD_ID_PROP);
-
-    DocCollection coll = clusterState.getCollection(collectionName);
-    if (coll == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection: " + collectionName + " does not exist");
-    }
-    if (coll.getSlice(shard) == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "Collection: " + collectionName + " shard: " + shard + " does not exist");
-    }
-
-    boolean waitForFinalState = message.getBool(WAIT_FOR_FINAL_STATE, false);
-    boolean skipCreateReplicaInClusterState = message.getBool(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, false);
-    final String asyncId = message.getStr(ASYNC);
-
-    String node = message.getStr(CoreAdminParams.NODE);
-    String createNodeSetStr = message.getStr(CREATE_NODE_SET);
-
-    if (node != null && createNodeSetStr != null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Both 'node' and 'createNodeSet' parameters cannot be specified together.");
-    }
-
-    int timeout = message.getInt(TIMEOUT, 10 * 60); // 10 minutes
-    boolean parallel = message.getBool("parallel", false);
-
-    Replica.Type replicaType = Replica.Type.valueOf(message.getStr(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT.name()).toUpperCase(Locale.ROOT));
-    EnumMap<Replica.Type, Integer> replicaTypesVsCount = new EnumMap<>(Replica.Type.class);
-    replicaTypesVsCount.put(Replica.Type.NRT, message.getInt(NRT_REPLICAS, replicaType == Replica.Type.NRT ? 1 : 0));
-    replicaTypesVsCount.put(Replica.Type.TLOG, message.getInt(TLOG_REPLICAS, replicaType == Replica.Type.TLOG ? 1 : 0));
-    replicaTypesVsCount.put(Replica.Type.PULL, message.getInt(PULL_REPLICAS, replicaType == Replica.Type.PULL ? 1 : 0));
-
-    int totalReplicas = 0;
-    for (Map.Entry<Replica.Type, Integer> entry : replicaTypesVsCount.entrySet()) {
-      totalReplicas += entry.getValue();
-    }
-    if (totalReplicas > 1)  {
-      if (message.getStr(CoreAdminParams.NAME) != null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cannot create " + totalReplicas + " replicas if 'name' parameter is specified");
-      }
-      if (message.getStr(CoreAdminParams.CORE_NODE_NAME) != null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cannot create " + totalReplicas + " replicas if 'coreNodeName' parameter is specified");
-      }
-    }
-
-    AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
-    List<CreateReplica> createReplicas;
-    try {
-      createReplicas = buildReplicaPositions(ocmh.cloudManager, clusterState, collectionName, message, replicaTypesVsCount, sessionWrapper)
-          .stream()
-          .map(replicaPosition -> assignReplicaDetails(ocmh.cloudManager, clusterState, message, replicaPosition))
-          .collect(Collectors.toList());
-    } finally {
-      if (sessionWrapper.get() != null) {
-        sessionWrapper.get().release();
-      }
-    }
-
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    // For tracking async calls.
-    Map<String,String> requestMap = new HashMap<>();
-
-    for (CreateReplica createReplica : createReplicas) {
-      assert createReplica.coreName != null;
-      ModifiableSolrParams params = getReplicaParams(clusterState, message, results, collectionName, coll, skipCreateReplicaInClusterState, asyncId, shardHandler, createReplica);
-      ocmh.sendShardRequest(createReplica.node, params, shardHandler, asyncId, requestMap);
-    }
-
-    Runnable runnable = () -> {
-      ocmh.processResponses(results, shardHandler, true, "ADDREPLICA failed to create replica", asyncId, requestMap);
-      for (CreateReplica replica : createReplicas) {
-        ocmh.waitForCoreNodeName(collectionName, replica.node, replica.coreName);
-      }
-      if (onComplete != null) onComplete.run();
-    };
-
-    if (!parallel || waitForFinalState) {
-      if (waitForFinalState) {
-        SolrCloseableLatch latch = new SolrCloseableLatch(totalReplicas, ocmh);
-        ActiveReplicaWatcher watcher = new ActiveReplicaWatcher(collectionName, null,
-            createReplicas.stream().map(createReplica -> createReplica.coreName).collect(Collectors.toList()), latch);
-        try {
-          zkStateReader.registerCollectionStateWatcher(collectionName, watcher);
-          runnable.run();
-          if (!latch.await(timeout, TimeUnit.SECONDS)) {
-            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Timeout waiting " + timeout + " seconds for replica to become active.");
-          }
-        } finally {
-          zkStateReader.removeCollectionStateWatcher(collectionName, watcher);
-        }
-      } else {
-        runnable.run();
-      }
-    } else {
-      ocmh.tpe.submit(runnable);
-    }
-
-    return createReplicas.stream()
-        .map(createReplica -> new ZkNodeProps(
-            ZkStateReader.COLLECTION_PROP, createReplica.collectionName,
-            ZkStateReader.SHARD_ID_PROP, createReplica.sliceName,
-            ZkStateReader.CORE_NAME_PROP, createReplica.coreName,
-            ZkStateReader.NODE_NAME_PROP, createReplica.node
-        ))
-        .collect(Collectors.toList());
-  }
-
-  private ModifiableSolrParams getReplicaParams(ClusterState clusterState, ZkNodeProps message, NamedList results, String collectionName, DocCollection coll, boolean skipCreateReplicaInClusterState, String asyncId, ShardHandler shardHandler, CreateReplica createReplica) throws IOException, InterruptedException {
-    if (coll.getStr(WITH_COLLECTION) != null) {
-      String withCollectionName = coll.getStr(WITH_COLLECTION);
-      DocCollection withCollection = clusterState.getCollection(withCollectionName);
-      if (withCollection.getActiveSlices().size() > 1)  {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The `withCollection` must have only one shard, found: " + withCollection.getActiveSlices().size());
-      }
-      String withCollectionShard = withCollection.getActiveSlices().iterator().next().getName();
-
-      List<Replica> replicas = withCollection.getReplicas(createReplica.node);
-      if (replicas == null || replicas.isEmpty()) {
-        // create a replica of withCollection on the identified node before proceeding further
-        ZkNodeProps props = new ZkNodeProps(
-            Overseer.QUEUE_OPERATION, ADDREPLICA.toString(),
-            ZkStateReader.COLLECTION_PROP, withCollectionName,
-            ZkStateReader.SHARD_ID_PROP, withCollectionShard,
-            "node", createReplica.node,
-            // since we already computed node assignments (which include assigning a node for this withCollection replica) we want to skip the assignment step
-            SKIP_NODE_ASSIGNMENT, "true",
-            CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.TRUE.toString()); // set to true because we want `withCollection` to be ready after this collection is created
-        addReplica(clusterState, props, results, null);
-      }
-    }
-
-    ModifiableSolrParams params = new ModifiableSolrParams();
-
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    if (!Overseer.isLegacy(zkStateReader)) {
-      if (!skipCreateReplicaInClusterState) {
-        ZkNodeProps props = new ZkNodeProps(
-            Overseer.QUEUE_OPERATION, ADDREPLICA.toLower(),
-            ZkStateReader.COLLECTION_PROP, collectionName,
-            ZkStateReader.SHARD_ID_PROP, createReplica.sliceName,
-            ZkStateReader.CORE_NAME_PROP, createReplica.coreName,
-            ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
-            ZkStateReader.BASE_URL_PROP, zkStateReader.getBaseUrlForNodeName(createReplica.node),
-            ZkStateReader.NODE_NAME_PROP, createReplica.node,
-            ZkStateReader.REPLICA_TYPE, createReplica.replicaType.name());
-        if (createReplica.coreNodeName != null) {
-          props = props.plus(ZkStateReader.CORE_NODE_NAME_PROP, createReplica.coreNodeName);
-        }
-        try {
-          Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
-        } catch (Exception e) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Exception updating Overseer state queue", e);
-        }
-      }
-      params.set(CoreAdminParams.CORE_NODE_NAME,
-          ocmh.waitToSeeReplicasInState(collectionName, Collections.singletonList(createReplica.coreName)).get(createReplica.coreName).getName());
-    }
-
-    String configName = zkStateReader.readConfigName(collectionName);
-    String routeKey = message.getStr(ShardParams._ROUTE_);
-    String dataDir = message.getStr(CoreAdminParams.DATA_DIR);
-    String ulogDir = message.getStr(CoreAdminParams.ULOG_DIR);
-    String instanceDir = message.getStr(CoreAdminParams.INSTANCE_DIR);
-
-    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.CREATE.toString());
-    params.set(CoreAdminParams.NAME, createReplica.coreName);
-    params.set(COLL_CONF, configName);
-    params.set(CoreAdminParams.COLLECTION, collectionName);
-    params.set(CoreAdminParams.REPLICA_TYPE, createReplica.replicaType.name());
-    if (createReplica.sliceName != null) {
-      params.set(CoreAdminParams.SHARD, createReplica.sliceName);
-    } else if (routeKey != null) {
-      Collection<Slice> slices = coll.getRouter().getSearchSlicesSingle(routeKey, null, coll);
-      if (slices.isEmpty()) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No active shard serving _route_=" + routeKey + " found");
-      } else {
-        params.set(CoreAdminParams.SHARD, slices.iterator().next().getName());
-      }
-    } else {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Specify either 'shard' or _route_ param");
-    }
-    if (dataDir != null) {
-      params.set(CoreAdminParams.DATA_DIR, dataDir);
-    }
-    if (ulogDir != null) {
-      params.set(CoreAdminParams.ULOG_DIR, ulogDir);
-    }
-    if (instanceDir != null) {
-      params.set(CoreAdminParams.INSTANCE_DIR, instanceDir);
-    }
-    if (createReplica.coreNodeName != null) {
-      params.set(CoreAdminParams.CORE_NODE_NAME, createReplica.coreNodeName);
-    }
-    ocmh.addPropertyParams(message, params);
-
-    return params;
-  }
-
-  public static CreateReplica assignReplicaDetails(SolrCloudManager cloudManager, ClusterState clusterState,
-                                                 ZkNodeProps message, ReplicaPosition replicaPosition) {
-    boolean skipCreateReplicaInClusterState = message.getBool(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, false);
-
-    String collection = message.getStr(COLLECTION_PROP);
-    String node = replicaPosition.node;
-    String shard = message.getStr(SHARD_ID_PROP);
-    String coreName = message.getStr(CoreAdminParams.NAME);
-    String coreNodeName = message.getStr(CoreAdminParams.CORE_NODE_NAME);
-    Replica.Type replicaType = replicaPosition.type;
-
-    if (StringUtils.isBlank(coreName)) {
-      coreName = message.getStr(CoreAdminParams.PROPERTY_PREFIX + CoreAdminParams.NAME);
-    }
-
-    log.info("Node Identified {} for creating new replica of shard {} for collection {}", node, shard, collection);
-    if (!clusterState.liveNodesContain(node)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Node: " + node + " is not live");
-    }
-    DocCollection coll = clusterState.getCollection(collection);
-    if (coreName == null) {
-      coreName = Assign.buildSolrCoreName(cloudManager.getDistribStateManager(), coll, shard, replicaType);
-    } else if (!skipCreateReplicaInClusterState) {
-      //Validate that the core name is unique in that collection
-      for (Slice slice : coll.getSlices()) {
-        for (Replica replica : slice.getReplicas()) {
-          String replicaCoreName = replica.getStr(CORE_NAME_PROP);
-          if (coreName.equals(replicaCoreName)) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Another replica with the same core name already exists" +
-                " for this collection");
-          }
-        }
-      }
-    }
-    return new CreateReplica(collection, shard, node, replicaType, coreName, coreNodeName);
-  }
-
-  public static List<ReplicaPosition> buildReplicaPositions(SolrCloudManager cloudManager, ClusterState clusterState,
-                                                            String collectionName, ZkNodeProps message,
-                                                            EnumMap<Replica.Type, Integer> replicaTypeVsCount,
-                                                            AtomicReference< PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException {
-    boolean skipCreateReplicaInClusterState = message.getBool(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, false);
-    boolean skipNodeAssignment = message.getBool(SKIP_NODE_ASSIGNMENT, false);
-    String sliceName = message.getStr(SHARD_ID_PROP);
-    DocCollection collection = clusterState.getCollection(collectionName);
-
-    int numNrtReplicas = replicaTypeVsCount.get(Replica.Type.NRT);
-    int numPullReplicas = replicaTypeVsCount.get(Replica.Type.PULL);
-    int numTlogReplicas = replicaTypeVsCount.get(Replica.Type.TLOG);
-    int totalReplicas = numNrtReplicas + numPullReplicas + numTlogReplicas;
-
-    String node = message.getStr(CoreAdminParams.NODE);
-    Object createNodeSetStr = message.get(OverseerCollectionMessageHandler.CREATE_NODE_SET);
-    if (createNodeSetStr == null) {
-      if (node != null) {
-        message.getProperties().put(OverseerCollectionMessageHandler.CREATE_NODE_SET, node);
-        createNodeSetStr = node;
-      }
-    }
-
-    List<ReplicaPosition> positions = null;
-    if (!skipCreateReplicaInClusterState && !skipNodeAssignment) {
-
-      positions = Assign.getNodesForNewReplicas(clusterState, collection.getName(), sliceName, numNrtReplicas,
-                    numTlogReplicas, numPullReplicas, createNodeSetStr, cloudManager);
-      sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
-    }
-
-    if (positions == null)  {
-      assert node != null;
-      if (node == null) {
-        // in case asserts are disabled
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "A node should have been identified to add replica but wasn't. Please inform solr developers at SOLR-9317");
-      }
-      // It is unlikely that multiple replicas have been requested to be created on
-      // the same node, but we have to accommodate that.
-      positions = new ArrayList<>(totalReplicas);
-      int i = 0;
-      for (Map.Entry<Replica.Type, Integer> entry : replicaTypeVsCount.entrySet()) {
-        for (int j = 0; j < entry.getValue(); j++) {
-          positions.add(new ReplicaPosition(sliceName, i++, entry.getKey(), node));
-        }
-      }
-    }
-    return positions;
-  }
-
-  /**
-   * A data structure to keep all information required to create a new replica in one place.
-   * Think of it as a typed ZkNodeProps for replica creation.
-   *
-   * This is <b>not</b> a public API and can be changed at any time without notice.
-   */
-  public static class CreateReplica {
-    public final String collectionName;
-    public final String sliceName;
-    public final String node;
-    public final Replica.Type replicaType;
-    public String coreName;
-    public String coreNodeName;
-
-    CreateReplica(String collectionName, String sliceName, String node, Replica.Type replicaType, String coreName, String coreNodeName) {
-      this.collectionName = collectionName;
-      this.sliceName = sliceName;
-      this.node = node;
-      this.replicaType = replicaType;
-      this.coreName = coreName;
-      this.coreNodeName = coreNodeName;
-    }
-  }
-
-}
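
A hedged SolrJ sketch of the client request that ends up in this command; the
ZooKeeper address, collection, shard, and node names are placeholders and not
part of the removed file:

    // Illustrative sketch; endpoints and names are placeholders.
    try (CloudSolrClient client = new CloudSolrClient.Builder(
        Collections.singletonList("zk1:2181"), Optional.empty()).build()) {
      CollectionAdminRequest.AddReplica addReplica =
          CollectionAdminRequest.addReplicaToShard("myCollection", "shard1");
      // Pins the new replica to one node; AddReplicaCmd rejects combining this
      // with createNodeSet and otherwise runs the Assign placement logic.
      addReplica.setNode("host1:8983_solr");
      addReplica.process(client);   // routed to the Overseer, handled by AddReplicaCmd
    }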


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/Config.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/Config.java b/solr/core/src/java/org/apache/solr/core/Config.java
deleted file mode 100644
index 11a381e..0000000
--- a/solr/core/src/java/org/apache/solr/core/Config.java
+++ /dev/null
@@ -1,493 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import javax.xml.namespace.QName;
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-import javax.xml.transform.Transformer;
-import javax.xml.transform.TransformerException;
-import javax.xml.transform.TransformerFactory;
-import javax.xml.transform.dom.DOMResult;
-import javax.xml.transform.dom.DOMSource;
-import javax.xml.xpath.XPath;
-import javax.xml.xpath.XPathConstants;
-import javax.xml.xpath.XPathExpressionException;
-import javax.xml.xpath.XPathFactory;
-import java.io.IOException;
-import java.io.InputStream;
-import java.lang.invoke.MethodHandles;
-import java.text.ParseException;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.lucene.util.Version;
-import org.apache.solr.cloud.ZkSolrResourceLoader;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.util.XMLErrorLogger;
-import org.apache.solr.util.DOMUtil;
-import org.apache.solr.util.SystemIdResolver;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import org.w3c.dom.NamedNodeMap;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-import org.xml.sax.InputSource;
-import org.xml.sax.SAXException;
-
-/**
- * Wraps a DOM-parsed XML configuration file and exposes XPath-based accessors,
- * with optional property substitution.
- */
-public class Config {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private static final XMLErrorLogger xmllog = new XMLErrorLogger(log);
-
-  static final XPathFactory xpathFactory = XPathFactory.newInstance();
-
-  private final Document doc;
-  private final Document origDoc; // with unsubstituted properties
-  private final String prefix;
-  private final String name;
-  private final SolrResourceLoader loader;
-  private int zkVersion = -1;
-
-  /**
-   * Builds a config from a resource name with no xpath prefix.
-   */
-  public Config(SolrResourceLoader loader, String name) throws ParserConfigurationException, IOException, SAXException 
-  {
-    this( loader, name, null, null );
-  }
-
-  public Config(SolrResourceLoader loader, String name, InputSource is, String prefix) throws ParserConfigurationException, IOException, SAXException 
-  {
-    this(loader, name, is, prefix, true);
-  }
-  /**
-   * Builds a config:
-   * <p>
-   * Note that the 'name' parameter is used to obtain a valid input stream if no valid one is provided through 'is'.
-   * If no valid stream is provided, a valid SolrResourceLoader instance should be provided through 'loader' so
-   * the resource can be opened (@see SolrResourceLoader#openResource); if no SolrResourceLoader instance is provided, a default one
-   * will be created.
-   * </p>
-   * <p>
-   * Consider passing a non-null 'name' parameter in all use-cases since it is used for logging &amp; exception reporting.
-   * </p>
-   * @param loader the resource loader used to obtain an input stream if 'is' is null
-   * @param name the resource name used if the input stream 'is' is null
-   * @param is the resource as a SAX InputSource
-   * @param prefix an optional prefix that will be prepended to all non-absolute xpath expressions
-   * @param substituteProps whether to substitute properties (from {@link #getSubstituteProperties()}) into the parsed document
-   */
-  public Config(SolrResourceLoader loader, String name, InputSource is, String prefix, boolean substituteProps) throws ParserConfigurationException, IOException, SAXException
-  {
-    if( loader == null ) {
-      loader = new SolrResourceLoader(SolrResourceLoader.locateSolrHome());
-    }
-    this.loader = loader;
-    this.name = name;
-    this.prefix = (prefix != null && !prefix.endsWith("/"))? prefix + '/' : prefix;
-    try {
-      javax.xml.parsers.DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
-
-      if (is == null) {
-        InputStream in = loader.openConfig(name);
-        if (in instanceof ZkSolrResourceLoader.ZkByteArrayInputStream) {
-          zkVersion = ((ZkSolrResourceLoader.ZkByteArrayInputStream) in).getStat().getVersion();
-          log.debug("loaded config {} with version {} ",name,zkVersion);
-        }
-        is = new InputSource(in);
-        is.setSystemId(SystemIdResolver.createSystemIdFromResourceName(name));
-      }
-
-      // only enable xinclude, if a SystemId is available
-      if (is.getSystemId() != null) {
-        try {
-          dbf.setXIncludeAware(true);
-          dbf.setNamespaceAware(true);
-        } catch(UnsupportedOperationException e) {
-          log.warn(name + " XML parser doesn't support XInclude option");
-        }
-      }
-      
-      final DocumentBuilder db = dbf.newDocumentBuilder();
-      db.setEntityResolver(new SystemIdResolver(loader));
-      db.setErrorHandler(xmllog);
-      try {
-        doc = db.parse(is);
-        origDoc = copyDoc(doc);
-      } finally {
-        // some XML parsers are broken and don't close the byte stream (but they should according to spec)
-        IOUtils.closeQuietly(is.getByteStream());
-      }
-      if (substituteProps) {
-        DOMUtil.substituteProperties(doc, getSubstituteProperties());
-      }
-    } catch (ParserConfigurationException | SAXException | TransformerException e)  {
-      SolrException.log(log, "Exception during parsing file: " + name, e);
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
-    }
-  }
-
-  /**
-   * Asserts that assertCondition is true.
-   * If not, logs reason as a warning.
-   * If failCondition is true, throws an exception instead of warning.
-   */
-  public static void assertWarnOrFail(String reason, boolean assertCondition, boolean failCondition) {
-    if (assertCondition) {
-      return;
-    } else if (failCondition) {
-      throw new SolrException(SolrException.ErrorCode.FORBIDDEN, reason);
-    } else {
-      log.warn(reason);
-    }
-  }
-
-  protected Properties getSubstituteProperties() {
-    return loader.getCoreProperties();
-  }
-
-  public Config(SolrResourceLoader loader, String name, Document doc) {
-    this.prefix = null;
-    this.doc = doc;
-    try {
-      this.origDoc = copyDoc(doc);
-    } catch (TransformerException e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
-    }
-    this.name = name;
-    this.loader = loader;
-  }
-
-  
-  private static Document copyDoc(Document doc) throws TransformerException {
-    TransformerFactory tfactory = TransformerFactory.newInstance();
-    Transformer tx = tfactory.newTransformer();
-    DOMSource source = new DOMSource(doc);
-    DOMResult result = new DOMResult();
-    tx.transform(source, result);
-    return (Document) result.getNode();
-  }
-  
-  /**
-   * @since solr 1.3
-   */
-  public SolrResourceLoader getResourceLoader()
-  {
-    return loader;
-  }
-
-  /**
-   * @since solr 1.3
-   */
-  public String getResourceName() {
-    return name;
-  }
-
-  public String getName() {
-    return name;
-  }
-  
-  public Document getDocument() {
-    return doc;
-  }
-
-  public XPath getXPath() {
-    return xpathFactory.newXPath();
-  }
-
-  private String normalize(String path) {
-    return (prefix==null || path.startsWith("/")) ? path : prefix+path;
-  }
-  
-  public void substituteProperties() {
-    DOMUtil.substituteProperties(doc, getSubstituteProperties());
-  }
-
-
-  public Object evaluate(String path, QName type) {
-    XPath xpath = xpathFactory.newXPath();
-    try {
-      String xstr=normalize(path);
-
-      // TODO: instead of prepending /prefix/, we could do the search rooted at /prefix...
-      Object o = xpath.evaluate(xstr, doc, type);
-      return o;
-
-    } catch (XPathExpressionException e) {
-      throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,"Error in xpath:" + path +" for " + name,e);
-    }
-  }
-
-  public Node getNode(String path, boolean errifMissing) {
-    return getNode(path, doc, errifMissing);
-  }
-
-  public Node getUnsubstitutedNode(String path, boolean errIfMissing) {
-    return getNode(path, origDoc, errIfMissing);
-  }
-
-  public Node getNode(String path, Document doc, boolean errIfMissing) {
-    XPath xpath = xpathFactory.newXPath();
-    String xstr = normalize(path);
-
-    try {
-      NodeList nodes = (NodeList)xpath.evaluate(xstr, doc, 
-                                                XPathConstants.NODESET);
-      if (nodes==null || 0 == nodes.getLength() ) {
-        if (errIfMissing) {
-          throw new RuntimeException(name + " missing "+path);
-        } else {
-          log.debug(name + " missing optional " + path);
-          return null;
-        }
-      }
-      if ( 1 < nodes.getLength() ) {
-        throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,
-                                 name + " contains more than one value for config path: " + path);
-      }
-      Node nd = nodes.item(0);
-      log.trace(name + ":" + path + "=" + nd);
-      return nd;
-
-    } catch (XPathExpressionException e) {
-      SolrException.log(log,"Error in xpath",e);
-      throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,"Error in xpath:" + xstr + " for " + name,e);
-    } catch (SolrException e) {
-      throw(e);
-    } catch (Exception e) {
-      SolrException.log(log,"Error in xpath",e);
-      throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,"Error in xpath:" + xstr+ " for " + name,e);
-    }
-  }
-
-  public NodeList getNodeList(String path, boolean errIfMissing) {
-    XPath xpath = xpathFactory.newXPath();
-    String xstr = normalize(path);
-
-    try {
-      NodeList nodeList = (NodeList)xpath.evaluate(xstr, doc, XPathConstants.NODESET);
-
-      if (null == nodeList) {
-        if (errIfMissing) {
-          throw new RuntimeException(name + " missing "+path);
-        } else {
-          log.debug(name + " missing optional " + path);
-          return null;
-        }
-      }
-
-      log.trace(name + ":" + path + "=" + nodeList);
-      return nodeList;
-
-    } catch (XPathExpressionException e) {
-      SolrException.log(log,"Error in xpath",e);
-      throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,"Error in xpath:" + xstr + " for " + name,e);
-    } catch (SolrException e) {
-      throw(e);
-    } catch (Exception e) {
-      SolrException.log(log,"Error in xpath",e);
-      throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,"Error in xpath:" + xstr+ " for " + name,e);
-    }
-  }
-
-  /**
-   * Returns the set of attributes on the given element that are not among the given knownAttributes,
-   * or null if all attributes are known.
-   */
-  public Set<String> getUnknownAttributes(Element element, String... knownAttributes) {
-    Set<String> knownAttributeSet = new HashSet<>(Arrays.asList(knownAttributes));
-    Set<String> unknownAttributeSet = null;
-    NamedNodeMap attributes = element.getAttributes();
-    for (int i = 0 ; i < attributes.getLength() ; ++i) {
-      final String attributeName = attributes.item(i).getNodeName();
-      if ( ! knownAttributeSet.contains(attributeName)) {
-        if (null == unknownAttributeSet) {
-          unknownAttributeSet = new HashSet<>();
-        }
-        unknownAttributeSet.add(attributeName);
-      }
-    }
-    return unknownAttributeSet;
-  }
-
-  /**
-   * Logs an error and throws an exception if any of the element(s) at the given elementXpath
-   * contains an attribute name that is not among knownAttributes. 
-   */
-  public void complainAboutUnknownAttributes(String elementXpath, String... knownAttributes) {
-    SortedMap<String,SortedSet<String>> problems = new TreeMap<>();
-    NodeList nodeList = getNodeList(elementXpath, false);
-    for (int i = 0 ; i < nodeList.getLength() ; ++i) {
-      Element element = (Element)nodeList.item(i);
-      Set<String> unknownAttributes = getUnknownAttributes(element, knownAttributes);
-      if (null != unknownAttributes) {
-        String elementName = element.getNodeName();
-        SortedSet<String> allUnknownAttributes = problems.get(elementName);
-        if (null == allUnknownAttributes) {
-          allUnknownAttributes = new TreeSet<>();
-          problems.put(elementName, allUnknownAttributes);
-        }
-        allUnknownAttributes.addAll(unknownAttributes);
-      }
-    }
-    if (problems.size() > 0) {
-      StringBuilder message = new StringBuilder();
-      for (Map.Entry<String,SortedSet<String>> entry : problems.entrySet()) {
-        if (message.length() > 0) {
-          message.append(", ");
-        }
-        message.append('<');
-        message.append(entry.getKey());
-        for (String attributeName : entry.getValue()) {
-          message.append(' ');
-          message.append(attributeName);
-          message.append("=\"...\"");
-        }
-        message.append('>');
-      }
-      message.insert(0, "Unknown attribute(s) on element(s): ");
-      String msg = message.toString();
-      SolrException.log(log, msg);
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, msg);
-    }
-  }
-
-  public String getVal(String path, boolean errIfMissing) {
-    Node nd = getNode(path,errIfMissing);
-    if (nd==null) return null;
-
-    String txt = DOMUtil.getText(nd);
-
-    log.debug(name + ' '+path+'='+txt);
-    return txt;
-
-    /******
-    short typ = nd.getNodeType();
-    if (typ==Node.ATTRIBUTE_NODE || typ==Node.TEXT_NODE) {
-      return nd.getNodeValue();
-    }
-    return nd.getTextContent();
-    ******/
-  }
-
-
-  public String get(String path) {
-    return getVal(path,true);
-  }
-
-  public String get(String path, String def) {
-    String val = getVal(path, false);
-    if (val == null || val.length() == 0) {
-      return def;
-    }
-    return val;
-  }
-
-  public int getInt(String path) {
-    return Integer.parseInt(getVal(path, true));
-  }
-
-  public int getInt(String path, int def) {
-    String val = getVal(path, false);
-    return val!=null ? Integer.parseInt(val) : def;
-  }
-
-  public boolean getBool(String path) {
-    return Boolean.parseBoolean(getVal(path, true));
-  }
-
-  public boolean getBool(String path, boolean def) {
-    String val = getVal(path, false);
-    return val!=null ? Boolean.parseBoolean(val) : def;
-  }
-
-  public float getFloat(String path) {
-    return Float.parseFloat(getVal(path, true));
-  }
-
-  public float getFloat(String path, float def) {
-    String val = getVal(path, false);
-    return val!=null ? Float.parseFloat(val) : def;
-  }
-
-
-  public double getDouble(String path){
-     return Double.parseDouble(getVal(path, true));
-   }
-
-   public double getDouble(String path, double def) {
-     String val = getVal(path, false);
-     return val!=null ? Double.parseDouble(val) : def;
-   }
-   
-   public Version getLuceneVersion(String path) {
-     return parseLuceneVersionString(getVal(path, true));
-   }
-   
-   public Version getLuceneVersion(String path, Version def) {
-     String val = getVal(path, false);
-     return val!=null ? parseLuceneVersionString(val) : def;
-   }
-  
-  private static final AtomicBoolean versionWarningAlreadyLogged = new AtomicBoolean(false);
-  
-  public static final Version parseLuceneVersionString(final String matchVersion) {
-    final Version version;
-    try {
-      version = Version.parseLeniently(matchVersion);
-    } catch (ParseException pe) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-        "Invalid luceneMatchVersion.  Should be of the form 'V.V.V' (e.g. 4.8.0)", pe);
-    }
-    
-    if (version == Version.LATEST && !versionWarningAlreadyLogged.getAndSet(true)) {
-      log.warn(
-        "You should not use LATEST as luceneMatchVersion property: "+
-        "if you use this setting, and then Solr upgrades to a newer release of Lucene, "+
-        "sizable changes may happen. If precise back compatibility is important "+
-        "then you should instead explicitly specify an actual Lucene version."
-      );
-    }
-    
-    return version;
-  }
-
-  /** If this config is loaded from ZooKeeper, the znode version is returned; otherwise -1. */
-  public int getZnodeVersion(){
-    return zkVersion;
-  }
-
-  public Config getOriginalConfig() {
-    return new Config(loader, null, origDoc);
-  }
-
-}
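
For orientation, a hedged sketch of typical reads against this class, roughly
the way SolrConfig drives it; the resource name, "/config" xpath prefix, and
default values here are illustrative assumptions:

    // Illustrative sketch; paths and defaults are assumptions.
    SolrResourceLoader loader = new SolrResourceLoader(SolrResourceLoader.locateSolrHome());
    // The constructor throws ParserConfigurationException/IOException/SAXException.
    Config cfg = new Config(loader, "solrconfig.xml", null, "/config");
    // Relative xpaths get the prefix prepended by normalize().
    int maxClauses = cfg.getInt("query/maxBooleanClauses", 1024);
    boolean lazy = cfg.getBool("query/enableLazyFieldLoading", false);
    Version match = cfg.getLuceneVersion("luceneMatchVersion", Version.LATEST);
    int znodeVersion = cfg.getZnodeVersion();   // -1 unless loaded from ZooKeeper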

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/ConfigOverlay.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/ConfigOverlay.java b/solr/core/src/java/org/apache/solr/core/ConfigOverlay.java
deleted file mode 100644
index 3472999..0000000
--- a/solr/core/src/java/org/apache/solr/core/ConfigOverlay.java
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.util.Collections;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.solr.common.MapSerializable;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.noggit.CharArr;
-import org.noggit.JSONWriter;
-
-/**
- * This class encapsulates the config overlay JSON file (configoverlay.json). It is
- * immutable, and any edit operation performed on it gives a new copy of the object
- * with the changed value.
- */
-public class ConfigOverlay implements MapSerializable {
-  private final int znodeVersion;
-  private final Map<String, Object> data;
-  private Map<String, Object> props;
-  private Map<String, Object> userProps;
-
-  public ConfigOverlay(Map<String, Object> jsonObj, int znodeVersion) {
-    if (jsonObj == null) jsonObj = Collections.EMPTY_MAP;
-    this.znodeVersion = znodeVersion;
-    data = Collections.unmodifiableMap(jsonObj);
-    props = (Map<String, Object>) data.get("props");
-    if (props == null) props = Collections.EMPTY_MAP;
-    userProps = (Map<String, Object>) data.get("userProps");
-    if (userProps == null) userProps = Collections.EMPTY_MAP;
-  }
-
-  public Object getXPathProperty(String xpath) {
-    return getXPathProperty(xpath, true);
-  }
-
-  public Object getXPathProperty(String xpath, boolean onlyPrimitive) {
-    List<String> hierarchy = checkEditable(xpath, true, false);
-    if (hierarchy == null) return null;
-    return Utils.getObjectByPath(props, onlyPrimitive, hierarchy);
-  }
-
-  public ConfigOverlay setUserProperty(String key, Object val) {
-    Map copy = new LinkedHashMap(userProps);
-    copy.put(key, val);
-    Map<String, Object> jsonObj = new LinkedHashMap<>(this.data);
-    jsonObj.put("userProps", copy);
-    return new ConfigOverlay(jsonObj, znodeVersion);
-  }
-
-  public ConfigOverlay unsetUserProperty(String key) {
-    if (!userProps.containsKey(key)) return this;
-    Map copy = new LinkedHashMap(userProps);
-    copy.remove(key);
-    Map<String, Object> jsonObj = new LinkedHashMap<>(this.data);
-    jsonObj.put("userProps", copy);
-    return new ConfigOverlay(jsonObj, znodeVersion);
-  }
-
-  public ConfigOverlay setProperty(String name, Object val) {
-    List<String> hierarchy = checkEditable(name, false, true);
-    Map deepCopy = (Map) Utils.fromJSON(Utils.toJSON(props));
-    Map obj = deepCopy;
-    for (int i = 0; i < hierarchy.size(); i++) {
-      String s = hierarchy.get(i);
-      if (i < hierarchy.size() - 1) {
-        if (obj.get(s) == null || (!(obj.get(s) instanceof Map))) {
-          obj.put(s, new LinkedHashMap<>());
-        }
-        obj = (Map) obj.get(s);
-      } else {
-        obj.put(s, val);
-      }
-    }
-
-    Map<String, Object> jsonObj = new LinkedHashMap<>(this.data);
-    jsonObj.put("props", deepCopy);
-
-    return new ConfigOverlay(jsonObj, znodeVersion);
-  }
-
-
-  public static final String NOT_EDITABLE = "''{0}'' is not an editable property";
-
-  private List<String> checkEditable(String propName, boolean isXPath, boolean failOnError) {
-    LinkedList<String> hierarchy = new LinkedList<>();
-    if (!isEditableProp(propName, isXPath, hierarchy)) {
-      if (failOnError)
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, StrUtils.formatString(NOT_EDITABLE, propName));
-      else return null;
-    }
-    return hierarchy;
-
-  }
-
-  public ConfigOverlay unsetProperty(String name) {
-    List<String> hierarchy = checkEditable(name, false, true);
-    Map deepCopy = (Map) Utils.fromJSON(Utils.toJSON(props));
-    Map obj = deepCopy;
-    for (int i = 0; i < hierarchy.size(); i++) {
-      String s = hierarchy.get(i);
-      if (i < hierarchy.size() - 1) {
-        if (obj.get(s) == null || (!(obj.get(s) instanceof Map))) {
-          return this;
-        }
-        obj = (Map) obj.get(s);
-      } else {
-        obj.remove(s);
-      }
-    }
-
-    Map<String, Object> jsonObj = new LinkedHashMap<>(this.data);
-    jsonObj.put("props", deepCopy);
-
-    return new ConfigOverlay(jsonObj, znodeVersion);
-  }
-
-  public byte[] toByteArray() {
-    return Utils.toJSON(data);
-  }
-
-
-  public int getZnodeVersion() {
-    return znodeVersion;
-  }
-
-  @Override
-  public String toString() {
-    CharArr out = new CharArr();
-    try {
-      new JSONWriter(out, 2).write(data);
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-    return out.toString();
-  }
-
-
-  public static final String RESOURCE_NAME = "configoverlay.json";
-
-  /*private static final Long STR_ATTR = 0L;
-  private static final Long STR_NODE = 1L;
-  private static final Long BOOL_ATTR = 10L;
-  private static final Long BOOL_NODE = 11L;
-  private static final Long INT_ATTR = 20L;
-  private static final Long INT_NODE = 21L;
-  private static final Long FLOAT_ATTR = 30L;
-  private static final Long FLOAT_NODE = 31L;*/
-  // The path maps to the XML xpath; a value of 1 means it is a tag with a string
-  // value, and a value of 0 means it is an attribute with a string value.
-
-  private static Map editable_prop_map = (Map) Utils.fromJSONResource("EditableSolrConfigAttributes.json");
-
-  public static boolean isEditableProp(String path, boolean isXpath, List<String> hierarchy) {
-    return checkEditable(path, isXpath, hierarchy) != null;
-  }
-
-
-  public static Class checkEditable(String path, boolean isXpath, List<String> hierarchy) {
-    List<String> parts = StrUtils.splitSmart(path, isXpath ? '/' : '.');
-    Object obj = editable_prop_map;
-    for (int i = 0; i < parts.size(); i++) {
-      String part = parts.get(i);
-      boolean isAttr = isXpath && part.startsWith("@");
-      if (isAttr) {
-        part = part.substring(1);
-      }
-      if (hierarchy != null) hierarchy.add(part);
-      if (obj == null) return null;
-      if (i == parts.size() - 1) {
-        if (obj instanceof Map) {
-          Map map = (Map) obj;
-          Object o = map.get(part);
-          return checkType(o, isXpath, isAttr);
-        }
-        return null;
-      }
-      obj = ((Map) obj).get(part);
-    }
-    return null;
-  }
-
-  static Class[] types = new Class[]{String.class, Boolean.class, Integer.class, Float.class};
-
-  private static Class checkType(Object o, boolean isXpath, boolean isAttr) {
-    if (o instanceof Long) {
-      Long aLong = (Long) o;
-      int ten = aLong.intValue() / 10;
-      int one = aLong.intValue() % 10;
-      if (isXpath && isAttr && one != 0) return null;
-      return types[ten];
-    } else {
-      return null;
-    }
-  }
-
-  public Map<String, String> getEditableSubProperties(String xpath) {
-    Object o = Utils.getObjectByPath(props, false, StrUtils.splitSmart(xpath, '/'));
-    if (o instanceof Map) {
-      return (Map) o;
-    } else {
-      return null;
-    }
-  }
-
-  public Map<String, Object> getUserProps() {
-    return userProps;
-  }
-
-  @Override
-  public Map<String, Object> toMap(Map<String, Object> map) {
-    map.put(ZNODEVER, znodeVersion);
-    map.putAll(data);
-    return map;
-  }
-
-  public Map<String, Map> getNamedPlugins(String typ) {
-    Map<String, Map> reqHandlers = (Map<String, Map>) data.get(typ);
-    if (reqHandlers == null) return Collections.EMPTY_MAP;
-    return Collections.unmodifiableMap(reqHandlers);
-  }
-
-
-  public ConfigOverlay addNamedPlugin(Map<String, Object> info, String typ) {
-    Map dataCopy = Utils.getDeepCopy(data, 4);
-    Map reqHandler = (Map) dataCopy.get(typ);
-    if (reqHandler == null) dataCopy.put(typ, reqHandler = new LinkedHashMap());
-    reqHandler.put(info.get(CoreAdminParams.NAME), info);
-    return new ConfigOverlay(dataCopy, this.znodeVersion);
-  }
-
-  public ConfigOverlay deleteNamedPlugin(String name, String typ) {
-    Map dataCopy = Utils.getDeepCopy(data, 4);
-    Map reqHandler = (Map) dataCopy.get(typ);
-    if (reqHandler == null) return this;
-    reqHandler.remove(name);
-    return new ConfigOverlay(dataCopy, this.znodeVersion);
-
-  }
-
-  public static final String ZNODEVER = "znodeVersion";
-  public static final String NAME = "overlay";
-
-  public static void main(String[] args) {
-    System.out.println(Utils.toJSONString(editable_prop_map));
-  }
-
-}

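A sketch of the copy-on-write behavior of the ConfigOverlay class removed above;
the property name and value are illustrative:

import java.util.Collections;
import org.apache.solr.core.ConfigOverlay;

public class OverlaySketch {
  public static void main(String[] args) {
    ConfigOverlay base = new ConfigOverlay(Collections.emptyMap(), 0);
    // Every edit returns a new instance; the original is untouched.
    ConfigOverlay edited = base.setUserProperty("my.prop", "42");
    System.out.println(base.getUserProps());   // {}
    System.out.println(edited.getUserProps()); // {my.prop=42}
  }
}
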
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/ConfigSet.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/ConfigSet.java b/solr/core/src/java/org/apache/solr/core/ConfigSet.java
deleted file mode 100644
index e0c9fe4..0000000
--- a/solr/core/src/java/org/apache/solr/core/ConfigSet.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.schema.IndexSchema;
-
-/**
- * Stores a core's configuration in the form of a SolrConfig and IndexSchema
- */
-public class ConfigSet {
-
-  private final String name;
-
-  private final SolrConfig solrconfig;
-
-  private final IndexSchema indexSchema;
-
-  private final NamedList properties;
-
-  private final boolean trusted;
-
-  public ConfigSet(String name, SolrConfig solrConfig, IndexSchema indexSchema,
-      NamedList properties, boolean trusted) {
-    this.name = name;
-    this.solrconfig = solrConfig;
-    this.indexSchema = indexSchema;
-    this.properties = properties;
-    this.trusted = trusted;
-  }
-
-  public String getName() {
-    return name;
-  }
-
-  public SolrConfig getSolrConfig() {
-    return solrconfig;
-  }
-
-  public IndexSchema getIndexSchema() {
-    return indexSchema;
-  }
-
-  public NamedList getProperties() {
-    return properties;
-  }
-  
-  public boolean isTrusted() {
-    return trusted;
-  }
-}

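ConfigSet itself is a plain immutable holder; a construction sketch (solrConfig,
indexSchema and props are placeholders that ConfigSetService.getConfig would
normally supply):

ConfigSet cs = new ConfigSet("techproducts", solrConfig, indexSchema, props, true);
System.out.println(cs.getName() + " trusted=" + cs.isTrusted());
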
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/ConfigSetProperties.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/ConfigSetProperties.java b/solr/core/src/java/org/apache/solr/core/ConfigSetProperties.java
deleted file mode 100644
index 77cd272..0000000
--- a/solr/core/src/java/org/apache/solr/core/ConfigSetProperties.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.io.InputStreamReader;
-import java.lang.invoke.MethodHandles;
-import java.nio.charset.StandardCharsets;
-import java.util.Map;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.util.NamedList;
-
-import org.noggit.JSONParser;
-import org.noggit.ObjectBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-public class ConfigSetProperties {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String DEFAULT_FILENAME = "configsetprops.json";
-  public static final String IMMUTABLE_CONFIGSET_ARG = "immutable";
-
-  /**
-   * Return the properties associated with the ConfigSet (e.g. immutable)
-   *
-   * @param loader the resource loader
-   * @param name the name of the config set properties file
-   * @return the properties in a NamedList
-   */
-  public static NamedList readFromResourceLoader(SolrResourceLoader loader, String name) {
-    InputStreamReader reader;
-    try {
-      reader = new InputStreamReader(loader.openResource(name), StandardCharsets.UTF_8);
-    } catch (SolrResourceNotFoundException ex) {
-      log.debug("Did not find ConfigSet properties, assuming default properties: " + ex.getMessage());
-      return null;
-    } catch (Exception ex) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to load reader for ConfigSet properties: " + name, ex);
-    }
-
-    try {
-      return readFromInputStream(reader);
-    } finally {
-      IOUtils.closeQuietly(reader);
-    }
-  }
-
-  public static NamedList readFromInputStream(InputStreamReader reader) {
-    try {
-      JSONParser jsonParser = new JSONParser(reader);
-      Object object = ObjectBuilder.getVal(jsonParser);
-      if (!(object instanceof Map)) {
-        final String objectClass = object == null ? "null" : object.getClass().getName();
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Invalid JSON type " + objectClass + ", expected Map");
-      }
-      return new NamedList((Map)object);
-    } catch (Exception ex) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to load ConfigSet properties", ex);
-    } finally {
-      IOUtils.closeQuietly(reader);
-    }
-  }
-}

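A sketch of the JSON contract enforced by readFromInputStream above; the
"immutable" flag is the property the class itself names via IMMUTABLE_CONFIGSET_ARG:

import java.io.ByteArrayInputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.ConfigSetProperties;

public class ConfigSetPropsSketch {
  public static void main(String[] args) {
    String json = "{\"immutable\": true}";
    InputStreamReader reader = new InputStreamReader(
        new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8)), StandardCharsets.UTF_8);
    NamedList props = ConfigSetProperties.readFromInputStream(reader);
    System.out.println(props.get("immutable")); // true
    // Non-map JSON such as "[1, 2, 3]" raises
    // SolrException("Invalid JSON type java.util.ArrayList, expected Map").
  }
}
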
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/core/ConfigSetService.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/ConfigSetService.java b/solr/core/src/java/org/apache/solr/core/ConfigSetService.java
deleted file mode 100644
index 7ce1a52..0000000
--- a/solr/core/src/java/org/apache/solr/core/ConfigSetService.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.core;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.time.Instant;
-import java.util.Locale;
-import java.util.concurrent.ExecutionException;
-
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import org.apache.solr.cloud.CloudConfigSetService;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.cloud.ZkSolrResourceLoader;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.schema.IndexSchema;
-import org.apache.solr.schema.IndexSchemaFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Service class used by the CoreContainer to load ConfigSets for use in SolrCore
- * creation.
- */
-public abstract class ConfigSetService {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  public static ConfigSetService createConfigSetService(NodeConfig nodeConfig, SolrResourceLoader loader, ZkController zkController) {
-    if (zkController != null)
-      return new CloudConfigSetService(loader, zkController);
-
-    if (nodeConfig.hasSchemaCache())
-      return new SchemaCaching(loader, nodeConfig.getConfigSetBaseDirectory());
-
-    return new Default(loader, nodeConfig.getConfigSetBaseDirectory());
-  }
-
-  protected final SolrResourceLoader parentLoader;
-
-  /**
-   * Create a new ConfigSetService
-   * @param loader the CoreContainer's resource loader
-   */
-  public ConfigSetService(SolrResourceLoader loader) {
-    this.parentLoader = loader;
-  }
-
-  /**
-   * Load the ConfigSet for a core
-   * @param dcore the core's CoreDescriptor
-   * @return a ConfigSet
-   */
-  public final ConfigSet getConfig(CoreDescriptor dcore) {
-
-    SolrResourceLoader coreLoader = createCoreResourceLoader(dcore);
-
-    try {
-
-      // ConfigSet properties are loaded from ConfigSetProperties.DEFAULT_FILENAME file.
-      // ConfigSet flags are loaded from the metadata of the ZK node of the configset.
-      NamedList properties = createConfigSetProperties(dcore, coreLoader);
-      NamedList flags = getConfigSetFlags(dcore, coreLoader);
-
-      boolean trusted =
-          !(coreLoader instanceof ZkSolrResourceLoader
-              && flags != null
-              && flags.get("trusted") != null
-              && !flags.getBooleanArg("trusted"));
-
-      SolrConfig solrConfig = createSolrConfig(dcore, coreLoader);
-      IndexSchema schema = createIndexSchema(dcore, solrConfig);
-      return new ConfigSet(configName(dcore), solrConfig, schema, properties, trusted);
-    } catch (Exception e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Could not load conf for core " + dcore.getName() +
-              ": " + e.getMessage(), e);
-    }
-
-  }
-
-  /**
-   * Create a SolrConfig object for a core
-   * @param cd the core's CoreDescriptor
-   * @param loader the core's resource loader
-   * @return a SolrConfig object
-   */
-  protected SolrConfig createSolrConfig(CoreDescriptor cd, SolrResourceLoader loader) {
-    return SolrConfig.readFromResourceLoader(loader, cd.getConfigName());
-  }
-
-  /**
-   * Create an IndexSchema object for a core
-   * @param cd the core's CoreDescriptor
-   * @param solrConfig the core's SolrConfig
-   * @return an IndexSchema
-   */
-  protected IndexSchema createIndexSchema(CoreDescriptor cd, SolrConfig solrConfig) {
-    return IndexSchemaFactory.buildIndexSchema(cd.getSchemaName(), solrConfig);
-  }
-
-  /**
-   * Return the ConfigSet properties
-   * @param cd the core's CoreDescriptor
-   * @param loader the core's resource loader
-   * @return the ConfigSet properties
-   */
-  protected NamedList createConfigSetProperties(CoreDescriptor cd, SolrResourceLoader loader) {
-    return ConfigSetProperties.readFromResourceLoader(loader, cd.getConfigSetPropertiesName());
-  }
-
-  protected NamedList getConfigSetFlags(CoreDescriptor cd, SolrResourceLoader loader) {
-    if (loader instanceof ZkSolrResourceLoader) {
-      try {
-        return ConfigSetProperties.readFromResourceLoader(loader, ".");
-      } catch (Exception ex) {
-        return null;
-      }
-    } else {
-      return null;
-    }
-  }
-
-  /**
-   * Create a SolrResourceLoader for a core
-   * @param cd the core's CoreDescriptor
-   * @return a SolrResourceLoader
-   */
-  protected abstract SolrResourceLoader createCoreResourceLoader(CoreDescriptor cd);
-
-  /**
-   * Return a name for the ConfigSet for a core
-   * @param cd the core's CoreDescriptor
-   * @return a name for the core's ConfigSet
-   */
-  public abstract String configName(CoreDescriptor cd);
-
-  /**
-   * The default ConfigSetService.
-   *
-   * Loads a ConfigSet defined by the core's configSet property,
-   * looking for a directory named for the configSet property value underneath
-   * a base directory.  If no configSet property is set, loads the ConfigSet
-   * instead from the core's instance directory.
-   */
-  public static class Default extends ConfigSetService {
-
-    private final Path configSetBase;
-
-    /**
-     * Create a new ConfigSetService.Default
-     * @param loader the CoreContainer's resource loader
-     * @param configSetBase the base directory under which to look for config set directories
-     */
-    public Default(SolrResourceLoader loader, Path configSetBase) {
-      super(loader);
-      this.configSetBase = configSetBase;
-    }
-
-    @Override
-    public SolrResourceLoader createCoreResourceLoader(CoreDescriptor cd) {
-      Path instanceDir = locateInstanceDir(cd);
-      return new SolrResourceLoader(instanceDir, parentLoader.getClassLoader(), cd.getSubstitutableProperties());
-    }
-
-    @Override
-    public String configName(CoreDescriptor cd) {
-      return (cd.getConfigSet() == null ? "instancedir " : "configset ") + locateInstanceDir(cd);
-    }
-
-    protected Path locateInstanceDir(CoreDescriptor cd) {
-      String configSet = cd.getConfigSet();
-      if (configSet == null)
-        return cd.getInstanceDir();
-      Path configSetDirectory = configSetBase.resolve(configSet);
-      if (!Files.isDirectory(configSetDirectory))
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "Could not load configuration from directory " + configSetDirectory);
-      return configSetDirectory;
-    }
-
-  }
-
-  /**
-   * A ConfigSetService that shares schema objects between cores
-   */
-  public static class SchemaCaching extends Default {
-
-    private final Cache<String, IndexSchema> schemaCache = CacheBuilder.newBuilder().build();
-
-    public SchemaCaching(SolrResourceLoader loader, Path configSetBase) {
-      super(loader, configSetBase);
-    }
-
-    public static String cacheName(Path schemaFile) throws IOException {
-      long lastModified = Files.getLastModifiedTime(schemaFile).toMillis();
-      return String.format(Locale.ROOT, "%s:%s",
-                            schemaFile.toString(), Instant.ofEpochMilli(lastModified).toString());
-    }
-
-    @Override
-    public IndexSchema createIndexSchema(final CoreDescriptor cd, final SolrConfig solrConfig) {
-      final String resourceNameToBeUsed = IndexSchemaFactory.getResourceNameToBeUsed(cd.getSchemaName(), solrConfig);
-      Path schemaFile = Paths.get(solrConfig.getResourceLoader().getConfigDir()).resolve(resourceNameToBeUsed);
-      if (Files.exists(schemaFile)) {
-        try {
-          String cachedName = cacheName(schemaFile);
-          return schemaCache.get(cachedName, () -> {
-            log.info("Creating new index schema for core {}", cd.getName());
-            return IndexSchemaFactory.buildIndexSchema(cd.getSchemaName(), solrConfig);
-          });
-        } catch (ExecutionException e) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-              "Error creating index schema for core " + cd.getName(), e);
-        } catch (IOException e) {
-          log.warn("Couldn't get last modified time for schema file {}: {}", schemaFile, e.getMessage());
-          log.warn("Will not use schema cache");
-        }
-      }
-      return IndexSchemaFactory.buildIndexSchema(cd.getSchemaName(), solrConfig);
-    }
-  }
-
-}

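The schema cache in SchemaCaching above keys entries by the schema file's path
plus its last-modified instant, so editing the file naturally invalidates its
cached entry. A sketch of the key format (the path is illustrative):

import java.nio.file.Path;
import java.nio.file.Paths;
import org.apache.solr.core.ConfigSetService;

public class CacheNameSketch {
  public static void main(String[] args) throws Exception {
    Path schema = Paths.get("/var/solr/configsets/myconf/conf/managed-schema");
    // Prints e.g. "/var/solr/configsets/myconf/conf/managed-schema:2018-10-23T00:05:22Z"
    System.out.println(ConfigSetService.SchemaCaching.cacheName(schema));
  }
}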

[03/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/RangeFacetRequest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/RangeFacetRequest.java b/solr/core/src/java/org/apache/solr/handler/component/RangeFacetRequest.java
deleted file mode 100644
index 144d1b1..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/RangeFacetRequest.java
+++ /dev/null
@@ -1,863 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.EnumSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.FacetParams;
-import org.apache.solr.common.params.GroupParams;
-import org.apache.solr.common.params.RequiredSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.schema.CurrencyFieldType;
-import org.apache.solr.schema.CurrencyValue;
-import org.apache.solr.schema.DatePointField;
-import org.apache.solr.schema.DateRangeField;
-import org.apache.solr.schema.ExchangeRateProvider;
-import org.apache.solr.schema.FieldType;
-import org.apache.solr.schema.IndexSchema;
-import org.apache.solr.schema.SchemaField;
-import org.apache.solr.schema.TrieDateField;
-import org.apache.solr.schema.TrieField;
-import org.apache.solr.util.DateMathParser;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Encapsulates a single facet.range request along with all its parameters. This class
- * calculates all the ranges (gaps) required to be counted.
- */
-public class RangeFacetRequest extends FacetComponent.FacetBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  protected final SchemaField schemaField;
-  protected final String start;
-  protected final String end;
-  protected final String gap;
-  protected final boolean hardEnd;
-  protected final EnumSet<FacetParams.FacetRangeInclude> include;
-  protected final EnumSet<FacetParams.FacetRangeOther> others;
-  protected final FacetParams.FacetRangeMethod method;
-  protected final int minCount;
-  protected final boolean groupFacet;
-  protected final List<FacetRange> facetRanges;
-
-  /**
-   * The computed start value of this range
-   */
-  protected final Object startObj;
-  /**
-   * The computed end value of this range taking into account facet.range.hardend
-   */
-  protected final Object endObj;
-  /**
-   * The computed gap between each range
-   */
-  protected final Object gapObj;
-
-  public RangeFacetRequest(ResponseBuilder rb, String f) {
-    super(rb, FacetParams.FACET_RANGE, f);
-
-    IndexSchema schema = rb.req.getSchema();
-    this.schemaField = schema.getField(facetOn);
-
-    SolrParams params = SolrParams.wrapDefaults(localParams, rb.req.getParams());
-    SolrParams required = new RequiredSolrParams(params);
-
-    String methodStr = params.get(FacetParams.FACET_RANGE_METHOD);
-    FacetParams.FacetRangeMethod method = (methodStr == null ? FacetParams.FacetRangeMethod.getDefault() : FacetParams.FacetRangeMethod.get(methodStr));
-
-    if ((schemaField.getType() instanceof DateRangeField) && method.equals(FacetParams.FacetRangeMethod.DV)) {
-      // the user has explicitly selected the FacetRangeMethod.DV method
-      log.warn("Range facet method '" + FacetParams.FacetRangeMethod.DV + "' is not supported together with field type '" +
-          DateRangeField.class + "'. Will use method '" + FacetParams.FacetRangeMethod.FILTER + "' instead");
-      method = FacetParams.FacetRangeMethod.FILTER;
-    }
-    if (method.equals(FacetParams.FacetRangeMethod.DV) && !schemaField.hasDocValues() && (schemaField.getType().isPointField())) {
-      log.warn("Range facet method '" + FacetParams.FacetRangeMethod.DV + "' is not supported on PointFields without docValues." +
-          "Will use method '" + FacetParams.FacetRangeMethod.FILTER + "' instead");
-      method = FacetParams.FacetRangeMethod.FILTER;
-    }
-
-    this.start = required.getFieldParam(facetOn, FacetParams.FACET_RANGE_START);
-    this.end = required.getFieldParam(facetOn, FacetParams.FACET_RANGE_END);
-
-
-    this.gap = required.getFieldParam(facetOn, FacetParams.FACET_RANGE_GAP);
-    this.minCount = params.getFieldInt(facetOn, FacetParams.FACET_MINCOUNT, 0);
-
-    this.include = FacetParams.FacetRangeInclude.parseParam
-        (params.getFieldParams(facetOn, FacetParams.FACET_RANGE_INCLUDE));
-
-    this.hardEnd = params.getFieldBool(facetOn, FacetParams.FACET_RANGE_HARD_END, false);
-
-    this.others = EnumSet.noneOf(FacetParams.FacetRangeOther.class);
-    final String[] othersP = params.getFieldParams(facetOn, FacetParams.FACET_RANGE_OTHER);
-    if (othersP != null && othersP.length > 0) {
-      for (final String o : othersP) {
-        others.add(FacetParams.FacetRangeOther.get(o));
-      }
-    }
-
-    this.groupFacet = params.getBool(GroupParams.GROUP_FACET, false);
-    if (groupFacet && method.equals(FacetParams.FacetRangeMethod.DV)) {
-      // the user has explicitly selected the FacetRangeMethod.DV method
-      log.warn("Range facet method '" + FacetParams.FacetRangeMethod.DV + "' is not supported together with '" +
-          GroupParams.GROUP_FACET + "'. Will use method '" + FacetParams.FacetRangeMethod.FILTER + "' instead");
-      method = FacetParams.FacetRangeMethod.FILTER;
-    }
-
-    this.method = method;
-
-    RangeEndpointCalculator<? extends Comparable<?>> calculator = createCalculator();
-    this.facetRanges = calculator.computeRanges();
-    this.gapObj = calculator.getGap();
-    this.startObj = calculator.getStart();
-    this.endObj = calculator.getComputedEnd();
-  }
-
-  /**
-   * Creates the right instance of {@link org.apache.solr.handler.component.RangeFacetRequest.RangeEndpointCalculator}
-   * depending on the field type of the schema field
-   */
-  private RangeEndpointCalculator<? extends Comparable<?>> createCalculator() {
-    RangeEndpointCalculator<?> calc;
-    FieldType ft = schemaField.getType();
-
-    if (ft instanceof TrieField) {
-      switch (ft.getNumberType()) {
-        case FLOAT:
-          calc = new FloatRangeEndpointCalculator(this);
-          break;
-        case DOUBLE:
-          calc = new DoubleRangeEndpointCalculator(this);
-          break;
-        case INTEGER:
-          calc = new IntegerRangeEndpointCalculator(this);
-          break;
-        case LONG:
-          calc = new LongRangeEndpointCalculator(this);
-          break;
-        case DATE:
-          calc = new DateRangeEndpointCalculator(this, null);
-          break;
-        default:
-          throw new SolrException
-              (SolrException.ErrorCode.BAD_REQUEST,
-                  "Unable to range facet on Trie field of unexpected type:" + this.facetOn);
-      }
-    } else if (ft instanceof DateRangeField) {
-      calc = new DateRangeEndpointCalculator(this, null);
-    } else if (ft.isPointField()) {
-      switch (ft.getNumberType()) {
-        case FLOAT:
-          calc = new FloatRangeEndpointCalculator(this);
-          break;
-        case DOUBLE:
-          calc = new DoubleRangeEndpointCalculator(this);
-          break;
-        case INTEGER:
-          calc = new IntegerRangeEndpointCalculator(this);
-          break;
-        case LONG:
-          calc = new LongRangeEndpointCalculator(this);
-          break;
-        case DATE:
-          calc = new DateRangeEndpointCalculator(this, null);
-          break;
-        default:
-          throw new SolrException
-              (SolrException.ErrorCode.BAD_REQUEST,
-                  "Unable to range facet on Point field of unexpected type:" + this.facetOn);
-      }
-    } else if (ft instanceof CurrencyFieldType) {
-      calc = new CurrencyRangeEndpointCalculator(this);
-    } else {
-      throw new SolrException
-          (SolrException.ErrorCode.BAD_REQUEST,
-              "Unable to range facet on field:" + schemaField);
-    }
-
-    return calc;
-  }
-
-  /**
-   * @return the start of this range as specified by {@link FacetParams#FACET_RANGE_START} parameter
-   */
-  public String getStart() {
-    return start;
-  }
-
-  /**
-   * The end of this facet.range as specified by {@link FacetParams#FACET_RANGE_END} parameter
-   * <p>
-   * Note that the actual computed end value can be different depending on the
-   * {@link FacetParams#FACET_RANGE_HARD_END} parameter. See {@link #endObj}
-   */
-  public String getEnd() {
-    return end;
-  }
-
-  /**
-   * @return an {@link EnumSet} containing all the values specified via
-   * {@link FacetParams#FACET_RANGE_INCLUDE} parameter. Defaults to
-   * {@link org.apache.solr.common.params.FacetParams.FacetRangeInclude#LOWER} if no parameter
-   * is supplied. Includes all values from {@link org.apache.solr.common.params.FacetParams.FacetRangeInclude} enum
-   * if {@link FacetParams#FACET_RANGE_INCLUDE} includes
-   * {@link org.apache.solr.common.params.FacetParams.FacetRangeInclude#ALL}
-   */
-  public EnumSet<FacetParams.FacetRangeInclude> getInclude() {
-    return include;
-  }
-
-  /**
-   * @return the gap as specified by {@link FacetParams#FACET_RANGE_GAP} parameter
-   */
-  public String getGap() {
-    return gap;
-  }
-
-  /**
-   * @return the computed gap object
-   */
-  public Object getGapObj() {
-    return gapObj;
-  }
-
-  /**
-   * @return the boolean value of {@link FacetParams#FACET_RANGE_HARD_END} parameter
-   */
-  public boolean isHardEnd() {
-    return hardEnd;
-  }
-
-  /**
-   * @return an {@link EnumSet} of {@link org.apache.solr.common.params.FacetParams.FacetRangeOther} values
-   * specified by {@link FacetParams#FACET_RANGE_OTHER} parameter
-   */
-  public EnumSet<FacetParams.FacetRangeOther> getOthers() {
-    return others;
-  }
-
-  /**
-   * @return the {@link org.apache.solr.common.params.FacetParams.FacetRangeMethod} to be used for computing
-   * ranges determined either by the value of {@link FacetParams#FACET_RANGE_METHOD} parameter
-   * or other internal constraints.
-   */
-  public FacetParams.FacetRangeMethod getMethod() {
-    return method;
-  }
-
-  /**
-   * @return the minimum allowed count for facet ranges as specified by {@link FacetParams#FACET_MINCOUNT}
-   */
-  public int getMinCount() {
-    return minCount;
-  }
-
-  /**
-   * @return the {@link SchemaField} instance representing the field on which ranges have to be calculated
-   */
-  public SchemaField getSchemaField() {
-    return schemaField;
-  }
-
-  /**
-   * @return the boolean value specified by {@link GroupParams#GROUP_FACET} parameter
-   */
-  public boolean isGroupFacet() {
-    return groupFacet;
-  }
-
-  /**
-   * @return a {@link List} of {@link org.apache.solr.handler.component.RangeFacetRequest.FacetRange} objects
-   * representing the ranges (gaps) for which range counts are to be calculated.
-   */
-  public List<FacetRange> getFacetRanges() {
-    return facetRanges;
-  }
-
-  /**
-   * @return The computed start value of this range
-   */
-  public Object getStartObj() {
-    return startObj;
-  }
-
-  /**
-   * The end of this facet.range as calculated using the values of the facet.range.end
-   * and facet.range.hardend parameters. This can be different from the value specified
-   * in facet.range.end when facet.range.hardend=false, because the end is then rounded
-   * up to the next gap boundary.
-   */
-  public Object getEndObj() {
-    return endObj;
-  }
-
-  /**
-   * Represents a range facet response combined from all shards.
-   * Provides helper methods to merge facet_ranges response from a shard.
-   * See {@link #mergeFacetRangesFromShardResponse(LinkedHashMap, SimpleOrderedMap)}
-   * and {@link #mergeContributionFromShard(SimpleOrderedMap)}
-   */
-  static class DistribRangeFacet {
-    public SimpleOrderedMap<Object> rangeFacet;
-
-    public DistribRangeFacet(SimpleOrderedMap<Object> rangeFacet) {
-      this.rangeFacet = rangeFacet;
-    }
-
-    /**
-     * Helper method to merge range facet values from a shard's response to already accumulated
-     * values for each range.
-     *
-     * @param rangeCounts a {@link LinkedHashMap} containing the accumulated values for each range
-     *                    keyed by the 'key' of the facet.range. Must not be null.
-     * @param shardRanges the facet_ranges response from a shard. Must not be null.
-     */
-    public static void mergeFacetRangesFromShardResponse(LinkedHashMap<String, DistribRangeFacet> rangeCounts,
-                                                         SimpleOrderedMap<SimpleOrderedMap<Object>> shardRanges) {
-      assert shardRanges != null;
-      assert rangeCounts != null;
-      for (Map.Entry<String, SimpleOrderedMap<Object>> entry : shardRanges) {
-        String rangeKey = entry.getKey();
-
-        RangeFacetRequest.DistribRangeFacet existing = rangeCounts.get(rangeKey);
-        if (existing == null) {
-          rangeCounts.put(rangeKey, new RangeFacetRequest.DistribRangeFacet(entry.getValue()));
-        } else {
-          existing.mergeContributionFromShard(entry.getValue());
-        }
-      }
-    }
-
-    /**
-     * Accumulates an individual facet_ranges count from a shard into global counts.
-     * <p>
-     * The implementation below uses the first encountered shard's
-     * facet_ranges as the basis for subsequent shards' data to be merged.
-     *
-     * @param rangeFromShard the facet_ranges response from a shard
-     */
-    public void mergeContributionFromShard(SimpleOrderedMap<Object> rangeFromShard) {
-      if (rangeFacet == null) {
-        rangeFacet = rangeFromShard;
-        return;
-      }
-
-      @SuppressWarnings("unchecked")
-      NamedList<Integer> shardFieldValues
-          = (NamedList<Integer>) rangeFromShard.get("counts");
-
-      @SuppressWarnings("unchecked")
-      NamedList<Integer> existFieldValues
-          = (NamedList<Integer>) rangeFacet.get("counts");
-
-      for (Map.Entry<String, Integer> existPair : existFieldValues) {
-        final String key = existPair.getKey();
-      // can be null if there are inconsistencies in shard responses
-        Integer newValue = shardFieldValues.get(key);
-        if (null != newValue) {
-          Integer oldValue = existPair.getValue();
-          existPair.setValue(oldValue + newValue);
-        }
-      }
-
-      // merge facet.other=before/between/after/all if they exist
-      for (FacetParams.FacetRangeOther otherKey : FacetParams.FacetRangeOther.values()) {
-        if (otherKey == FacetParams.FacetRangeOther.NONE) continue;
-
-        String name = otherKey.toString();
-        Integer shardValue = (Integer) rangeFromShard.get(name);
-        if (shardValue != null && shardValue > 0) {
-          Integer existingValue = (Integer) rangeFacet.get(name);
-          // shouldn't be null
-          int idx = rangeFacet.indexOf(name, 0);
-          rangeFacet.setVal(idx, existingValue + shardValue);
-        }
-      }
-    }
-
-    /**
-     * Removes all counts under the given minCount from the accumulated facet_ranges.
-     * <p>
-     * Note: this method should only be called after all shard responses have been
-     * accumulated using {@link #mergeContributionFromShard(SimpleOrderedMap)}
-     *
-     * @param minCount the minimum allowed count for any range
-     */
-    public void removeRangeFacetsUnderLimits(int minCount) {
-      boolean replace = false;
-
-      @SuppressWarnings("unchecked")
-      NamedList<Number> vals = (NamedList<Number>) rangeFacet.get("counts");
-      NamedList<Number> newList = new NamedList<>();
-      for (Map.Entry<String, Number> pair : vals) {
-        if (pair.getValue().longValue() >= minCount) {
-          newList.add(pair.getKey(), pair.getValue());
-        } else {
-          replace = true;
-        }
-      }
-      if (replace) {
-        vals.clear();
-        vals.addAll(newList);
-      }
-    }
-  }
-
-  /**
-   * Perhaps someday instead of having a giant "instanceof" case
-   * statement to pick an impl, we can add a "RangeFacetable" marker
-   * interface to FieldTypes and they can return instances of these
-   * directly from some method -- but until then, keep this locked down
-   * and private.
-   */
-  private static abstract class RangeEndpointCalculator<T extends Comparable<T>> {
-    protected final RangeFacetRequest rfr;
-    protected final SchemaField field;
-
-    /**
-     * The end of the facet.range as determined by this calculator.
-     * This can be different from the facet.range.end depending on the
-     * facet.range.hardend parameter
-     */
-    protected T computedEnd;
-
-    protected T start;
-
-    protected Object gap;
-
-    protected boolean computed = false;
-
-    public RangeEndpointCalculator(RangeFacetRequest rfr) {
-      this.rfr = rfr;
-      this.field = rfr.getSchemaField();
-    }
-
-    /** The Computed End point of all ranges, as an Object of type suitable for direct inclusion in the response data */
-    public Object getComputedEnd() {
-      assert computed;
-      return computedEnd;
-    }
-
-    /** The Start point of all ranges, as an Object of type suitable for direct inclusion in the response data */
-    public Object getStart() {
-      assert computed;
-      return start;
-    }
-
-    /**
-     * @return the parsed value of the {@link FacetParams#FACET_RANGE_GAP} parameter. The type
-     * of the returned object is the boxed type of the schema field type's primitive counterpart,
-     * except for dates, where the returned type is just a String (a date gap can be either a
-     * date or a DateMath expression).
-     */
-    public Object getGap() {
-      assert computed;
-      return gap;
-    }
-
-    /**
-     * Formats a Range endpoint for use as a range label name in the response.
-     * Default Impl just uses toString()
-     */
-    public String formatValue(final T val) {
-      return val.toString();
-    }
-
-    /**
-     * Parses a String param into a Range endpoint value, throwing
-     * a useful exception if not possible.
-     */
-    public final T getValue(final String rawval) {
-      try {
-        return parseVal(rawval);
-      } catch (Exception e) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "Can't parse value " + rawval + " for field: " +
-                field.getName(), e);
-      }
-    }
-
-    /**
-     * Parses a String param into a Range endpoint.
-     * Can throw a low level format exception as needed.
-     */
-    protected abstract T parseVal(final String rawval)
-        throws java.text.ParseException;
-
-    /**
-     * Parses a String param into a value that represents the gap and
-     * can be included in the response, throwing
-     * a useful exception if not possible.
-     * <p>
-     * Note: uses Object as the return type instead of T for things like
-     * Date where gap is just a DateMathParser string
-     */
-    protected final Object getGap(final String gap) {
-      try {
-        return parseGap(gap);
-      } catch (Exception e) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "Can't parse gap " + gap + " for field: " +
-                field.getName(), e);
-      }
-    }
-
-    /**
-     * Parses a String param into a value that represents the gap and
-     * can be included in the response.
-     * Can throw a low level format exception as needed.
-     * <p>
-     * Default Impl calls parseVal
-     */
-    protected Object parseGap(final String rawval)
-        throws java.text.ParseException {
-      return parseVal(rawval);
-    }
-
-    /**
-     * Adds the String gap param to a low Range endpoint value to determine
-     * the corresponding high Range endpoint value, throwing
-     * a useful exception if not possible.
-     */
-    public final T addGap(T value, String gap) {
-      try {
-        return parseAndAddGap(value, gap);
-      } catch (Exception e) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "Can't add gap " + gap + " to value " + value +
-                " for field: " + field.getName(), e);
-      }
-    }
-
-    /**
-     * Adds the String gap param to a low Range endpoint value to determine
-     * the corresponding high Range endpoint value.
-     * Can throw a low level format exception as needed.
-     */
-    protected abstract T parseAndAddGap(T value, String gap)
-        throws java.text.ParseException;
-
-    public List<FacetRange> computeRanges() {
-      List<FacetRange> ranges = new ArrayList<>();
-
-      this.gap = getGap(rfr.getGap());
-      this.start = getValue(rfr.getStart());
-      // not final, hardend may change this
-      T end = getValue(rfr.getEnd());
-      if (end.compareTo(start) < 0) {
-        throw new SolrException
-            (SolrException.ErrorCode.BAD_REQUEST,
-                "range facet 'end' comes before 'start': " + end + " < " + start);
-      }
-
-      final EnumSet<FacetParams.FacetRangeInclude> include = rfr.getInclude();
-
-      T low = start;
-
-      while (low.compareTo(end) < 0) {
-        T high = addGap(low, rfr.getGap());
-        if (end.compareTo(high) < 0) {
-          if (rfr.isHardEnd()) {
-            high = end;
-          } else {
-            end = high;
-          }
-        }
-        if (high.compareTo(low) < 0) {
-          throw new SolrException
-              (SolrException.ErrorCode.BAD_REQUEST,
-                  "range facet infinite loop (is gap negative? did the math overflow?)");
-        }
-        if (high.compareTo(low) == 0) {
-          throw new SolrException
-              (SolrException.ErrorCode.BAD_REQUEST,
-                  "range facet infinite loop: gap is either zero, or too small relative start/end and caused underflow: " + low + " + " + rfr.getGap() + " = " + high);
-        }
-
-        final boolean includeLower =
-            (include.contains(FacetParams.FacetRangeInclude.LOWER) ||
-                (include.contains(FacetParams.FacetRangeInclude.EDGE) &&
-                    0 == low.compareTo(start)));
-        final boolean includeUpper =
-            (include.contains(FacetParams.FacetRangeInclude.UPPER) ||
-                (include.contains(FacetParams.FacetRangeInclude.EDGE) &&
-                    0 == high.compareTo(end)));
-
-        final String lowS = formatValue(low);
-        final String highS = formatValue(high);
-
-        ranges.add(new FacetRange(lowS, lowS, highS, includeLower, includeUpper));
-
-        low = high;
-      }
-
-      // we must update the end value in RangeFacetRequest because the end is returned
-      // as a separate element in the range facet response
-      this.computedEnd = end;
-      this.computed = true;
-
-      // no matter what other values are listed, we don't do
-      // anything if "none" is specified.
-      if (!rfr.getOthers().contains(FacetParams.FacetRangeOther.NONE)) {
-
-        boolean all = rfr.getOthers().contains(FacetParams.FacetRangeOther.ALL);
-        final String startS = formatValue(start);
-        final String endS = formatValue(end);
-
-        if (all || rfr.getOthers().contains(FacetParams.FacetRangeOther.BEFORE)) {
-          // include upper bound if "outer" or if first gap doesn't already include it
-          ranges.add(new FacetRange(FacetParams.FacetRangeOther.BEFORE,
-              null, startS, false, include.contains(FacetParams.FacetRangeInclude.OUTER) || include.contains(FacetParams.FacetRangeInclude.ALL) ||
-              !(include.contains(FacetParams.FacetRangeInclude.LOWER) || include.contains(FacetParams.FacetRangeInclude.EDGE))));
-        }
-        if (all || rfr.getOthers().contains(FacetParams.FacetRangeOther.AFTER)) {
-          // include lower bound if "outer" or if last gap doesn't already include it
-          ranges.add(new FacetRange(FacetParams.FacetRangeOther.AFTER,
-              endS, null, include.contains(FacetParams.FacetRangeInclude.OUTER) || include.contains(FacetParams.FacetRangeInclude.ALL) ||
-              !(include.contains(FacetParams.FacetRangeInclude.UPPER) || include.contains(FacetParams.FacetRangeInclude.EDGE)), false));
-        }
-        if (all || rfr.getOthers().contains(FacetParams.FacetRangeOther.BETWEEN)) {
-          ranges.add(new FacetRange(FacetParams.FacetRangeOther.BETWEEN, startS, endS,
-              include.contains(FacetParams.FacetRangeInclude.LOWER) || include.contains(FacetParams.FacetRangeInclude.EDGE) || include.contains(FacetParams.FacetRangeInclude.ALL),
-              include.contains(FacetParams.FacetRangeInclude.UPPER) || include.contains(FacetParams.FacetRangeInclude.EDGE) || include.contains(FacetParams.FacetRangeInclude.ALL)));
-        }
-      }
-
-      return ranges;
-    }
-
-  }
-
-  private static class FloatRangeEndpointCalculator
-      extends RangeEndpointCalculator<Float> {
-
-    public FloatRangeEndpointCalculator(final RangeFacetRequest rangeFacetRequest) {
-      super(rangeFacetRequest);
-    }
-
-    @Override
-    protected Float parseVal(String rawval) {
-      return Float.valueOf(rawval);
-    }
-
-    @Override
-    public Float parseAndAddGap(Float value, String gap) {
-      return value.floatValue() + Float.parseFloat(gap);
-    }
-  }
-
-  private static class DoubleRangeEndpointCalculator
-      extends RangeEndpointCalculator<Double> {
-
-    public DoubleRangeEndpointCalculator(final RangeFacetRequest rangeFacetRequest) {
-      super(rangeFacetRequest);
-    }
-
-    @Override
-    protected Double parseVal(String rawval) {
-      return Double.valueOf(rawval);
-    }
-
-    @Override
-    public Double parseAndAddGap(Double value, String gap) {
-      return value.doubleValue() + Double.parseDouble(gap);
-    }
-  }
-
-  private static class IntegerRangeEndpointCalculator
-      extends RangeEndpointCalculator<Integer> {
-
-    public IntegerRangeEndpointCalculator(final RangeFacetRequest rangeFacetRequest) {
-      super(rangeFacetRequest);
-    }
-
-    @Override
-    protected Integer parseVal(String rawval) {
-      return Integer.valueOf(rawval);
-    }
-
-    @Override
-    public Integer parseAndAddGap(Integer value, String gap) {
-      return value.intValue() + Integer.parseInt(gap);
-    }
-  }
-
-  private static class LongRangeEndpointCalculator
-      extends RangeEndpointCalculator<Long> {
-
-    public LongRangeEndpointCalculator(final RangeFacetRequest rangeFacetRequest) {
-      super(rangeFacetRequest);
-    }
-
-    @Override
-    protected Long parseVal(String rawval) {
-      return Long.valueOf(rawval);
-    }
-
-    @Override
-    public Long parseAndAddGap(Long value, String gap) {
-      return value.longValue() + Long.parseLong(gap);
-    }
-  }
-
-  private static class DateRangeEndpointCalculator
-      extends RangeEndpointCalculator<Date> {
-    private static final String TYPE_ERR_MSG = "SchemaField must use field type extending TrieDateField or DateRangeField";
-    private final Date now;
-
-    public DateRangeEndpointCalculator(final RangeFacetRequest rangeFacetRequest,
-                                       final Date now) {
-      super(rangeFacetRequest);
-      this.now = now;
-      if (!(field.getType() instanceof TrieDateField)
-          && !(field.getType() instanceof DateRangeField)
-          && !(field.getType() instanceof DatePointField)) {
-        throw new IllegalArgumentException(TYPE_ERR_MSG);
-      }
-    }
-
-    @Override
-    public String formatValue(Date val) {
-      return val.toInstant().toString();
-    }
-
-    @Override
-    protected Date parseVal(String rawval) {
-      return DateMathParser.parseMath(now, rawval);
-    }
-
-    @Override
-    protected Object parseGap(final String rawval) {
-      return rawval;
-    }
-
-    @Override
-    public Date parseAndAddGap(Date value, String gap) throws java.text.ParseException {
-      final DateMathParser dmp = new DateMathParser();
-      dmp.setNow(value);
-      return dmp.parseMath(gap);
-    }
-  }
-
-  private static class CurrencyRangeEndpointCalculator
-    extends RangeEndpointCalculator<CurrencyValue> {
-    private String defaultCurrencyCode;
-    private ExchangeRateProvider exchangeRateProvider;
-    public CurrencyRangeEndpointCalculator(final RangeFacetRequest rangeFacetRequest) {
-      super(rangeFacetRequest);
-      if(!(this.field.getType() instanceof CurrencyFieldType)) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-                                "Cannot perform range faceting over non CurrencyField fields");
-      }
-      defaultCurrencyCode =
-        ((CurrencyFieldType)this.field.getType()).getDefaultCurrency();
-      exchangeRateProvider =
-        ((CurrencyFieldType)this.field.getType()).getProvider();
-    }
-
-    @Override
-    protected Object parseGap(String rawval) throws java.text.ParseException {
-      return parseVal(rawval).strValue();
-    }
-
-    @Override
-    public String formatValue(CurrencyValue val) {
-      return val.strValue();
-    }
-
-    /** formats the value as a String since {@link CurrencyValue} is not suitable for response writers */
-    @Override
-    public Object getComputedEnd() {
-      assert computed;
-      return formatValue(computedEnd);
-    }
-    
-    /** formats the value as a String since {@link CurrencyValue} is not suitable for response writers */
-    @Override
-    public Object getStart() {
-      assert computed;
-      return formatValue(start);
-    }
-    
-    @Override
-    protected CurrencyValue parseVal(String rawval) {
-      return CurrencyValue.parse(rawval, defaultCurrencyCode);
-    }
-
-    @Override
-    public CurrencyValue parseAndAddGap(CurrencyValue value, String gap) {
-      if(value == null) {
-        throw new NullPointerException("Cannot perform range faceting on null CurrencyValue");
-      }
-      CurrencyValue gapCurrencyValue =
-        CurrencyValue.parse(gap, defaultCurrencyCode);
-      long gapAmount =
-        CurrencyValue.convertAmount(this.exchangeRateProvider,
-                                    gapCurrencyValue.getCurrencyCode(),
-                                    gapCurrencyValue.getAmount(),
-                                    value.getCurrencyCode());
-      return new CurrencyValue(value.getAmount() + gapAmount,
-                               value.getCurrencyCode());
-    }
-  }
-  
-  /**
-   * Represents a single facet range (or gap) for which the count is to be calculated
-   */
-  public static class FacetRange {
-    public final FacetParams.FacetRangeOther other;
-    public final String name;
-    public final String lower;
-    public final String upper;
-    public final boolean includeLower;
-    public final boolean includeUpper;
-
-    private FacetRange(FacetParams.FacetRangeOther other, String name, String lower, String upper, boolean includeLower, boolean includeUpper) {
-      this.other = other;
-      this.name = name;
-      this.lower = lower;
-      this.upper = upper;
-      this.includeLower = includeLower;
-      this.includeUpper = includeUpper;
-    }
-
-    /**
-     * Construct a facet range for a {@link org.apache.solr.common.params.FacetParams.FacetRangeOther} instance
-     */
-    public FacetRange(FacetParams.FacetRangeOther other, String lower, String upper, boolean includeLower, boolean includeUpper) {
-      this(other, other.toString(), lower, upper, includeLower, includeUpper);
-    }
-
-    /**
-     * Construct a facet range for the given name
-     */
-    public FacetRange(String name, String lower, String upper, boolean includeLower, boolean includeUpper) {
-      this(null, name, lower, upper, includeLower, includeUpper);
-    }
-  }
-}
-
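
A sketch of the shard-merge semantics implemented by DistribRangeFacet above.
The class is package-private, so this snippet assumes it runs inside
org.apache.solr.handler.component; the range keys and counts are illustrative:

NamedList<Integer> counts1 = new NamedList<>();
counts1.add("0.0", 3);                      // 3 hits in the range starting at 0.0 on shard 1
SimpleOrderedMap<Object> shard1 = new SimpleOrderedMap<>();
shard1.add("counts", counts1);

RangeFacetRequest.DistribRangeFacet merged = new RangeFacetRequest.DistribRangeFacet(null);
merged.mergeContributionFromShard(shard1);  // first contribution is adopted as the baseline

NamedList<Integer> counts2 = new NamedList<>();
counts2.add("0.0", 2);                      // same range, 2 more hits on shard 2
SimpleOrderedMap<Object> shard2 = new SimpleOrderedMap<>();
shard2.add("counts", counts2);
merged.mergeContributionFromShard(shard2);  // the "0.0" entry now reads 3 + 2 = 5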

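To make the hardend semantics concrete, here is a standalone re-derivation of the
gap arithmetic in computeRanges above, using plain ints instead of the
RangeEndpointCalculator machinery:

public class RangeMathSketch {
  public static void main(String[] args) {
    int start = 0, end = 25, gap = 10;
    boolean hardEnd = false;                      // facet.range.hardend
    int low = start;
    while (low < end) {
      int high = low + gap;
      if (end < high) {
        if (hardEnd) high = end; else end = high; // hardend clips; otherwise end snaps up
      }
      System.out.printf("[%d,%d)%n", low, high);  // default facet.range.include=lower
      low = high;
    }
    System.out.println("computed end = " + end);
    // hardEnd=false -> [0,10) [10,20) [20,30), computed end 30
    // hardEnd=true  -> [0,10) [10,20) [20,25), computed end 25
  }
}
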
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java b/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
deleted file mode 100644
index 6ca8ad1..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
+++ /dev/null
@@ -1,1268 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.stream.Collectors;
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.ScoreMode;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.cloud.CloudDescriptor;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.StringUtils;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.ResultContext;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.response.transform.DocTransformer;
-import org.apache.solr.schema.FieldType;
-import org.apache.solr.schema.IndexSchema;
-import org.apache.solr.schema.SchemaField;
-import org.apache.solr.search.DocList;
-import org.apache.solr.search.QParser;
-import org.apache.solr.search.ReturnFields;
-import org.apache.solr.search.SolrDocumentFetcher;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.search.SolrReturnFields;
-import org.apache.solr.search.SyntaxError;
-import org.apache.solr.update.CdcrUpdateLog;
-import org.apache.solr.update.DocumentBuilder;
-import org.apache.solr.update.IndexFingerprint;
-import org.apache.solr.update.PeerSync;
-import org.apache.solr.update.PeerSyncWithLeader;
-import org.apache.solr.update.UpdateLog;
-import org.apache.solr.util.RefCounted;
-import org.apache.solr.util.TestInjection;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.DISTRIB;
-import static org.apache.solr.common.params.CommonParams.ID;
-import static org.apache.solr.common.params.CommonParams.VERSION_FIELD;
-
-public class RealTimeGetComponent extends SearchComponent
-{
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  public static final String COMPONENT_NAME = "get";
-
-  @Override
-  public void prepare(ResponseBuilder rb) throws IOException {
-    // Set field flags
-    ReturnFields returnFields = new SolrReturnFields( rb.req );
-    rb.rsp.setReturnFields( returnFields );
-  }
-
-
-  @Override
-  public void process(ResponseBuilder rb) throws IOException
-  {
-    SolrQueryRequest req = rb.req;
-    SolrQueryResponse rsp = rb.rsp;
-    SolrParams params = req.getParams();
-    CloudDescriptor cloudDesc = req.getCore().getCoreDescriptor().getCloudDescriptor();
-
-    if (cloudDesc != null) {
-      Replica.Type replicaType = cloudDesc.getReplicaType();
-      if (replicaType != null) {
-        if (replicaType == Replica.Type.PULL) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, 
-              String.format(Locale.ROOT, "%s can't handle realtime get requests. Replicas of type %s do not support these type of requests", 
-                  cloudDesc.getCoreNodeName(),
-                  Replica.Type.PULL));
-        } 
-        // non-leader TLOG replicas should not respond to distrib /get requests, but internal requests are OK
-      }
-    }
-    
-    if (!params.getBool(COMPONENT_NAME, true)) {
-      return;
-    }
-    
-    // This seems rather kludgey; maybe there is a better way to indicate
-    // that a replica can support handling version ranges
-    String val = params.get("checkCanHandleVersionRanges");
-    if(val != null) {
-      rb.rsp.add("canHandleVersionRanges", true);
-      return;
-    }
-    
-    val = params.get("getFingerprint");
-    if(val != null) {
-      processGetFingerprint(rb);
-      return;
-    }
-    
-    val = params.get("getVersions");
-    if (val != null) {
-      processGetVersions(rb);
-      return;
-    }
-
-    val = params.get("getUpdates");
-    if (val != null) {
-      // solrcloud_debug
-      if (log.isDebugEnabled()) {
-        try {
-          RefCounted<SolrIndexSearcher> searchHolder = req.getCore()
-              .getNewestSearcher(false);
-          SolrIndexSearcher searcher = searchHolder.get();
-          try {
-            log.debug(req.getCore()
-                .getCoreContainer().getZkController().getNodeName()
-                + " min count to sync to (from most recent searcher view) "
-                + searcher.count(new MatchAllDocsQuery()));
-          } finally {
-            searchHolder.decref();
-          }
-        } catch (Exception e) {
-          log.debug("Error in solrcloud_debug block", e);
-        }
-      }
-      
-      processGetUpdates(rb);
-      return;
-    }
-    
-    val = params.get("getInputDocument");
-    if (val != null) {
-      processGetInputDocument(rb);
-      return;
-    }
-
-    final IdsRequested reqIds = IdsRequested.parseParams(req);
-    
-    if (reqIds.allIds.isEmpty()) {
-      return;
-    }
-
-    // parse any existing filters
-    try {
-      String[] fqs = req.getParams().getParams(CommonParams.FQ);
-      if (fqs!=null && fqs.length!=0) {
-        List<Query> filters = rb.getFilters();
-        // if filters already exists, make a copy instead of modifying the original
-        filters = filters == null ? new ArrayList<Query>(fqs.length) : new ArrayList<>(filters);
-        for (String fq : fqs) {
-          if (fq != null && fq.trim().length()!=0) {
-            QParser fqp = QParser.getParser(fq, req);
-            filters.add(fqp.getQuery());
-          }
-        }
-        if (!filters.isEmpty()) {
-          rb.setFilters( filters );
-        }
-      }
-    } catch (SyntaxError e) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
-    }
-
-    final SolrCore core = req.getCore();
-    SchemaField idField = core.getLatestSchema().getUniqueKeyField();
-    FieldType fieldType = idField.getType();
-
-    SolrDocumentList docList = new SolrDocumentList();
-    UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
-
-    SearcherInfo searcherInfo =  new SearcherInfo(core);
-    
-    // this is initialized & set on the context *after* any searcher (re-)opening
-    ResultContext resultContext = null;
-    final DocTransformer transformer = rsp.getReturnFields().getTransformer();
-
-    // true in any situation where we have to use a realtime searcher rather than returning docs
-    // directly from the UpdateLog
-    final boolean mustUseRealtimeSearcher =
-      // if we have filters, we need to check those against the indexed form of the doc
-      (rb.getFilters() != null)
-      || ((null != transformer) && transformer.needsSolrIndexSearcher());
-
-   try {
-
-
-     BytesRefBuilder idBytes = new BytesRefBuilder();
-     for (String idStr : reqIds.allIds) {
-       fieldType.readableToIndexed(idStr, idBytes);
-       if (ulog != null) {
-         Object o = ulog.lookup(idBytes.get());
-         if (o != null) {
-           // should currently be a List<Oper,Ver,Doc/Id>
-           List entry = (List)o;
-           assert entry.size() >= 3;
-           int oper = (Integer)entry.get(UpdateLog.FLAGS_IDX) & UpdateLog.OPERATION_MASK;
-           switch (oper) {
-             case UpdateLog.UPDATE_INPLACE: // fall through to ADD
-             case UpdateLog.ADD:
-
-               if (mustUseRealtimeSearcher) {
-                 // close handles to current searchers & result context
-                 searcherInfo.clear();
-                 resultContext = null;
-                 ulog.openRealtimeSearcher();  // force open a new realtime searcher
-                 o = null;  // pretend we never found this record and fall through to use the searcher
-                 break;
-               }
-
-               SolrDocument doc;
-               if (oper == UpdateLog.ADD) {
-                 doc = toSolrDoc((SolrInputDocument)entry.get(entry.size()-1), core.getLatestSchema());
-               } else if (oper == UpdateLog.UPDATE_INPLACE) {
-                 if (ulog instanceof CdcrUpdateLog) {
-                   assert entry.size() == 6;
-                 } else {
-                   assert entry.size() == 5;
-                 }
-                 // For the in-place update case, we have so far obtained only the partial document. We need to
-                 // resolve it to a full document to return to the user.
-                 doc = resolveFullDocument(core, idBytes.get(), rsp.getReturnFields(), (SolrInputDocument)entry.get(entry.size()-1), entry, null);
-                 if (doc == null) {
-                   break; // document has been deleted as the resolve was going on
-                 }
-               } else {
-                 throw new SolrException(ErrorCode.INVALID_STATE, "Expected ADD or UPDATE_INPLACE. Got: " + oper);
-               }
-               if (transformer!=null) {
-                 transformer.transform(doc, -1); // unknown docID
-               }
-              docList.add(doc);
-              break;
-             case UpdateLog.DELETE:
-              break;
-             default:
-               throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,  "Unknown Operation! " + oper);
-           }
-           if (o != null) continue;
-         }
-       }
-
-       // didn't find it in the update log, so it should be in the newest searcher opened
-       searcherInfo.init();
-       // don't bother with ResultContext yet, we won't need it if doc doesn't match filters
-
-       int docid = -1;
-       long segAndId = searcherInfo.getSearcher().lookupId(idBytes.get());
-       if (segAndId >= 0) {
-         int segid = (int) segAndId;
-         LeafReaderContext ctx = searcherInfo.getSearcher().getTopReaderContext().leaves().get((int) (segAndId >> 32));
-         docid = segid + ctx.docBase;
-
-         if (rb.getFilters() != null) {
-           for (Query raw : rb.getFilters()) {
-             Query q = raw.rewrite(searcherInfo.getSearcher().getIndexReader());
-             Scorer scorer = searcherInfo.getSearcher().createWeight(q, ScoreMode.COMPLETE_NO_SCORES, 1f).scorer(ctx);
-             if (scorer == null || segid != scorer.iterator().advance(segid)) {
-               // filter doesn't match.
-               docid = -1;
-               break;
-             }
-           }
-         }
-       }
-
-       if (docid < 0) continue;
-       
-       Document luceneDocument = searcherInfo.getSearcher().doc(docid, rsp.getReturnFields().getLuceneFieldNames());
-       SolrDocument doc = toSolrDoc(luceneDocument,  core.getLatestSchema());
-       SolrDocumentFetcher docFetcher = searcherInfo.getSearcher().getDocFetcher();
-       docFetcher.decorateDocValueFields(doc, docid, docFetcher.getNonStoredDVs(true));
-       if ( null != transformer) {
-         if (null == resultContext) {
-           // either first pass, or we've re-opened searcher - either way now we setContext
-           resultContext = new RTGResultContext(rsp.getReturnFields(), searcherInfo.getSearcher(), req);
-           transformer.setContext(resultContext);
-         }
-         transformer.transform(doc, docid);
-       }
-       docList.add(doc);
-     }
-
-   } finally {
-     searcherInfo.clear();
-   }
-
-   addDocListToResponse(rb, docList);
-  }
-  
-  /**
-   * Return the requested SolrInputDocument from the tlog/index. This will
-   * always be a full document, i.e. any partial in-place document will be resolved.
-   */
-  void processGetInputDocument(ResponseBuilder rb) throws IOException {
-    SolrQueryRequest req = rb.req;
-    SolrQueryResponse rsp = rb.rsp;
-    SolrParams params = req.getParams();
-
-    if (!params.getBool(COMPONENT_NAME, true)) {
-      return;
-    }
-
-    String idStr = params.get("getInputDocument", null);
-    if (idStr == null) return;
-    AtomicLong version = new AtomicLong();
-    SolrInputDocument doc = getInputDocument(req.getCore(), new BytesRef(idStr), version, false, null, true);
-    log.info("getInputDocument called for id="+idStr+", returning: "+doc);
-    rb.rsp.add("inputDocument", doc);
-    rb.rsp.add("version", version.get());
-  }
-
-  /**
-   * A SearcherInfo provides a mechanism for obtaining a realtime searcher from
-   * a SolrCore, and closing it, while taking care of the RefCounted references.
-   */
-  private static class SearcherInfo {
-    private RefCounted<SolrIndexSearcher> searcherHolder = null;
-    private SolrIndexSearcher searcher = null;
-    final SolrCore core;
-    
-    public SearcherInfo(SolrCore core) {
-      this.core = core;
-    }
-    
-    void clear(){
-      if (searcherHolder != null) {
-        // close handles to current searchers
-        searcher = null;
-        searcherHolder.decref();
-        searcherHolder = null;
-      }
-    }
-
-    void init(){
-      if (searcher == null) {
-        searcherHolder = core.getRealtimeSearcher();
-        searcher = searcherHolder.get();
-      }
-    }
-    
-    public SolrIndexSearcher getSearcher() {
-      assert null != searcher : "init not called!";
-      return searcher;
-    }
-  }
-
-  /**
-   * Given a partial document obtained from the transaction log (e.g. as a result of RTG), resolve to a full document
-   * by populating all the partial updates that were applied on top of that last full document update.
-   * 
-   * @param onlyTheseFields When a non-null set of field names is passed in, the resolve process only attempts to populate
-   *        the given fields in this set. When this set is null, it resolves all fields.
-   * @return Returns the merged document, i.e. the resolved full document, or null if the document was not found (deleted
-   *          after the resolving began)
-   */
-  private static SolrDocument resolveFullDocument(SolrCore core, BytesRef idBytes,
-                                           ReturnFields returnFields, SolrInputDocument partialDoc, List logEntry, Set<String> onlyTheseFields) throws IOException {
-    if (idBytes == null || (logEntry.size() != 5 && logEntry.size() != 6)) {
-      throw new SolrException(ErrorCode.INVALID_STATE, "Either Id field not present in partial document or log entry doesn't have previous version.");
-    }
-    long prevPointer = (long) logEntry.get(UpdateLog.PREV_POINTER_IDX);
-    long prevVersion = (long) logEntry.get(UpdateLog.PREV_VERSION_IDX);
-
-    // get the last full document from ulog
-    UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
-    long lastPrevPointer = ulog.applyPartialUpdates(idBytes, prevPointer, prevVersion, onlyTheseFields, partialDoc);
-
-    if (lastPrevPointer == -1) { // full document was not found in tlog, but exists in index
-      SolrDocument mergedDoc = mergePartialDocWithFullDocFromIndex(core, idBytes, returnFields, onlyTheseFields, partialDoc);
-      return mergedDoc;
-    } else if (lastPrevPointer > 0) {
-      // We were supposed to have found the last full doc also in the tlogs, but the prevPointer links led to nowhere
-      // We should reopen a new RT searcher and get the doc. This should be a rare occurrence
-      Term idTerm = new Term(core.getLatestSchema().getUniqueKeyField().getName(), idBytes);
-      SolrDocument mergedDoc = reopenRealtimeSearcherAndGet(core, idTerm, returnFields);
-      if (mergedDoc == null) {
-        return null; // the document may have been deleted as the resolving was going on.
-      }
-      return mergedDoc;
-    } else { // i.e. lastPrevPointer==0
-      assert lastPrevPointer == 0;
-      // We have successfully resolved the document based off the tlogs
-      return toSolrDoc(partialDoc, core.getLatestSchema());
-    }
-  }
-
-  /**
-   * Re-open the RT searcher and get the document, referred to by the idTerm, from that searcher. 
-   * @return Returns the document or null if not found.
-   */
-  private static SolrDocument reopenRealtimeSearcherAndGet(SolrCore core, Term idTerm, ReturnFields returnFields) throws IOException {
-    UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
-    ulog.openRealtimeSearcher();
-    RefCounted<SolrIndexSearcher> searcherHolder = core.getRealtimeSearcher();
-    try {
-      SolrIndexSearcher searcher = searcherHolder.get();
-
-      int docid = searcher.getFirstMatch(idTerm);
-      if (docid < 0) {
-        return null;
-      }
-      Document luceneDocument = searcher.doc(docid, returnFields.getLuceneFieldNames());
-      SolrDocument doc = toSolrDoc(luceneDocument, core.getLatestSchema());
-      SolrDocumentFetcher docFetcher = searcher.getDocFetcher();
-      docFetcher.decorateDocValueFields(doc, docid, docFetcher.getNonStoredDVs(false));
-
-      return doc;
-    } finally {
-      searcherHolder.decref();
-    }
-  }
-
-  /**
-   * Gets a document from the index by id. If a non-null partial document (for in-place update) is passed in,
-   * this method obtains the document from the tlog/index by the given id, merges the partial document on top of it and then returns
-   * the resultant document.
-   *
-   * @param core           A SolrCore instance, useful for obtaining a realtimesearcher and the schema
-   * @param idBytes        Binary representation of the value of the unique key field
-   * @param returnFields   Return fields, as requested
-   * @param onlyTheseFields When a non-null set of field names is passed in, the merge process only attempts to merge
-   *        the given fields in this set. When this set is null, it merges all fields.
-   * @param partialDoc     A partial document (containing an in-place update) used for merging against a full document
-   *                       from the index; this may be null.
-   * @return If the partial document is null, this returns the document from the index, or null if not found.
-   *         If the partial document is not null, this returns the index document merged with the partial document,
-   *         or null if the document doesn't exist in the index.
-   */
-  private static SolrDocument mergePartialDocWithFullDocFromIndex(SolrCore core, BytesRef idBytes, ReturnFields returnFields,
-             Set<String> onlyTheseFields, SolrInputDocument partialDoc) throws IOException {
-    RefCounted<SolrIndexSearcher> searcherHolder = core.getRealtimeSearcher(); //Searcher();
-    try {
-      // now fetch last document from index, and merge partialDoc on top of it
-      SolrIndexSearcher searcher = searcherHolder.get();
-      SchemaField idField = core.getLatestSchema().getUniqueKeyField();
-      Term idTerm = new Term(idField.getName(), idBytes);
-
-      int docid = searcher.getFirstMatch(idTerm);
-      if (docid < 0) {
-        // The document was not found in index! Reopen a new RT searcher (to be sure) and get again.
-        // This should be because the document was deleted recently.
-        SolrDocument doc = reopenRealtimeSearcherAndGet(core, idTerm, returnFields);
-        if (doc == null) {
-          // Unable to resolve the last full doc in tlog fully,
-          // and document not found in index even after opening new rt searcher.
-          // This must be a case of deleted doc
-          return null;
-        }
-        return doc;
-      }
-
-      SolrDocument doc;
-      Set<String> decorateFields = onlyTheseFields == null ? searcher.getDocFetcher().getNonStoredDVs(false): onlyTheseFields;
-      Document luceneDocument = searcher.doc(docid, returnFields.getLuceneFieldNames());
-      doc = toSolrDoc(luceneDocument, core.getLatestSchema());
-      searcher.getDocFetcher().decorateDocValueFields(doc, docid, decorateFields);
-
-      long docVersion = (long) doc.getFirstValue(VERSION_FIELD);
-      Object partialVersionObj = partialDoc.getFieldValue(VERSION_FIELD);
-      long partialDocVersion = partialVersionObj instanceof Field? ((Field) partialVersionObj).numericValue().longValue():
-        partialVersionObj instanceof Number? ((Number) partialVersionObj).longValue(): Long.parseLong(partialVersionObj.toString());
-      if (docVersion > partialDocVersion) {
-        return doc;
-      }
-      for (String fieldName: partialDoc.getFieldNames()) {
-        doc.setField(fieldName, partialDoc.getFieldValue(fieldName));  // since partial doc will only contain single valued fields, this is fine
-      }
-
-      return doc;
-    } finally {
-      if (searcherHolder != null) {
-        searcherHolder.decref();
-      }
-    }
-  }
-
-  public static final SolrInputDocument DELETED = new SolrInputDocument();
-
-  /** returns the SolrInputDocument from the current tlog, or DELETED if it has been deleted, or
-   * null if there is no record of it in the current update log.  If null is returned, it could
-   * still be in the latest index.
-   * @param versionReturned If a non-null AtomicLong is passed in, it is set to the version of the update returned from the TLog.
-   * @param resolveFullDocument In case the document is fetched from the tlog, it could only be a partial document if the last update
-   *                  was an in-place update. In that case, should this partial document be resolved to a full document (by following
-   *                  back prevPointer/prevVersion)?
-   */
-  public static SolrInputDocument getInputDocumentFromTlog(SolrCore core, BytesRef idBytes, AtomicLong versionReturned,
-      Set<String> onlyTheseNonStoredDVs, boolean resolveFullDocument) {
-
-    UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
-
-    if (ulog != null) {
-      Object o = ulog.lookup(idBytes);
-      if (o != null) {
-        // should currently be a List<Oper,Ver,Doc/Id>
-        List entry = (List)o;
-        assert entry.size() >= 3;
-        int oper = (Integer)entry.get(UpdateLog.FLAGS_IDX) & UpdateLog.OPERATION_MASK;
-        if (versionReturned != null) {
-          versionReturned.set((long)entry.get(UpdateLog.VERSION_IDX));
-        }
-        switch (oper) {
-          case UpdateLog.UPDATE_INPLACE:
-            if (ulog instanceof CdcrUpdateLog) {
-              assert entry.size() == 6;
-            } else {
-              assert entry.size() == 5;
-            }
-
-            if (resolveFullDocument) {
-              SolrInputDocument doc = (SolrInputDocument)entry.get(entry.size()-1);
-              try {
-                // For the in-place update case, we have so far obtained only the partial document. We need to
-                // resolve it to a full document to return to the user.
-                SolrDocument sdoc = resolveFullDocument(core, idBytes, new SolrReturnFields(), doc, entry, onlyTheseNonStoredDVs);
-                if (sdoc == null) {
-                  return DELETED;
-                }
-                doc = toSolrInputDocument(sdoc, core.getLatestSchema());
-                return doc;
-              } catch (IOException ex) {
-                throw new SolrException(ErrorCode.SERVER_ERROR, "Error while resolving full document. ", ex);
-              }
-            } else {
-              // fall through to ADD, so as to get only the partial document
-            }
-          case UpdateLog.ADD:
-            return (SolrInputDocument) entry.get(entry.size()-1);
-          case UpdateLog.DELETE:
-            return DELETED;
-          default:
-            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,  "Unknown Operation! " + oper);
-        }
-      }
-    }
-
-    return null;
-  }
-
-  /**
-   * Obtains the latest document for a given id from the tlog or index (if not found in the tlog).
-   * 
-   * NOTE: This method calls {@link RealTimeGetComponent#getInputDocument(SolrCore, BytesRef, AtomicLong, boolean, Set, boolean)}
-   * with an effective value of false for the avoidRetrievingStoredFields param and null for nonStoredDVs,
-   * so as to retrieve all stored and non-stored DV fields from all documents. It also uses an effective value of
-   * true for the resolveFullDocument param, i.e. if the document is fetched from the tlog, any partial document
-   * (in-place update) is resolved to a full document.
-   */
-  public static SolrInputDocument getInputDocument(SolrCore core, BytesRef idBytes) throws IOException {
-    return getInputDocument(core, idBytes, null, false, null, true);
-  }
-  
-  /**
-   * Obtains the latest document for a given id from the tlog or through the realtime searcher (if not found in the tlog). 
-   * @param versionReturned If a non-null AtomicLong is passed in, it is set to the version of the update returned from the TLog.
-   * @param avoidRetrievingStoredFields Setting this to true avoids fetching stored fields through the realtime searcher,
-   *                  but has no effect on documents obtained from the tlog.
-   *                  Non-stored docValues fields are populated anyway, and are not affected by this parameter. Note that if
-   *                  the id field is a stored field, it will not be populated if this parameter is true and the document is
-   *                  obtained from the index.
-   * @param onlyTheseNonStoredDVs If not-null, populate only these DV fields in the document fetched through the realtime searcher. 
-   *                  If this is null, decorate all non-stored DVs (that are not targets of copy fields) from the searcher.
-   * @param resolveFullDocument In case the document is fetched from the tlog, it could only be a partial document if the last update
-   *                  was an in-place update. In that case, should this partial document be resolved to a full document (by following
-   *                  back prevPointer/prevVersion)?
-   */
-  public static SolrInputDocument getInputDocument(SolrCore core, BytesRef idBytes, AtomicLong versionReturned, boolean avoidRetrievingStoredFields,
-      Set<String> onlyTheseNonStoredDVs, boolean resolveFullDocument) throws IOException {
-    SolrInputDocument sid = null;
-    RefCounted<SolrIndexSearcher> searcherHolder = null;
-    try {
-      SolrIndexSearcher searcher = null;
-      sid = getInputDocumentFromTlog(core, idBytes, versionReturned, onlyTheseNonStoredDVs, resolveFullDocument);
-      if (sid == DELETED) {
-        return null;
-      }
-
-      if (sid == null) {
-        // didn't find it in the update log, so it should be in the newest searcher opened
-        if (searcher == null) {
-          searcherHolder = core.getRealtimeSearcher();
-          searcher = searcherHolder.get();
-        }
-
-        // SolrCore.verbose("RealTimeGet using searcher ", searcher);
-        SchemaField idField = core.getLatestSchema().getUniqueKeyField();
-
-        int docid = searcher.getFirstMatch(new Term(idField.getName(), idBytes));
-        if (docid < 0) return null;
-
-        SolrDocumentFetcher docFetcher = searcher.getDocFetcher();
-        if (avoidRetrievingStoredFields) {
-          sid = new SolrInputDocument();
-        } else {
-          Document luceneDocument = docFetcher.doc(docid);
-          sid = toSolrInputDocument(luceneDocument, core.getLatestSchema());
-        }
-        if (onlyTheseNonStoredDVs != null) {
-          docFetcher.decorateDocValueFields(sid, docid, onlyTheseNonStoredDVs);
-        } else {
-          docFetcher.decorateDocValueFields(sid, docid, docFetcher.getNonStoredDVsWithoutCopyTargets());
-        }
-      }
-    } finally {
-      if (searcherHolder != null) {
-        searcherHolder.decref();
-      }
-    }
-
-    if (versionReturned != null) {
-      if (sid.containsKey(VERSION_FIELD)) {
-        versionReturned.set((long)sid.getFieldValue(VERSION_FIELD));
-      }
-    }
-    return sid;
-  }
-
-  private static SolrInputDocument toSolrInputDocument(Document doc, IndexSchema schema) {
-    SolrInputDocument out = new SolrInputDocument();
-    for( IndexableField f : doc.getFields() ) {
-      String fname = f.name();
-      SchemaField sf = schema.getFieldOrNull(f.name());
-      Object val = null;
-      if (sf != null) {
-        if ((!sf.hasDocValues() && !sf.stored()) || schema.isCopyFieldTarget(sf)) continue;
-        val = sf.getType().toObject(f);   // object or external string?
-      } else {
-        val = f.stringValue();
-        if (val == null) val = f.numericValue();
-        if (val == null) val = f.binaryValue();
-        if (val == null) val = f;
-      }
-
-      // todo: how to handle targets of copy fields (including polyfield sub-fields)?
-      out.addField(fname, val);
-    }
-    return out;
-  }
-
-  private static SolrInputDocument toSolrInputDocument(SolrDocument doc, IndexSchema schema) {
-    SolrInputDocument out = new SolrInputDocument();
-    for( String fname : doc.getFieldNames() ) {
-      SchemaField sf = schema.getFieldOrNull(fname);
-      if (sf != null) {
-        if ((!sf.hasDocValues() && !sf.stored()) || schema.isCopyFieldTarget(sf)) continue;
-      }
-      for (Object val: doc.getFieldValues(fname)) {
-        if (val instanceof Field) {
-          Field f = (Field) val;
-          if (sf != null) {
-            val = sf.getType().toObject(f);   // object or external string?
-          } else {
-            val = f.stringValue();
-            if (val == null) val = f.numericValue();
-            if (val == null) val = f.binaryValue();
-            if (val == null) val = f;
-          }
-        }
-        out.addField(fname, val);
-      }
-    }
-    return out;
-  }
-
-  private static SolrDocument toSolrDoc(Document doc, IndexSchema schema) {
-    SolrDocument out = new SolrDocument();
-    for( IndexableField f : doc.getFields() ) {
-      // Make sure multivalued fields are represented as lists
-      Object existing = out.get(f.name());
-      if (existing == null) {
-        SchemaField sf = schema.getFieldOrNull(f.name());
-
-        // don't return copyField targets
-        if (sf != null && schema.isCopyFieldTarget(sf)) continue;
-
-        if (sf != null && sf.multiValued()) {
-          List<Object> vals = new ArrayList<>();
-          if (f.fieldType().docValuesType() == DocValuesType.SORTED_NUMERIC) {
-            // SORTED_NUMERICS store sortable bits version of the value, need to retrieve the original
-            vals.add(sf.getType().toObject(f)); // (will materialize by side-effect)
-          } else {
-            vals.add( materialize(f) );
-          }
-          out.setField( f.name(), vals );
-        }
-        else{
-          out.setField( f.name(), materialize(f) );
-        }
-      }
-      else {
-        out.addField( f.name(), materialize(f) );
-      }
-    }
-    return out;
-  }
-
-  /**
-   * Ensure we don't have {@link org.apache.lucene.document.LazyDocument.LazyField} or equivalent.
-   * It can pose problems if the searcher is about to be closed and we haven't fetched a value yet.
-   */
-  private static IndexableField materialize(IndexableField in) {
-    if (in instanceof Field) { // already materialized
-      return in;
-    }
-    return new ClonedField(in);
-  }
-
-  private static class ClonedField extends Field { // TODO Lucene Field has no copy constructor; maybe it should?
-    ClonedField(IndexableField in) {
-      super(in.name(), in.fieldType());
-      this.fieldsData = in.numericValue();
-      if (this.fieldsData == null) {
-        this.fieldsData = in.binaryValue();
-        if (this.fieldsData == null) {
-          this.fieldsData = in.stringValue();
-          if (this.fieldsData == null) {
-            // fallback:
-            assert false : in; // unexpected
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Converts a SolrInputDocument to SolrDocument, using an IndexSchema instance. 
-   * @lucene.experimental
-   */
-  public static SolrDocument toSolrDoc(SolrInputDocument sdoc, IndexSchema schema) {
-    // TODO what about child / nested docs?
-    // TODO: do something more performant than this double conversion
-    Document doc = DocumentBuilder.toDocument(sdoc, schema);
-
-    // copy the stored fields only
-    Document out = new Document();
-    for (IndexableField f : doc.getFields()) {
-      if (f.fieldType().stored()) {
-        out.add(f);
-      } else if (f.fieldType().docValuesType() != DocValuesType.NONE) {
-        SchemaField schemaField = schema.getFieldOrNull(f.name());
-        if (schemaField != null && !schemaField.stored() && schemaField.useDocValuesAsStored()) {
-          out.add(f);
-        }
-      } else {
-        log.debug("Don't know how to handle field {}", f);
-      }
-    }
-
-    return toSolrDoc(out, schema);
-  }
-
-  @Override
-  public int distributedProcess(ResponseBuilder rb) throws IOException {
-    if (rb.stage < ResponseBuilder.STAGE_GET_FIELDS)
-      return ResponseBuilder.STAGE_GET_FIELDS;
-    if (rb.stage == ResponseBuilder.STAGE_GET_FIELDS) {
-      return createSubRequests(rb);
-    }
-    return ResponseBuilder.STAGE_DONE;
-  }
-
-  public int createSubRequests(ResponseBuilder rb) throws IOException {
-    
-    final IdsRequested reqIds = IdsRequested.parseParams(rb.req);
-    if (reqIds.allIds.isEmpty()) {
-      return ResponseBuilder.STAGE_DONE;
-    }
-    
-    SolrParams params = rb.req.getParams();
-
-    // TODO: handle collection=...?
-
-    ZkController zkController = rb.req.getCore().getCoreContainer().getZkController();
-
-    // if shards=... then use that
-    if (zkController != null && params.get(ShardParams.SHARDS) == null) {
-      CloudDescriptor cloudDescriptor = rb.req.getCore().getCoreDescriptor().getCloudDescriptor();
-
-      String collection = cloudDescriptor.getCollectionName();
-      ClusterState clusterState = zkController.getClusterState();
-      DocCollection coll = clusterState.getCollection(collection);
-
-
-      Map<String, List<String>> sliceToId = new HashMap<>();
-      for (String id : reqIds.allIds) {
-        Slice slice = coll.getRouter().getTargetSlice(id, null, null, params, coll);
-
-        List<String> idsForShard = sliceToId.get(slice.getName());
-        if (idsForShard == null) {
-          idsForShard = new ArrayList<>(2);
-          sliceToId.put(slice.getName(), idsForShard);
-        }
-        idsForShard.add(id);
-      }
-
-      for (Map.Entry<String,List<String>> entry : sliceToId.entrySet()) {
-        String shard = entry.getKey();
-
-        ShardRequest sreq = createShardRequest(rb, entry.getValue());
-        // sreq.shards = new String[]{shard};    // TODO: would be nice if this would work...
-        sreq.shards = sliceToShards(rb, collection, shard);
-        sreq.actualShards = sreq.shards;
-        
-        rb.addRequest(this, sreq);
-      }      
-    } else {
-      ShardRequest sreq = createShardRequest(rb, reqIds.allIds);
-      sreq.shards = null;  // ALL
-      sreq.actualShards = sreq.shards;
-
-      rb.addRequest(this, sreq);
-    }
-
-    return ResponseBuilder.STAGE_DONE;
-  }
-
-  /**
-   * Helper method for creating a new ShardRequest for the specified ids, based on the params 
-   * specified for the current request.  The new ShardRequest does not yet know anything about 
-   * which shard/slice it will be sent to.
-   */
-  private ShardRequest createShardRequest(final ResponseBuilder rb, final List<String> ids) {
-    final ShardRequest sreq = new ShardRequest();
-    sreq.purpose = 1;
-    sreq.params = new ModifiableSolrParams(rb.req.getParams());
-
-    // TODO: how to avoid hardcoding this and hit the same handler?
-    sreq.params.set(ShardParams.SHARDS_QT,"/get");      
-    sreq.params.set(DISTRIB,false);
-
-    sreq.params.remove(ShardParams.SHARDS);
-    sreq.params.remove(ID);
-    sreq.params.remove("ids");
-    sreq.params.set("ids", StrUtils.join(ids, ','));
-    
-    return sreq;
-  }
-  
-  private String[] sliceToShards(ResponseBuilder rb, String collection, String slice) {
-    String lookup = collection + '_' + slice;  // seems either form may be filled in rb.slices?
-    
-    // We use this since the shard handler already filled in the slice to shards mapping.
-    // A better approach would be to avoid filling out every slice each time, or to cache
-    // the mappings.
-
-    for (int i=0; i<rb.slices.length; i++) {
-      log.info("LOOKUP_SLICE:" + rb.slices[i] + "=" + rb.shards[i]);
-      if (lookup.equals(rb.slices[i]) || slice.equals(rb.slices[i])) {
-        return new String[]{rb.shards[i]};
-      }
-    }
-
-
-    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Can't find shard '" + lookup + "'");
-  }
-
-  /***
-  private void handleRegularResponses(ResponseBuilder rb, ShardRequest sreq) {
-  }
-  ***/
-
-  @Override
-  public void finishStage(ResponseBuilder rb) {
-    if (rb.stage != ResponseBuilder.STAGE_GET_FIELDS) {
-      return;
-    }
-    
-    mergeResponses(rb);
-  }
-  
-  private void mergeResponses(ResponseBuilder rb) {
-    SolrDocumentList docList = new SolrDocumentList();
-    
-    for (ShardRequest sreq : rb.finished) {
-      // if shards=shard1,shard2 was used, then we query both shards for each id and
-      // can get more than one response
-      for (ShardResponse srsp : sreq.responses) {
-        SolrResponse sr = srsp.getSolrResponse();
-        NamedList nl = sr.getResponse();
-        SolrDocumentList subList = (SolrDocumentList)nl.get("response");
-        docList.addAll(subList);
-      }
-    }
-    
-    addDocListToResponse(rb, docList);
-  }
-
-  /**
-   * Encapsulates logic for how a {@link SolrDocumentList} should be added to the response
-   * based on the request params used
-   */
-  private void addDocListToResponse(final ResponseBuilder rb, final SolrDocumentList docList) {
-    assert null != docList;
-    
-    final SolrQueryResponse rsp = rb.rsp;
-    final IdsRequested reqIds = IdsRequested.parseParams(rb.req);
-    
-    if (reqIds.useSingleDocResponse) {
-      assert docList.size() <= 1;
-      // if the doc was not found, then use a value of null.
-      rsp.add("doc", docList.size() > 0 ? docList.get(0) : null);
-    } else {
-      docList.setNumFound(docList.size());
-      rsp.addResponse(docList);
-    }
-  }
-
-                                                                                               
-
-  ////////////////////////////////////////////
-  ///  SolrInfoBean
-  ////////////////////////////////////////////
-
-  @Override
-  public String getDescription() {
-    return "query";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.QUERY;
-  }
-
-  public void processGetFingerprint(ResponseBuilder rb) throws IOException {
-    TestInjection.injectFailIndexFingerprintRequests();
-
-    SolrQueryRequest req = rb.req;
-    SolrParams params = req.getParams();
-
-    long maxVersion = params.getLong("getFingerprint", Long.MAX_VALUE);
-    if (TestInjection.injectWrongIndexFingerprint())  {
-      maxVersion = -1;
-    }
-    IndexFingerprint fingerprint = IndexFingerprint.getFingerprint(req.getCore(), Math.abs(maxVersion));
-    rb.rsp.add("fingerprint", fingerprint);
-  }
-  
-
-  ///////////////////////////////////////////////////////////////////////////////////
-  // Returns last versions added to index
-  ///////////////////////////////////////////////////////////////////////////////////
-
-
-  public void processGetVersions(ResponseBuilder rb) throws IOException
-  {
-    SolrQueryRequest req = rb.req;
-    SolrQueryResponse rsp = rb.rsp;
-    SolrParams params = req.getParams();
-
-    if (!params.getBool(COMPONENT_NAME, true)) {
-      return;
-    }
-
-    int nVersions = params.getInt("getVersions", -1);
-    if (nVersions == -1) return;
-
-    boolean doFingerprint = params.getBool("fingerprint", false);
-
-    String sync = params.get("sync");
-    if (sync != null) {
-      processSync(rb, nVersions, sync);
-      return;
-    }
-
-    UpdateLog ulog = req.getCore().getUpdateHandler().getUpdateLog();
-    if (ulog == null) return;
-    String syncWithLeader = params.get("syncWithLeader");
-    if (syncWithLeader != null) {
-      List<Long> versions;
-      try (UpdateLog.RecentUpdates recentUpdates = ulog.getRecentUpdates()) {
-        versions = recentUpdates.getVersions(nVersions);
-      }
-      processSyncWithLeader(rb, nVersions, syncWithLeader, versions);
-      return;
-    }
-
-    // get the fingerprint first, as it will cause a soft commit
-    // and avoids a mismatch if documents are being actively indexed, especially during PeerSync
-    if (doFingerprint) {
-      IndexFingerprint fingerprint = IndexFingerprint.getFingerprint(req.getCore(), Long.MAX_VALUE);
-      rb.rsp.add("fingerprint", fingerprint);
-    }
-
-    try (UpdateLog.RecentUpdates recentUpdates = ulog.getRecentUpdates()) {
-      List<Long> versions = recentUpdates.getVersions(nVersions);
-      rb.rsp.add("versions", versions);
-    }
-  }
-
-  public void processSyncWithLeader(ResponseBuilder rb, int nVersions, String syncWithLeader, List<Long> versions) {
-    PeerSyncWithLeader peerSync = new PeerSyncWithLeader(rb.req.getCore(), syncWithLeader, nVersions);
-    boolean success = peerSync.sync(versions).isSuccess();
-    rb.rsp.add("syncWithLeader", success);
-  }
-
-  
-  public void processSync(ResponseBuilder rb, int nVersions, String sync) {
-    
-    boolean onlyIfActive = rb.req.getParams().getBool("onlyIfActive", false);
-    
-    if (onlyIfActive) {
-      if (rb.req.getCore().getCoreDescriptor().getCloudDescriptor().getLastPublished() != Replica.State.ACTIVE) {
-        log.info("Last published state was not ACTIVE, cannot sync.");
-        rb.rsp.add("sync", "false");
-        return;
-      }
-    }
-    
-    List<String> replicas = StrUtils.splitSmart(sync, ",", true);
-    
-    boolean cantReachIsSuccess = rb.req.getParams().getBool("cantReachIsSuccess", false);
-    
-    PeerSync peerSync = new PeerSync(rb.req.getCore(), replicas, nVersions, cantReachIsSuccess);
-    boolean success = peerSync.sync().isSuccess();
-    
-    // TODO: more complex response?
-    rb.rsp.add("sync", success);
-  }
-  
-
-  public void processGetUpdates(ResponseBuilder rb) throws IOException
-  {
-    SolrQueryRequest req = rb.req;
-    SolrQueryResponse rsp = rb.rsp;
-    SolrParams params = req.getParams();
-
-    if (!params.getBool(COMPONENT_NAME, true)) {
-      return;
-    }
-
-    String versionsStr = params.get("getUpdates");
-    if (versionsStr == null) return;
-
-    UpdateLog ulog = req.getCore().getUpdateHandler().getUpdateLog();
-    if (ulog == null) return;
-
-    // handle version ranges
-    List<Long> versions = null;
-    if (versionsStr.indexOf("...") != -1) {
-      versions = resolveVersionRanges(versionsStr, ulog);
-    } else {
-      versions = StrUtils.splitSmart(versionsStr, ",", true).stream().map(Long::parseLong)
-          .collect(Collectors.toList());
-    }
-
-    // find fingerprint for max version for which updates are requested
-    boolean doFingerprint = params.getBool("fingerprint", false);
-    if (doFingerprint) {
-      long maxVersionForUpdate = Collections.min(versions, PeerSync.absComparator);
-      IndexFingerprint fingerprint = IndexFingerprint.getFingerprint(req.getCore(), Math.abs(maxVersionForUpdate));
-      rb.rsp.add("fingerprint", fingerprint);
-    }
-
-    List<Object> updates = new ArrayList<>(versions.size());
-
-    long minVersion = Long.MAX_VALUE;
-
-    // TODO: get this from cache instead of rebuilding?
-    try (UpdateLog.RecentUpdates recentUpdates = ulog.getRecentUpdates()) {
-      for (Long version : versions) {
-        try {
-          Object o = recentUpdates.lookup(version);
-          if (o == null) continue;
-
-          if (version > 0) {
-            minVersion = Math.min(minVersion, version);
-          }
-
-          // TODO: do any kind of validation here?
-          updates.add(o);
-
-        } catch (SolrException | ClassCastException e) {
-          log.warn("Exception reading log for updates", e);
-        }
-      }
-
-      // Must return all delete-by-query commands that occur after the first add requested
-      // since they may apply.
-      if (params.getBool("skipDbq", false)) {
-        updates.addAll(recentUpdates.getDeleteByQuery(minVersion));
-      }
-
-      rb.rsp.add("updates", updates);
-
-    }
-  }
-  
-  
-  private List<Long> resolveVersionRanges(String versionsStr, UpdateLog ulog) {
-    if (StringUtils.isEmpty(versionsStr)) {
-      return Collections.emptyList();
-    }
-    
-    List<String> ranges = StrUtils.splitSmart(versionsStr, ",", true);
-    
-    // TODO merge ranges.
-    
-    // get all the versions from updatelog and sort them
-    List<Long> versionAvailable = null;
-    try (UpdateLog.RecentUpdates recentUpdates = ulog.getRecentUpdates()) {
-      versionAvailable = recentUpdates.getVersions(ulog.getNumRecordsToKeep());
-    }
-    // sort versions
-    Collections.sort(versionAvailable, PeerSync.absComparator);
-    
-    // This could be done in a single pass over both ranges and versionAvailable, but that would require
-    // merging the ranges. We currently use a Set to ensure there are no duplicates.
-    Set<Long> versionsToRet = new HashSet<>(ulog.getNumRecordsToKeep());
-    for (String range : ranges) {
-      String[] rangeBounds = range.split("\\.{3}");
-      int indexStart = Collections.binarySearch(versionAvailable, Long.valueOf(rangeBounds[1]), PeerSync.absComparator);
-      int indexEnd = Collections.binarySearch(versionAvailable, Long.valueOf(rangeBounds[0]), PeerSync.absComparator); 
-      if (indexStart >= 0 && indexEnd >= 0) {
-        versionsToRet.addAll(versionAvailable.subList(indexStart, indexEnd + 1)); // subList's upper bound is exclusive, so +1 includes indexEnd
-      }
-    }
-    // TODO do we need to sort versions using PeerSync.absComparator?
-    return new ArrayList<>(versionsToRet);
-  }
-
-  /** 
-   * Simple struct for tracking which ids were requested and what response format is expected
-   * according to the request params
-   */
-  private static final class IdsRequested {
-    /** A List (which may be empty but will never be null) of the uniqueKeys requested. */
-    public final List<String> allIds;
-    /** 
-     * true if the params provided by the user indicate that a single doc response structure 
-     * should be used.  
-     * Value is meaningless if <code>ids</code> is empty.
-     */
-    public final boolean useSingleDocResponse;
-    private IdsRequested(List<String> allIds, boolean useSingleDocResponse) {
-      assert null != allIds;
-      this.allIds = allIds;
-      this.useSingleDocResponse = useSingleDocResponse;
-    }
-    
-    /**
-     * Parses the <code>id</code> and <code>ids</code> params attached to the specified request object,
-     * and returns an <code>IdsRequested</code> struct to use for this request.
-     * The <code>IdsRequested</code> is cached in the {@link SolrQueryRequest#getContext} so subsequent
-     * method calls on the same request will not re-parse the params.
-     */
-    public static IdsRequested parseParams(SolrQueryRequest req) {
-      final String contextKey = IdsRequested.class.toString() + "_PARSED_ID_PARAMS";
-      if (req.getContext().containsKey(contextKey)) {
-        return (IdsRequested)req.getContext().get(contextKey);
-      }
-      final SolrParams params = req.getParams();
-      final String[] id = params.getParams(ID);
-      final String[] ids = params.getParams("ids");
-      
-      if (id == null && ids == null) {
-        IdsRequested result = new IdsRequested(Collections.<String>emptyList(), true);
-        req.getContext().put(contextKey, result);
-        return result;
-      }
-      final List<String> allIds = new ArrayList<>((null == id ? 0 : id.length)
-                                                  + (null == ids ? 0 : (2 * ids.length)));
-      if (null != id) {
-        for (String singleId : id) {
-          allIds.add(singleId);
-        }
-      }
-      if (null != ids) {
-        for (String idList : ids) {
-          allIds.addAll( StrUtils.splitSmart(idList, ",", true) );
-        }
-      }
-      // if the client specified a single id=foo, then use "doc":{
-      // otherwise use a standard doclist
-      IdsRequsted result = new IdsRequsted(allIds, (ids == null && allIds.size() <= 1));
-      req.getContext().put(contextKey, result);
-      return result;
-    }
-  }
-
-  
-  /**
-   * A lightweight ResultContext for use with RTG requests that can point at realtime searchers
-   */
-  private static final class RTGResultContext extends ResultContext {
-    final ReturnFields returnFields;
-    final SolrIndexSearcher searcher;
-    final SolrQueryRequest req;
-    public RTGResultContext(ReturnFields returnFields, SolrIndexSearcher searcher, SolrQueryRequest req) {
-      this.returnFields = returnFields;
-      this.searcher = searcher;
-      this.req = req;
-    }
-    
-    /** @return null */
-    public DocList getDocList() {
-      return null;
-    }
-    
-    public ReturnFields getReturnFields() {
-      return this.returnFields;
-    }
-    
-    public SolrIndexSearcher getSearcher() {
-      return this.searcher;
-    }
-    
-    /** @return null */
-    public Query getQuery() {
-      return null;
-    }
-    
-    public SolrQueryRequest getRequest() {
-      return this.req;
-    }
-    
-    /** @return null */
-    public Iterator<SolrDocument> getProcessedDocuments() {
-      return null;
-    }
-  }
-  
-}

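For context on the component removed above: realtime get is served by the /get handler, and the response shape depends on how ids were requested. As addDocListToResponse shows, a single id=... parameter produces one "doc" entry (null if the document is absent), while ids=... produces a standard doclist whose numFound equals its size. A SolrJ sketch of both shapes; the URL, core name, and document ids are placeholders:

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.SolrQuery;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.response.QueryResponse;
    import org.apache.solr.common.SolrDocument;
    import org.apache.solr.common.SolrDocumentList;

    public class RealTimeGetExample {
      public static void main(String[] args) throws Exception {
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/mycore").build()) {
          // Single id=... -> the response carries one "doc" entry (may be null).
          SolrQuery single = new SolrQuery();
          single.setRequestHandler("/get");
          single.set("id", "doc1");
          QueryResponse rsp = client.query(single);
          SolrDocument doc = (SolrDocument) rsp.getResponse().get("doc");
          System.out.println("doc1 = " + doc);

          // ids=a,b,... -> a standard "response" doclist; numFound == size.
          SolrQuery multi = new SolrQuery();
          multi.setRequestHandler("/get");
          multi.set("ids", "doc1,doc2");
          SolrDocumentList docs = client.query(multi).getResults();
          System.out.println("found " + docs.getNumFound());
        }
      }
    }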
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/ReplicaListTransformer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ReplicaListTransformer.java b/solr/core/src/java/org/apache/solr/handler/component/ReplicaListTransformer.java
deleted file mode 100644
index b7784e8..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/ReplicaListTransformer.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.util.List;
-
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.params.ShardParams;
-
-public interface ReplicaListTransformer {
-
-  /**
-   * Transforms the passed in list of choices. Transformations can include (but are not limited to)
-   * reordering of elements (e.g. via shuffling) and removal of elements (i.e. filtering).
-   *
-   * @param choices - a list of choices to transform, typically the choices are {@link Replica} objects but choices
-   * can also be {@link String} objects such as URLs passed in via the {@link ShardParams#SHARDS} parameter.
-   */
-  public void transform(List<?> choices);
-
-}
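The interface above leaves the transformation policy entirely to implementations. A minimal sketch of one, which randomizes the preference order of the choices; it is modeled on (but not copied from) the ShufflingReplicaListTransformer that lives alongside this interface in the same package:

    package org.apache.solr.handler.component;

    import java.util.Collections;
    import java.util.List;
    import java.util.Random;

    // Sketch: shuffle the replica/URL choices so load spreads across replicas.
    public class RandomizingReplicaListTransformer implements ReplicaListTransformer {

      private final Random random;

      public RandomizingReplicaListTransformer(Random random) {
        this.random = random;
      }

      @Override
      public void transform(List<?> choices) {
        if (choices.size() > 1) {
          Collections.shuffle(choices, random);
        }
      }
    }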