Posted to commits@lucene.apache.org by da...@apache.org on 2018/10/23 00:05:29 UTC

[08/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
deleted file mode 100644
index a548031..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
+++ /dev/null
@@ -1,512 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.lang.invoke.MethodHandles;
-import java.net.ConnectException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CompletionService;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Predicate;
-
-import org.apache.http.client.HttpClient;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.client.solrj.impl.HttpSolrClient.Builder;
-import org.apache.solr.client.solrj.impl.LBHttpSolrClient;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.util.ClientUtils;
-import org.apache.solr.cloud.CloudDescriptor;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.core.CoreDescriptor;
-import org.apache.solr.request.SolrQueryRequest;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
-
-public class HttpShardHandler extends ShardHandler {
-  
-  /**
-   * If the request context map has an entry with this key and Boolean.TRUE as value,
-   * {@link #prepDistributed(ResponseBuilder)} will only include {@link org.apache.solr.common.cloud.Replica.Type#NRT} replicas as possible
-   * destinations of the distributed request (or a leader replica of type {@link org.apache.solr.common.cloud.Replica.Type#TLOG}). This is used 
-   * by the RealtimeGet handler, since other types of replicas shouldn't respond to RTG requests
-   */
-  public static String ONLY_NRT_REPLICAS = "distribOnlyRealtime";
-
-  private HttpShardHandlerFactory httpShardHandlerFactory;
-  private CompletionService<ShardResponse> completionService;
-  private Set<Future<ShardResponse>> pending;
-  private Map<String,List<String>> shardToURLs;
-  private HttpClient httpClient;
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public HttpShardHandler(HttpShardHandlerFactory httpShardHandlerFactory, HttpClient httpClient) {
-    this.httpClient = httpClient;
-    this.httpShardHandlerFactory = httpShardHandlerFactory;
-    completionService = httpShardHandlerFactory.newCompletionService();
-    pending = new HashSet<>();
-
-    // maps "localhost:8983|localhost:7574" to a shuffled List("http://localhost:8983","http://localhost:7574")
-    // This is primarily to keep track of what order we should use to query the replicas of a shard
-    // so that we use the same replica for all phases of a distributed request.
-    shardToURLs = new HashMap<>();
-
-  }
-
-
-  private static class SimpleSolrResponse extends SolrResponse {
-
-    long elapsedTime;
-
-    NamedList<Object> nl;
-
-    @Override
-    public long getElapsedTime() {
-      return elapsedTime;
-    }
-
-    @Override
-    public NamedList<Object> getResponse() {
-      return nl;
-    }
-
-    @Override
-    public void setResponse(NamedList<Object> rsp) {
-      nl = rsp;
-    }
-
-    @Override
-    public void setElapsedTime(long elapsedTime) {
-      this.elapsedTime = elapsedTime;
-    }
-  }
-
-
-  // Not thread safe... don't use in Callable.
-  // Don't modify the returned URL list.
-  private List<String> getURLs(String shard) {
-    List<String> urls = shardToURLs.get(shard);
-    if (urls == null) {
-      urls = httpShardHandlerFactory.buildURLList(shard);
-      shardToURLs.put(shard, urls);
-    }
-    return urls;
-  }
-
-  @Override
-  public void submit(final ShardRequest sreq, final String shard, final ModifiableSolrParams params) {
-    // do this outside of the callable for thread safety reasons
-    final List<String> urls = getURLs(shard);
-
-    Callable<ShardResponse> task = () -> {
-
-      ShardResponse srsp = new ShardResponse();
-      if (sreq.nodeName != null) {
-        srsp.setNodeName(sreq.nodeName);
-      }
-      srsp.setShardRequest(sreq);
-      srsp.setShard(shard);
-      SimpleSolrResponse ssr = new SimpleSolrResponse();
-      srsp.setSolrResponse(ssr);
-      long startTime = System.nanoTime();
-
-      try {
-        params.remove(CommonParams.WT); // use default (currently javabin)
-        params.remove(CommonParams.VERSION);
-
-        QueryRequest req = makeQueryRequest(sreq, params, shard);
-        req.setMethod(SolrRequest.METHOD.POST);
-
-        // no need to set the response parser as binary is the default
-        // req.setResponseParser(new BinaryResponseParser());
-
-        // if there are no shards available for a slice, urls.size()==0
-        if (urls.size()==0) {
-          // TODO: what's the right error code here? We should use the same thing when
-          // all of the servers for a shard are down.
-          throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "no servers hosting shard: " + shard);
-        }
-
-        if (urls.size() <= 1) {
-          String url = urls.get(0);
-          srsp.setShardAddress(url);
-          try (SolrClient client = new Builder(url).withHttpClient(httpClient).build()) {
-            ssr.nl = client.request(req);
-          }
-        } else {
-          LBHttpSolrClient.Rsp rsp = httpShardHandlerFactory.makeLoadBalancedRequest(req, urls);
-          ssr.nl = rsp.getResponse();
-          srsp.setShardAddress(rsp.getServer());
-        }
-      }
-      catch( ConnectException cex ) {
-        srsp.setException(cex); //????
-      } catch (Exception th) {
-        srsp.setException(th);
-        if (th instanceof SolrException) {
-          srsp.setResponseCode(((SolrException)th).code());
-        } else {
-          srsp.setResponseCode(-1);
-        }
-      }
-
-      ssr.elapsedTime = TimeUnit.MILLISECONDS.convert(System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
-
-      return transfomResponse(sreq, srsp, shard);
-    };
-
-    try {
-      if (shard != null)  {
-        MDC.put("ShardRequest.shards", shard);
-      }
-      if (urls != null && !urls.isEmpty())  {
-        MDC.put("ShardRequest.urlList", urls.toString());
-      }
-      pending.add( completionService.submit(task) );
-    } finally {
-      MDC.remove("ShardRequest.shards");
-      MDC.remove("ShardRequest.urlList");
-    }
-  }
-  
-  /**
-   * Subclasses could modify the request based on the shard
-   */
-  protected QueryRequest makeQueryRequest(final ShardRequest sreq, ModifiableSolrParams params, String shard)
-  {
-    // use generic request to avoid extra processing of queries
-    return new QueryRequest(params);
-  }
-  
-  /**
-   * Subclasses could modify the Response based on the shard
-   */
-  protected ShardResponse transfomResponse(final ShardRequest sreq, ShardResponse rsp, String shard)
-  {
-    return rsp;
-  }
-
-  /** returns a ShardResponse of the last response correlated with a ShardRequest.  This won't 
-   * return early if it runs into an error.  
-   **/
-  @Override
-  public ShardResponse takeCompletedIncludingErrors() {
-    return take(false);
-  }
-
-
-  /** returns a ShardResponse of the last response correlated with a ShardRequest,
-   * or immediately returns a ShardResponse if there was an error detected
-   */
-  @Override
-  public ShardResponse takeCompletedOrError() {
-    return take(true);
-  }
-  
-  private ShardResponse take(boolean bailOnError) {
-    
-    while (pending.size() > 0) {
-      try {
-        Future<ShardResponse> future = completionService.take();
-        pending.remove(future);
-        ShardResponse rsp = future.get();
-        if (bailOnError && rsp.getException() != null) return rsp; // if exception, return immediately
-        // add response to the response list... we do this after the take() and
-        // not after the completion of "call" so we know when the last response
-        // for a request was received.  Otherwise we might return the same
-        // request more than once.
-        rsp.getShardRequest().responses.add(rsp);
-        if (rsp.getShardRequest().responses.size() == rsp.getShardRequest().actualShards.length) {
-          return rsp;
-        }
-      } catch (InterruptedException e) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
-      } catch (ExecutionException e) {
-        // should be impossible... the problem with catching the exception
-        // at this level is we don't know what ShardRequest it applied to
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Impossible Exception",e);
-      }
-    }
-    return null;
-  }
-
-
-  @Override
-  public void cancelAll() {
-    for (Future<ShardResponse> future : pending) {
-      future.cancel(false);
-    }
-  }
-
-  @Override
-  public void prepDistributed(ResponseBuilder rb) {
-    final SolrQueryRequest req = rb.req;
-    final SolrParams params = req.getParams();
-    final String shards = params.get(ShardParams.SHARDS);
-
-    // since the cost of grabbing cloud state is still up in the air, we grab it only
-    // if we need it.
-    ClusterState clusterState = null;
-    Map<String,Slice> slices = null;
-    CoreDescriptor coreDescriptor = req.getCore().getCoreDescriptor();
-    CloudDescriptor cloudDescriptor = coreDescriptor.getCloudDescriptor();
-    ZkController zkController = req.getCore().getCoreContainer().getZkController();
-
-    final ReplicaListTransformer replicaListTransformer = httpShardHandlerFactory.getReplicaListTransformer(req);
-
-    if (shards != null) {
-      List<String> lst = StrUtils.splitSmart(shards, ",", true);
-      rb.shards = lst.toArray(new String[lst.size()]);
-      rb.slices = new String[rb.shards.length];
-
-      if (zkController != null) {
-        // figure out which shards are slices
-        for (int i=0; i<rb.shards.length; i++) {
-          if (rb.shards[i].indexOf('/') < 0) {
-            // this is a logical shard
-            rb.slices[i] = rb.shards[i];
-            rb.shards[i] = null;
-          }
-        }
-      }
-    } else if (zkController != null) {
-      // we weren't provided with an explicit list of slices to query via "shards", so use the cluster state
-
-      clusterState =  zkController.getClusterState();
-      String shardKeys =  params.get(ShardParams._ROUTE_);
-
-      // This will be the complete list of slices we need to query for this request.
-      slices = new HashMap<>();
-
-      // we need to find out what collections this request is for.
-
-      // A comma-separated list of specified collections.
-      // Eg: "collection1,collection2,collection3"
-      String collections = params.get("collection");
-      if (collections != null) {
-        // If there were one or more collections specified in the query, split
-        // each parameter and store as a separate member of a List.
-        List<String> collectionList = StrUtils.splitSmart(collections, ",",
-            true);
-        // In turn, retrieve the slices that cover each collection from the
-        // cloud state and add them to the Map 'slices'.
-        for (String collectionName : collectionList) {
-          // The original code produced <collection-name>_<shard-name> when the collections
-          // parameter was specified (see ClientUtils.appendMap)
-          // Is this necessary if only one collection is specified?
-          // i.e. should we change multiCollection to collectionList.size() > 1?
-          addSlices(slices, clusterState, params, collectionName,  shardKeys, true);
-        }
-      } else {
-        // just this collection
-        String collectionName = cloudDescriptor.getCollectionName();
-        addSlices(slices, clusterState, params, collectionName,  shardKeys, false);
-      }
-
-
-      // Store the logical slices in the ResponseBuilder and create a new
-      // String array to hold the physical shards (which will be mapped
-      // later).
-      rb.slices = slices.keySet().toArray(new String[slices.size()]);
-      rb.shards = new String[rb.slices.length];
-    }
-
-    //
-    // Map slices to shards
-    //
-    if (zkController != null) {
-
-      // Are we hosting the shard that this request is for, and are we active? If so, then handle it ourselves
-      // and make it a non-distributed request.
-      String ourSlice = cloudDescriptor.getShardId();
-      String ourCollection = cloudDescriptor.getCollectionName();
-      // Some requests may only be fulfilled by replicas of type Replica.Type.NRT
-      boolean onlyNrtReplicas = Boolean.TRUE == req.getContext().get(ONLY_NRT_REPLICAS);
-      if (rb.slices.length == 1 && rb.slices[0] != null
-          && ( rb.slices[0].equals(ourSlice) || rb.slices[0].equals(ourCollection + "_" + ourSlice) )  // handle the <collection>_<slice> format
-          && cloudDescriptor.getLastPublished() == Replica.State.ACTIVE
-          && (!onlyNrtReplicas || cloudDescriptor.getReplicaType() == Replica.Type.NRT)) {
-        boolean shortCircuit = params.getBool("shortCircuit", true);       // currently just a debugging parameter to check distrib search on a single node
-
-        String targetHandler = params.get(ShardParams.SHARDS_QT);
-        shortCircuit = shortCircuit && targetHandler == null;             // if a different handler is specified, don't short-circuit
-
-        if (shortCircuit) {
-          rb.isDistrib = false;
-          rb.shortCircuitedURL = ZkCoreNodeProps.getCoreUrl(zkController.getBaseUrl(), coreDescriptor.getName());
-          return;
-        }
-        // We shouldn't need to do anything to handle "shard.rows" since it was previously meant to be an optimization?
-      }
-
-
-      for (int i=0; i<rb.shards.length; i++) {
-        if (rb.shards[i] != null) {
-          final List<String> shardUrls = StrUtils.splitSmart(rb.shards[i], "|", true);
-          replicaListTransformer.transform(shardUrls);
-          // And now recreate the | delimited list of equivalent servers
-          rb.shards[i] = createSliceShardsStr(shardUrls);
-        } else {
-          if (clusterState == null) {
-            clusterState =  zkController.getClusterState();
-            slices = clusterState.getCollection(cloudDescriptor.getCollectionName()).getSlicesMap();
-          }
-          String sliceName = rb.slices[i];
-
-          Slice slice = slices.get(sliceName);
-
-          if (slice==null) {
-            // Treat this the same as "all servers down" for a slice, and let things continue
-            // if partial results are acceptable
-            rb.shards[i] = "";
-            continue;
-            // throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "no such shard: " + sliceName);
-          }
-          final Predicate<Replica> isShardLeader = new Predicate<Replica>() {
-            private Replica shardLeader = null;
-
-            @Override
-            public boolean test(Replica replica) {
-              if (shardLeader == null) {
-                try {
-                  shardLeader = zkController.getZkStateReader().getLeaderRetry(cloudDescriptor.getCollectionName(), slice.getName());
-                } catch (InterruptedException e) {
-                  throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Exception finding leader for shard " + slice.getName() + " in collection " 
-                      + cloudDescriptor.getCollectionName(), e);
-                } catch (SolrException e) {
-                  if (log.isDebugEnabled()) {
-                    log.debug("Exception finding leader for shard {} in collection {}. Collection State: {}", 
-                        slice.getName(), cloudDescriptor.getCollectionName(), zkController.getZkStateReader().getClusterState().getCollectionOrNull(cloudDescriptor.getCollectionName()));
-                  }
-                  throw e;
-                }
-              }
-              return replica.getName().equals(shardLeader.getName());
-            }
-          };
-
-          final List<Replica> eligibleSliceReplicas = collectEligibleReplicas(slice, clusterState, onlyNrtReplicas, isShardLeader);
-
-          final List<String> shardUrls = transformReplicasToShardUrls(replicaListTransformer, eligibleSliceReplicas);
-
-          // And now recreate the | delimited list of equivalent servers
-          final String sliceShardsStr = createSliceShardsStr(shardUrls);
-          if (sliceShardsStr.isEmpty()) {
-            boolean tolerant = ShardParams.getShardsTolerantAsBool(rb.req.getParams());
-            if (!tolerant) {
-              // stop the check when there are no replicas available for a shard
-              throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE,
-                  "no servers hosting shard: " + rb.slices[i]);
-            }
-          }
-          rb.shards[i] = sliceShardsStr;
-        }
-      }
-    }
-    String shards_rows = params.get(ShardParams.SHARDS_ROWS);
-    if(shards_rows != null) {
-      rb.shards_rows = Integer.parseInt(shards_rows);
-    }
-    String shards_start = params.get(ShardParams.SHARDS_START);
-    if(shards_start != null) {
-      rb.shards_start = Integer.parseInt(shards_start);
-    }
-  }
-
-  private static List<Replica> collectEligibleReplicas(Slice slice, ClusterState clusterState, boolean onlyNrtReplicas, Predicate<Replica> isShardLeader) {
-    final Collection<Replica> allSliceReplicas = slice.getReplicasMap().values();
-    final List<Replica> eligibleSliceReplicas = new ArrayList<>(allSliceReplicas.size());
-    for (Replica replica : allSliceReplicas) {
-      if (!clusterState.liveNodesContain(replica.getNodeName())
-          || replica.getState() != Replica.State.ACTIVE
-          || (onlyNrtReplicas && replica.getType() == Replica.Type.PULL)) {
-        continue;
-      }
-
-      if (onlyNrtReplicas && replica.getType() == Replica.Type.TLOG) {
-        if (!isShardLeader.test(replica)) {
-          continue;
-        }
-      }
-      eligibleSliceReplicas.add(replica);
-    }
-    return eligibleSliceReplicas;
-  }
-
-  private static List<String> transformReplicasToShardUrls(final ReplicaListTransformer replicaListTransformer, final List<Replica> eligibleSliceReplicas) {
-    replicaListTransformer.transform(eligibleSliceReplicas);
-
-    final List<String> shardUrls = new ArrayList<>(eligibleSliceReplicas.size());
-    for (Replica replica : eligibleSliceReplicas) {
-      String url = ZkCoreNodeProps.getCoreUrl(replica);
-      shardUrls.add(url);
-    }
-    return shardUrls;
-  }
-
-  private static String createSliceShardsStr(final List<String> shardUrls) {
-    final StringBuilder sliceShardsStr = new StringBuilder();
-    boolean first = true;
-    for (String shardUrl : shardUrls) {
-      if (first) {
-        first = false;
-      } else {
-        sliceShardsStr.append('|');
-      }
-      sliceShardsStr.append(shardUrl);
-    }
-    return sliceShardsStr.toString();
-  }
-
-
-  private void addSlices(Map<String,Slice> target, ClusterState state, SolrParams params, String collectionName, String shardKeys, boolean multiCollection) {
-    DocCollection coll = state.getCollection(collectionName);
-    Collection<Slice> slices = coll.getRouter().getSearchSlices(shardKeys, params , coll);
-    ClientUtils.addSlices(target, collectionName, slices, multiCollection);
-  }
-
-  public ShardHandlerFactory getShardHandlerFactory(){
-    return httpShardHandlerFactory;
-  }
-
-
-
-}
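
For reference, the deleted HttpShardHandler exposes makeQueryRequest and transfomResponse as per-shard extension points (see the javadoc above). A minimal, hypothetical subclass, not part of this change and with a made-up "x-debug-shard" parameter, might look like:

    package org.apache.solr.handler.component;

    import org.apache.http.client.HttpClient;
    import org.apache.solr.client.solrj.request.QueryRequest;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class TaggingShardHandler extends HttpShardHandler {

      public TaggingShardHandler(HttpShardHandlerFactory factory, HttpClient httpClient) {
        super(factory, httpClient);
      }

      @Override
      protected QueryRequest makeQueryRequest(ShardRequest sreq, ModifiableSolrParams params, String shard) {
        // Tag each outgoing per-shard request with the shard it targets (hypothetical parameter name).
        params.set("x-debug-shard", shard);
        return super.makeQueryRequest(sreq, params, shard);
      }

      @Override
      protected ShardResponse transfomResponse(ShardRequest sreq, ShardResponse rsp, String shard) {
        // Pass the response through unchanged; a subclass could annotate or rewrite it here.
        return super.transfomResponse(sreq, rsp, shard);
      }
    }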

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
deleted file mode 100644
index 1bb1fdb..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
+++ /dev/null
@@ -1,484 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.http.client.HttpClient;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpClientUtil;
-import org.apache.solr.client.solrj.impl.LBHttpSolrClient;
-import org.apache.solr.client.solrj.impl.LBHttpSolrClient.Builder;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.URLUtil;
-import org.apache.solr.core.PluginInfo;
-import org.apache.solr.core.SolrInfoBean;
-import org.apache.solr.metrics.SolrMetricManager;
-import org.apache.solr.metrics.SolrMetricProducer;
-import org.apache.solr.update.UpdateShardHandlerConfig;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.util.DefaultSolrThreadFactory;
-import org.apache.solr.util.stats.HttpClientMetricNameStrategy;
-import org.apache.solr.util.stats.InstrumentedHttpRequestExecutor;
-import org.apache.solr.util.stats.InstrumentedPoolingHttpClientConnectionManager;
-import org.apache.solr.util.stats.MetricUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Random;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.CompletionService;
-import java.util.concurrent.ExecutorCompletionService;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.solr.util.stats.InstrumentedHttpRequestExecutor.KNOWN_METRIC_NAME_STRATEGIES;
-
-
-public class HttpShardHandlerFactory extends ShardHandlerFactory implements org.apache.solr.util.plugin.PluginInfoInitialized, SolrMetricProducer {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private static final String DEFAULT_SCHEME = "http";
-  
-  // We want an executor that doesn't take up any resources if
-  // it's not used, so it could be created statically for
-  // the distributed search component if desired.
-  //
-  // Consider CallerRuns policy and a lower max thread count to throttle
-  // requests at some point (or should we simply return failure?)
-  private ExecutorService commExecutor = new ExecutorUtil.MDCAwareThreadPoolExecutor(
-      0,
-      Integer.MAX_VALUE,
-      5, TimeUnit.SECONDS, // terminate idle threads after 5 sec
-      new SynchronousQueue<>(),  // directly hand off tasks
-      new DefaultSolrThreadFactory("httpShardExecutor"),
-      // the Runnable added to this executor handles all exceptions so we disable stack trace collection as an optimization
-      // see SOLR-11880 for more details
-      false
-  );
-
-  protected InstrumentedPoolingHttpClientConnectionManager clientConnectionManager;
-  protected CloseableHttpClient defaultClient;
-  protected InstrumentedHttpRequestExecutor httpRequestExecutor;
-  private LBHttpSolrClient loadbalancer;
-  //default values:
-  int soTimeout = HttpClientUtil.DEFAULT_SO_TIMEOUT;
-  int connectionTimeout = HttpClientUtil.DEFAULT_CONNECT_TIMEOUT;
-  int maxConnectionsPerHost = HttpClientUtil.DEFAULT_MAXCONNECTIONSPERHOST;
-  int maxConnections = HttpClientUtil.DEFAULT_MAXCONNECTIONS;
-  int corePoolSize = 0;
-  int maximumPoolSize = Integer.MAX_VALUE;
-  int keepAliveTime = 5;
-  int queueSize = -1;
-  int   permittedLoadBalancerRequestsMinimumAbsolute = 0;
-  float permittedLoadBalancerRequestsMaximumFraction = 1.0f;
-  boolean accessPolicy = false;
-
-  private String scheme = null;
-
-  private HttpClientMetricNameStrategy metricNameStrategy;
-
-  private String metricTag;
-
-  protected final Random r = new Random();
-
-  private final ReplicaListTransformer shufflingReplicaListTransformer = new ShufflingReplicaListTransformer(r);
-
-  // URL scheme to be used in distributed search.
-  static final String INIT_URL_SCHEME = "urlScheme";
-
-  // The core size of the threadpool servicing requests
-  static final String INIT_CORE_POOL_SIZE = "corePoolSize";
-
-  // The maximum size of the threadpool servicing requests
-  static final String INIT_MAX_POOL_SIZE = "maximumPoolSize";
-
-  // The amount of time idle threads are kept alive before being terminated
-  static final String MAX_THREAD_IDLE_TIME = "maxThreadIdleTime";
-
-  // If the threadpool uses a backing queue, what is its maximum size; use -1 for direct handoff
-  static final String INIT_SIZE_OF_QUEUE = "sizeOfQueue";
-
-  // The minimum number of replicas that may be used
-  static final String LOAD_BALANCER_REQUESTS_MIN_ABSOLUTE = "loadBalancerRequestsMinimumAbsolute";
-
-  // The maximum proportion of replicas to be used
-  static final String LOAD_BALANCER_REQUESTS_MAX_FRACTION = "loadBalancerRequestsMaximumFraction";
-
-  // Configure if the threadpool favours fairness over throughput
-  static final String INIT_FAIRNESS_POLICY = "fairnessPolicy";
-
-  /**
-   * Get {@link ShardHandler} that uses the default http client.
-   */
-  @Override
-  public ShardHandler getShardHandler() {
-    return getShardHandler(defaultClient);
-  }
-
-  /**
-   * Get {@link ShardHandler} that uses custom http client.
-   */
-  public ShardHandler getShardHandler(final HttpClient httpClient){
-    return new HttpShardHandler(this, httpClient);
-  }
-
-  @Override
-  public void init(PluginInfo info) {
-    StringBuilder sb = new StringBuilder();
-    NamedList args = info.initArgs;
-    this.soTimeout = getParameter(args, HttpClientUtil.PROP_SO_TIMEOUT, soTimeout,sb);
-    this.scheme = getParameter(args, INIT_URL_SCHEME, null,sb);
-    if(StringUtils.endsWith(this.scheme, "://")) {
-      this.scheme = StringUtils.removeEnd(this.scheme, "://");
-    }
-
-    String strategy = getParameter(args, "metricNameStrategy", UpdateShardHandlerConfig.DEFAULT_METRICNAMESTRATEGY, sb);
-    this.metricNameStrategy = KNOWN_METRIC_NAME_STRATEGIES.get(strategy);
-    if (this.metricNameStrategy == null)  {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Unknown metricNameStrategy: " + strategy + " found. Must be one of: " + KNOWN_METRIC_NAME_STRATEGIES.keySet());
-    }
-
-    this.connectionTimeout = getParameter(args, HttpClientUtil.PROP_CONNECTION_TIMEOUT, connectionTimeout, sb);
-    this.maxConnectionsPerHost = getParameter(args, HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, maxConnectionsPerHost,sb);
-    this.maxConnections = getParameter(args, HttpClientUtil.PROP_MAX_CONNECTIONS, maxConnections,sb);
-    this.corePoolSize = getParameter(args, INIT_CORE_POOL_SIZE, corePoolSize,sb);
-    this.maximumPoolSize = getParameter(args, INIT_MAX_POOL_SIZE, maximumPoolSize,sb);
-    this.keepAliveTime = getParameter(args, MAX_THREAD_IDLE_TIME, keepAliveTime,sb);
-    this.queueSize = getParameter(args, INIT_SIZE_OF_QUEUE, queueSize,sb);
-    this.permittedLoadBalancerRequestsMinimumAbsolute = getParameter(
-        args,
-        LOAD_BALANCER_REQUESTS_MIN_ABSOLUTE,
-        permittedLoadBalancerRequestsMinimumAbsolute,
-        sb);
-    this.permittedLoadBalancerRequestsMaximumFraction = getParameter(
-        args,
-        LOAD_BALANCER_REQUESTS_MAX_FRACTION,
-        permittedLoadBalancerRequestsMaximumFraction,
-        sb);
-    this.accessPolicy = getParameter(args, INIT_FAIRNESS_POLICY, accessPolicy,sb);
-    log.debug("created with {}",sb);
-    
-    // magic sysprop to make tests reproducible: set by SolrTestCaseJ4.
-    String v = System.getProperty("tests.shardhandler.randomSeed");
-    if (v != null) {
-      r.setSeed(Long.parseLong(v));
-    }
-
-    BlockingQueue<Runnable> blockingQueue = (this.queueSize == -1) ?
-        new SynchronousQueue<Runnable>(this.accessPolicy) :
-        new ArrayBlockingQueue<Runnable>(this.queueSize, this.accessPolicy);
-
-    this.commExecutor = new ExecutorUtil.MDCAwareThreadPoolExecutor(
-        this.corePoolSize,
-        this.maximumPoolSize,
-        this.keepAliveTime, TimeUnit.SECONDS,
-        blockingQueue,
-        new DefaultSolrThreadFactory("httpShardExecutor")
-    );
-
-    ModifiableSolrParams clientParams = getClientParams();
-    httpRequestExecutor = new InstrumentedHttpRequestExecutor(this.metricNameStrategy);
-    clientConnectionManager = new InstrumentedPoolingHttpClientConnectionManager(HttpClientUtil.getSchemaRegisteryProvider().getSchemaRegistry());
-    this.defaultClient = HttpClientUtil.createClient(clientParams, clientConnectionManager, false, httpRequestExecutor);
-    this.loadbalancer = createLoadbalancer(defaultClient);
-  }
-
-  protected ModifiableSolrParams getClientParams() {
-    ModifiableSolrParams clientParams = new ModifiableSolrParams();
-    clientParams.set(HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, maxConnectionsPerHost);
-    clientParams.set(HttpClientUtil.PROP_MAX_CONNECTIONS, maxConnections);
-    return clientParams;
-  }
-
-  protected ExecutorService getThreadPoolExecutor(){
-    return this.commExecutor;
-  }
-
-  protected LBHttpSolrClient createLoadbalancer(HttpClient httpClient){
-    LBHttpSolrClient client = new Builder()
-        .withHttpClient(httpClient)
-        .withConnectionTimeout(connectionTimeout)
-        .withSocketTimeout(soTimeout)
-        .build();
-    return client;
-  }
-
-  protected <T> T getParameter(NamedList initArgs, String configKey, T defaultValue, StringBuilder sb) {
-    T toReturn = defaultValue;
-    if (initArgs != null) {
-      T temp = (T) initArgs.get(configKey);
-      toReturn = (temp != null) ? temp : defaultValue;
-    }
-    if(sb!=null && toReturn != null) sb.append(configKey).append(" : ").append(toReturn).append(",");
-    return toReturn;
-  }
-
-
-  @Override
-  public void close() {
-    try {
-      ExecutorUtil.shutdownAndAwaitTermination(commExecutor);
-    } finally {
-      try {
-        if (loadbalancer != null) {
-          loadbalancer.close();
-        }
-      } finally { 
-        if (defaultClient != null) {
-          HttpClientUtil.close(defaultClient);
-        }
-        if (clientConnectionManager != null)  {
-          clientConnectionManager.close();
-        }
-      }
-    }
-  }
-
-  /**
-   * Makes a request to one or more of the given urls, using the configured load balancer.
-   *
-   * @param req The solr search request that should be sent through the load balancer
-   * @param urls The list of solr server urls to load balance across
-   * @return The response from the request
-   */
-  public LBHttpSolrClient.Rsp makeLoadBalancedRequest(final QueryRequest req, List<String> urls)
-    throws SolrServerException, IOException {
-    return loadbalancer.request(newLBHttpSolrClientReq(req, urls));
-  }
-
-  protected LBHttpSolrClient.Req newLBHttpSolrClientReq(final QueryRequest req, List<String> urls) {
-    int numServersToTry = (int)Math.floor(urls.size() * this.permittedLoadBalancerRequestsMaximumFraction);
-    if (numServersToTry < this.permittedLoadBalancerRequestsMinimumAbsolute) {
-      numServersToTry = this.permittedLoadBalancerRequestsMinimumAbsolute;
-    }
-    return new LBHttpSolrClient.Req(req, urls, numServersToTry);
-  }
-
-  /**
-   * Creates a list of urls for the given shard.
-   *
-   * @param shard the urls for the shard, separated by '|'
-   * @return A list of valid urls (including protocol) that are replicas for the shard
-   */
-  public List<String> buildURLList(String shard) {
-    List<String> urls = StrUtils.splitSmart(shard, "|", true);
-
-    // convert shard to URL
-    for (int i=0; i<urls.size(); i++) {
-      urls.set(i, buildUrl(urls.get(i)));
-    }
-
-    return urls;
-  }
-
-  /**
-   * A distributed request is made via {@link LBHttpSolrClient} to the first live server in the URL list.
- * This means it is just as likely to choose the current host as any of the other hosts.
-   * This function makes sure that the cores are sorted according to the given list of preferences.
- * E.g. if all nodes prefer local cores, then a bad/heavily-loaded node will receive fewer requests from 
-   * healthy nodes. This will help prevent a distributed deadlock or timeouts in all the healthy nodes due 
-   * to one bad node.
-   */
-  static class NodePreferenceRulesComparator implements Comparator<Object> {
-    private static class PreferenceRule {
-      public final String name;
-      public final String value;
-
-      public PreferenceRule(String name, String value) {
-        this.name = name;
-        this.value = value;
-      }
-    }
-
-    private final SolrQueryRequest request;
-    private List<PreferenceRule> preferenceRules;
-    private String localHostAddress = null;
-
-    public NodePreferenceRulesComparator(final List<String> sortRules, final SolrQueryRequest request) {
-      this.request = request;
-      this.preferenceRules = new ArrayList<PreferenceRule>(sortRules.size());
-      sortRules.forEach(rule -> {
-        String[] parts = rule.split(":", 2);
-        if (parts.length != 2) {
-          throw new IllegalArgumentException("Invalid " + ShardParams.SHARDS_PREFERENCE + " rule: " + rule);
-        }
-        this.preferenceRules.add(new PreferenceRule(parts[0], parts[1])); 
-      });
-    }
-    @Override
-    public int compare(Object left, Object right) {
-      for (PreferenceRule preferenceRule: this.preferenceRules) {
-        final boolean lhs;
-        final boolean rhs;
-        switch (preferenceRule.name) {
-          case ShardParams.SHARDS_PREFERENCE_REPLICA_TYPE:
-            lhs = hasReplicaType(left, preferenceRule.value);
-            rhs = hasReplicaType(right, preferenceRule.value);
-            break;
-          case ShardParams.SHARDS_PREFERENCE_REPLICA_LOCATION:
-            lhs = hasCoreUrlPrefix(left, preferenceRule.value);
-            rhs = hasCoreUrlPrefix(right, preferenceRule.value);
-            break;
-          default:
-            throw new IllegalArgumentException("Invalid " + ShardParams.SHARDS_PREFERENCE + " type: " + preferenceRule.name);
-        }
-        if (lhs != rhs) {
-          return lhs ? -1 : +1;
-        }
-      }
-      return 0;
-    }
-    private boolean hasCoreUrlPrefix(Object o, String prefix) {
-      final String s;
-      if (o instanceof String) {
-        s = (String)o;
-      }
-      else if (o instanceof Replica) {
-        s = ((Replica)o).getCoreUrl();
-      } else {
-        return false;
-      }
-      if (prefix.equals(ShardParams.REPLICA_LOCAL)) {
-        if (null == localHostAddress) {
-          final ZkController zkController = this.request.getCore().getCoreContainer().getZkController();
-          localHostAddress = zkController != null ? zkController.getBaseUrl() : "";
-          if (localHostAddress.isEmpty()) {
-            log.warn("Couldn't determine current host address for sorting of local replicas");
-          }
-        }
-        if (!localHostAddress.isEmpty()) {
-          if (s.startsWith(localHostAddress)) {
-            return true;
-          }
-        }
-      } else {
-        if (s.startsWith(prefix)) {
-          return true;
-        }
-      }
-      return false;
-    }
-    private static boolean hasReplicaType(Object o, String preferred) {
-      if (!(o instanceof Replica)) {
-        return false;
-      }
-      final String s = ((Replica)o).getType().toString();
-      return s.equals(preferred);
-    }
-  }
-
-  protected ReplicaListTransformer getReplicaListTransformer(final SolrQueryRequest req) {
-    final SolrParams params = req.getParams();
-    @SuppressWarnings("deprecation")
-    final boolean preferLocalShards = params.getBool(CommonParams.PREFER_LOCAL_SHARDS, false);
-    final String shardsPreferenceSpec = params.get(ShardParams.SHARDS_PREFERENCE, "");
-
-    if (preferLocalShards || !shardsPreferenceSpec.isEmpty()) {
-      if (preferLocalShards && !shardsPreferenceSpec.isEmpty()) {
-        throw new SolrException(
-          SolrException.ErrorCode.BAD_REQUEST,
-          "preferLocalShards is deprecated and must not be used with shards.preference" 
-        );
-      }
-      List<String> preferenceRules = StrUtils.splitSmart(shardsPreferenceSpec, ',');
-      if (preferLocalShards) {
-        preferenceRules.add(ShardParams.SHARDS_PREFERENCE_REPLICA_LOCATION + ":" + ShardParams.REPLICA_LOCAL);
-      }
-
-      return new ShufflingReplicaListTransformer(r) {
-        @Override
-        public void transform(List<?> choices)
-        {
-          if (choices.size() > 1) {
-            super.transform(choices);
-            if (log.isDebugEnabled()) {
-              log.debug("Applying the following sorting preferences to replicas: {}",
-                  Arrays.toString(preferenceRules.toArray()));
-            }
-            try {
-              choices.sort(new NodePreferenceRulesComparator(preferenceRules, req));
-            } catch (IllegalArgumentException iae) {
-              throw new SolrException(
-                SolrException.ErrorCode.BAD_REQUEST,
-                iae.getMessage()
-              );
-            }
-            if (log.isDebugEnabled()) {
-              log.debug("Applied sorting preferences to replica list: {}",
-                  Arrays.toString(choices.toArray()));
-            }
-          }
-        }
-      };
-    }
-
-    return shufflingReplicaListTransformer;
-  }
-
-  /**
-   * Creates a new completion service for use by a single set of distributed requests.
-   */
-  public CompletionService newCompletionService() {
-    return new ExecutorCompletionService<ShardResponse>(commExecutor);
-  }
-  
-  /**
-   * Rebuilds the URL replacing the URL scheme of the passed URL with the
- * configured scheme replacement. If no scheme was configured, the passed URL's
-   * scheme is left alone.
-   */
-  private String buildUrl(String url) {
-    if(!URLUtil.hasScheme(url)) {
-      return StringUtils.defaultIfEmpty(scheme, DEFAULT_SCHEME) + "://" + url;
-    } else if(StringUtils.isNotEmpty(scheme)) {
-      return scheme + "://" + URLUtil.removeScheme(url);
-    }
-    
-    return url;
-  }
-
-  @Override
-  public void initializeMetrics(SolrMetricManager manager, String registry, String tag, String scope) {
-    this.metricTag = tag;
-    String expandedScope = SolrMetricManager.mkName(scope, SolrInfoBean.Category.QUERY.name());
-    clientConnectionManager.initializeMetrics(manager, registry, tag, expandedScope);
-    httpRequestExecutor.initializeMetrics(manager, registry, tag, expandedScope);
-    commExecutor = MetricUtils.instrumentedExecutorService(commExecutor, null,
-        manager.registry(registry),
-        SolrMetricManager.mkName("httpShardExecutor", expandedScope, "threadPool"));
-  }
-
-}
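
The loadBalancerRequestsMaximumFraction and loadBalancerRequestsMinimumAbsolute settings above bound how many of a shard's replica URLs a load-balanced request may try (see newLBHttpSolrClientReq). A small self-contained sketch of that sizing rule, with made-up numbers:

    public final class LbSizingExample {
      // Mirrors the sizing logic in newLBHttpSolrClientReq: try at most
      // floor(numUrls * maxFraction) servers, but never fewer than minAbsolute.
      static int numServersToTry(int numUrls, float maxFraction, int minAbsolute) {
        return Math.max((int) Math.floor(numUrls * maxFraction), minAbsolute);
      }

      public static void main(String[] args) {
        // 10 replica URLs, loadBalancerRequestsMaximumFraction=0.5,
        // loadBalancerRequestsMinimumAbsolute=2 -> up to 5 servers tried
        System.out.println(numServersToTry(10, 0.5f, 2)); // 5
        // 3 replica URLs with the same settings: floor(1.5)=1, bumped up to the minimum of 2
        System.out.println(numServersToTry(3, 0.5f, 2));  // 2
      }
    }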

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/IterativeMergeStrategy.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/IterativeMergeStrategy.java b/solr/core/src/java/org/apache/solr/handler/component/IterativeMergeStrategy.java
deleted file mode 100644
index 97d4199..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/IterativeMergeStrategy.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.lang.invoke.MethodHandles;
-import java.util.concurrent.Callable;
-import java.util.concurrent.Future;
-import java.util.concurrent.ExecutorService;
-import java.util.List;
-import java.util.ArrayList;
-
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.impl.HttpClientUtil;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient.Builder;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.SolrjNamedThreadFactory;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.http.client.HttpClient;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.DISTRIB;
-
-public abstract class IterativeMergeStrategy implements MergeStrategy  {
-
-  protected ExecutorService executorService;
-  protected static HttpClient httpClient;
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public void merge(ResponseBuilder rb, ShardRequest sreq) {
-    rb._responseDocs = new SolrDocumentList(); // Null pointers will occur otherwise.
-    rb.onePassDistributedQuery = true;   // Turn off the second pass distributed.
-    executorService =     ExecutorUtil.newMDCAwareCachedThreadPool(new SolrjNamedThreadFactory("IterativeMergeStrategy"));
-    try {
-      process(rb, sreq);
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    } finally {
-      executorService.shutdownNow();
-    }
-  }
-
-  public boolean mergesIds() {
-    return true;
-  }
-
-  public int getCost() {
-    return 0;
-  }
-
-  public boolean handlesMergeFields() {
-    return false;
-  }
-
-  public void handleMergeFields(ResponseBuilder rb, SolrIndexSearcher searcher) {
-
-  }
-
-  public static class CallBack implements Callable<CallBack> {
-    private HttpSolrClient solrClient;
-    private QueryRequest req;
-    private QueryResponse response;
-    private ShardResponse originalShardResponse;
-
-    public CallBack(ShardResponse originalShardResponse, QueryRequest req) {
-
-      this.solrClient = new Builder(originalShardResponse.getShardAddress())
-          .withHttpClient(getHttpClient())
-          .build();
-      this.req = req;
-      this.originalShardResponse = originalShardResponse;
-      req.setMethod(SolrRequest.METHOD.POST);
-      ModifiableSolrParams params = (ModifiableSolrParams)req.getParams();
-      params.add(DISTRIB, "false");
-    }
-
-    public QueryResponse getResponse() {
-      return this.response;
-    }
-
-    public ShardResponse getOriginalShardResponse() {
-      return this.originalShardResponse;
-    }
-
-    public CallBack call() throws Exception{
-      this.response = req.process(solrClient);
-      return this;
-    }
-  }
-
-  public List<Future<CallBack>> callBack(List<ShardResponse> responses, QueryRequest req) {
-    List<Future<CallBack>> futures = new ArrayList();
-    for(ShardResponse response : responses) {
-      futures.add(this.executorService.submit(new CallBack(response, req)));
-    }
-    return futures;
-  }
-
-  public Future<CallBack> callBack(ShardResponse response, QueryRequest req) {
-    return this.executorService.submit(new CallBack(response, req));
-  }
-
-  protected abstract void process(ResponseBuilder rb, ShardRequest sreq) throws Exception;
-
-  static synchronized HttpClient getHttpClient() {
-
-      if(httpClient == null) {
-        ModifiableSolrParams params = new ModifiableSolrParams();
-        params.set(HttpClientUtil.PROP_MAX_CONNECTIONS, 128);
-        params.set(HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, 32);
-        httpClient = HttpClientUtil.createClient(params);
-        return httpClient;
-      } else {
-        return httpClient;
-      }
-  }
-}
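
Concrete merge strategies extend the deleted IterativeMergeStrategy by implementing process(ResponseBuilder, ShardRequest), typically re-querying each shard through callBack(...). A hypothetical sketch (the follow-up query and the "mergedNumFound" key are invented for illustration):

    package org.apache.solr.handler.component;

    import java.util.List;
    import java.util.concurrent.Future;

    import org.apache.solr.client.solrj.request.QueryRequest;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class CountingMergeStrategy extends IterativeMergeStrategy {
      @Override
      protected void process(ResponseBuilder rb, ShardRequest sreq) throws Exception {
        // Hypothetical non-distributed follow-up request sent back to every shard that answered.
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("q", "*:*");
        QueryRequest followUp = new QueryRequest(params);

        // callBack(...) submits one request per original shard response on the strategy's executor.
        List<Future<CallBack>> futures = callBack(sreq.responses, followUp);
        long total = 0;
        for (Future<CallBack> f : futures) {
          // Hypothetical merge step: accumulate each shard's hit count.
          total += f.get().getResponse().getResults().getNumFound();
        }
        rb.rsp.add("mergedNumFound", total);
      }
    }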

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/MergeStrategy.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/MergeStrategy.java b/solr/core/src/java/org/apache/solr/handler/component/MergeStrategy.java
deleted file mode 100644
index 503dfb5..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/MergeStrategy.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import org.apache.solr.search.SolrIndexSearcher;
-
-import java.util.Comparator;
-import java.io.IOException;
-
-/**
-* The MergeStrategy class defines custom merge logic for distributed searches.
-*
-*  <b>Note: This API is experimental and may change in non-backward-compatible ways in the future</b>
-**/
-
-
-public interface MergeStrategy {
-
-  /**
-  *  merge defines the merging behavior of results that are collected from the
-  *  shards during a distributed search.
-  *
-  **/
-
-  public void merge(ResponseBuilder rb, ShardRequest sreq);
-
-  /**
-  * mergesIds must return true if the merge method merges document ids from the shards.
-  * If it merges other output from the shards it must return false.
-  * */
-
-  public boolean mergesIds();
-
-
-  /**
-  * handlesMergeFields must return true if the MergeStrategy
-  * implements a custom handleMergeFields(ResponseBuilder rb, SolrIndexSearcher searcher)
-  * */
-
-  public boolean handlesMergeFields();
-
-
-  /**
-  *  Implement handleMergeFields(ResponseBuilder rb, SolrIndexSearcher searcher) if
-  *  your merge strategy needs more complex data than the sort fields provide.
-  * */
-
-  public void handleMergeFields(ResponseBuilder rb, SolrIndexSearcher searcher) throws IOException;
-
-  /**
-  *  Defines the order in which the mergeStrategies are applied. Lower costs are applied first.
-  * */
-  public int getCost();
-
-  final Comparator MERGE_COMP = (o1, o2) -> {
-    MergeStrategy m1 = (MergeStrategy) o1;
-    MergeStrategy m2 = (MergeStrategy) o2;
-    return m1.getCost() - m2.getCost();
-  };
-
-}
\ No newline at end of file
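
The interface above is the extension contract for distributed-search merging; getCost() together with MERGE_COMP determines the order in which strategies are applied (lower cost first). A minimal, hypothetical no-op implementation, for illustration only:

    package org.apache.solr.handler.component;

    import java.io.IOException;

    import org.apache.solr.search.SolrIndexSearcher;

    public class NoOpMergeStrategy implements MergeStrategy {
      @Override
      public void merge(ResponseBuilder rb, ShardRequest sreq) {
        // A real strategy would fold the per-shard responses in sreq.responses into rb here.
      }

      @Override
      public boolean mergesIds() { return false; }          // does not merge document ids

      @Override
      public boolean handlesMergeFields() { return false; } // no custom handleMergeFields step

      @Override
      public void handleMergeFields(ResponseBuilder rb, SolrIndexSearcher searcher) throws IOException { }

      @Override
      public int getCost() { return 0; }                    // lower cost: applied before costlier strategies
    }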

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java b/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java
deleted file mode 100644
index fd9d37d..0000000
--- a/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java
+++ /dev/null
@@ -1,428 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.TreeMap;
-
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.MoreLikeThisParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.handler.MoreLikeThisHandler;
-import org.apache.solr.schema.IndexSchema;
-import org.apache.solr.search.DocIterator;
-import org.apache.solr.search.DocList;
-import org.apache.solr.search.DocListAndSet;
-import org.apache.solr.search.ReturnFields;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.search.SolrReturnFields;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.SORT;
-
-/**
- * TODO!
- * 
- * 
- * @since solr 1.3
- */
-public class MoreLikeThisComponent extends SearchComponent {
-  public static final String COMPONENT_NAME = "mlt";
-  public static final String DIST_DOC_ID = "mlt.dist.id";
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  @Override
-  public void prepare(ResponseBuilder rb) throws IOException {
-    if (rb.req.getParams().getBool(MoreLikeThisParams.MLT, false)) {
-      rb.setNeedDocList(true);
-    }
-    
-  }
-  
-  @Override
-  public void process(ResponseBuilder rb) throws IOException {
-
-    SolrParams params = rb.req.getParams();
-    
-    if (params.getBool(MoreLikeThisParams.MLT, false)) {
-      ReturnFields returnFields = new SolrReturnFields( rb.req );
-  
-      int flags = 0;
-      if (returnFields.wantsScore()) {
-        flags |= SolrIndexSearcher.GET_SCORES;
-      }
-  
-      rb.setFieldFlags(flags);
-
-      log.debug("Starting MoreLikeThis.Process.  isShard: "
-          + params.getBool(ShardParams.IS_SHARD));
-      SolrIndexSearcher searcher = rb.req.getSearcher();
-
-      if (params.getBool(ShardParams.IS_SHARD, false)) {
-        if (params.get(MoreLikeThisComponent.DIST_DOC_ID) == null) {
-          if (rb.getResults().docList.size() == 0) {
-            // return empty response
-            rb.rsp.add("moreLikeThis", new NamedList<DocList>());
-            return;
-          }
-          
-          MoreLikeThisHandler.MoreLikeThisHelper mlt = new MoreLikeThisHandler.MoreLikeThisHelper(
-              params, searcher);
-
-          NamedList<BooleanQuery> bQuery = mlt.getMoreLikeTheseQuery(rb
-              .getResults().docList);
-          
-          NamedList<String> temp = new NamedList<>();
-          Iterator<Entry<String,BooleanQuery>> idToQueryIt = bQuery.iterator();
-
-          
-          while (idToQueryIt.hasNext()) {
-            Entry<String,BooleanQuery> idToQuery = idToQueryIt.next();
-            String s = idToQuery.getValue().toString();
-
-            log.debug("MLT Query:" + s);
-            temp.add(idToQuery.getKey(), idToQuery.getValue().toString());
-          }
-
-          rb.rsp.add("moreLikeThis", temp);
-        } else {
-          NamedList<DocList> sim = getMoreLikeThese(rb, rb.req.getSearcher(),
-              rb.getResults().docList, flags);
-          rb.rsp.add("moreLikeThis", sim);
-        }
-      } else {
-        // non distrib case
-        NamedList<DocList> sim = getMoreLikeThese(rb, rb.req.getSearcher(), rb.getResults().docList,
-            flags);
-        rb.rsp.add("moreLikeThis", sim);
-      }
-    }
-  }
-  
-  @Override
-  public void handleResponses(ResponseBuilder rb, ShardRequest sreq) {
-    if ((sreq.purpose & ShardRequest.PURPOSE_GET_TOP_IDS) != 0
-        && rb.req.getParams().getBool(COMPONENT_NAME, false)) {
-      log.debug("ShardRequest.response.size: " + sreq.responses.size());
-      for (ShardResponse r : sreq.responses) {
-        if (r.getException() != null) {
-          // This should only happen in case of using shards.tolerant=true. Omit this ShardResponse
-          continue;
-        }
-        NamedList<?> moreLikeThisResponse = (NamedList<?>) r.getSolrResponse()
-            .getResponse().get("moreLikeThis");
-        log.debug("ShardRequest.response.shard: " + r.getShard());
-        if (moreLikeThisResponse != null) {
-          for (Entry<String,?> entry : moreLikeThisResponse) {
-            log.debug("id: \"" + entry.getKey() + "\" Query: \""
-                + entry.getValue() + "\"");
-            ShardRequest s = buildShardQuery(rb, (String) entry.getValue(),
-                entry.getKey());
-            rb.addRequest(this, s);
-          }
-        }
-      }
-    }
-    
-    if ((sreq.purpose & ShardRequest.PURPOSE_GET_MLT_RESULTS) != 0) {
-      for (ShardResponse r : sreq.responses) {
-        log.debug("MLT Query returned: "
-            + r.getSolrResponse().getResponse().toString());
-      }
-    }
-  }
-  
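-  /**
-   * At {@code STAGE_GET_FIELDS}, merges the document lists returned by the
-   * MLT shard requests, grouped by their {@link #DIST_DOC_ID}, de-duplicated
-   * and truncated to {@code MoreLikeThisParams.DOC_COUNT} entries, and adds
-   * them to the response in the order of the main result ids.
-   */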
-  @Override
-  public void finishStage(ResponseBuilder rb) {
-    
-    // Responses are handled in finishStage because adding the moreLikeThis
-    // section earlier would place it ahead of the main result/response
-    // section in the output.
-    if (rb.stage == ResponseBuilder.STAGE_GET_FIELDS
-        && rb.req.getParams().getBool(COMPONENT_NAME, false)) {
-      Map<Object,SolrDocumentList> tempResults = new LinkedHashMap<>();
-      
-      int mltcount = rb.req.getParams().getInt(MoreLikeThisParams.DOC_COUNT, MoreLikeThisParams.DEFAULT_DOC_COUNT);
-      String keyName = rb.req.getSchema().getUniqueKeyField().getName();
-      
-      for (ShardRequest sreq : rb.finished) {
-        if ((sreq.purpose & ShardRequest.PURPOSE_GET_MLT_RESULTS) != 0) {
-          for (ShardResponse r : sreq.responses) {
-            log.debug("ShardRequest.response.shard: " + r.getShard());
-            String key = r.getShardRequest().params
-                .get(MoreLikeThisComponent.DIST_DOC_ID);
-            SolrDocumentList shardDocList =  (SolrDocumentList) r.getSolrResponse().getResponse().get("response");
-
-            if (shardDocList == null) {
-              continue;
-            }
- 
-            log.info("MLT: results added for key: " + key + " documents: "
-                + shardDocList.toString());
-//            if (log.isDebugEnabled()) {
-//              for (SolrDocument doc : shardDocList) {
-//                doc.addField("shard", "=" + r.getShard());
-//              }
-//            }
-            SolrDocumentList mergedDocList = tempResults.get(key);
- 
-            if (mergedDocList == null) {
-              mergedDocList = new SolrDocumentList();
-              mergedDocList.addAll(shardDocList);
-              mergedDocList.setNumFound(shardDocList.getNumFound());
-              mergedDocList.setStart(shardDocList.getStart());
-              mergedDocList.setMaxScore(shardDocList.getMaxScore());
-            } else {
-              mergedDocList = mergeSolrDocumentList(mergedDocList,
-                  shardDocList, mltcount, keyName);
-            }
-            log.debug("Adding docs for key: " + key);
-            tempResults.put(key, mergedDocList);
-          }
-        }
-      }
-
-      NamedList<SolrDocumentList> list = buildMoreLikeThisNamed(tempResults,
-          rb.resultIds);
-     
-      rb.rsp.add("moreLikeThis", list);
-      
-    }
-    super.finishStage(rb);
-  }
-
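-  /**
-   * Disables this component on outgoing shard requests whose purpose is
-   * neither {@code PURPOSE_GET_TOP_IDS} nor {@code PURPOSE_GET_MLT_RESULTS}.
-   */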
-  @Override
-  public void modifyRequest(ResponseBuilder rb, SearchComponent who, ShardRequest sreq) {
-    SolrParams params = rb.req.getParams();
-    if (!params.getBool(COMPONENT_NAME, false)) return;
-    if ((sreq.purpose & ShardRequest.PURPOSE_GET_MLT_RESULTS) == 0
-        && (sreq.purpose & ShardRequest.PURPOSE_GET_TOP_IDS) == 0) {
-      sreq.params.set(COMPONENT_NAME, "false");
-    }
-  }
-
-  /**
-   * Returns a NamedList of the merged MLT results, ordered by the
-   * {@code positionInResponse} of the corresponding {@link ShardDoc} in
-   * {@code resultIds}; keys with no MLT results map to an empty list.
-   */
-  NamedList<SolrDocumentList> buildMoreLikeThisNamed(
-      Map<Object,SolrDocumentList> allMlt, Map<Object,ShardDoc> resultIds) {
-    NamedList<SolrDocumentList> result = new NamedList<>();
-    TreeMap<Integer,Object> sortingMap = new TreeMap<>();
-    for (Entry<Object,ShardDoc> next : resultIds.entrySet()) {
-      sortingMap.put(next.getValue().positionInResponse, next.getKey());
-    }
-    for (Object key : sortingMap.values()) {
-      SolrDocumentList sdl = allMlt.get(key);
-      if (sdl == null) {
-        sdl = new SolrDocumentList();
-        sdl.setNumFound(0);
-        sdl.setStart(0);
-      }
-      result.add(key.toString(), sdl);
-    }
-    return result;
-  }
-  
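-  /**
-   * Merges two shard result lists for the same source document: entries are
-   * de-duplicated by {@code idField}, sorted by descending score and
-   * truncated to {@code maxSize}. {@code numFound} is summed, {@code start}
-   * is the minimum and {@code maxScore} the maximum of the two inputs.
-   */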
-  public SolrDocumentList mergeSolrDocumentList(SolrDocumentList one,
-      SolrDocumentList two, int maxSize, String idField) {
-
-    List<SolrDocument> l = new ArrayList<>();
-    
-    // De-duplicate the two result sets by unique key; duplicates shouldn't
-    // occur if documents were indexed correctly.
-    Map<String,SolrDocument> map = new HashMap<>();
-    for (SolrDocument doc : one) {
-      Object id = doc.getFieldValue(idField);
-      assert id != null : doc.toString();
-      map.put(id.toString(), doc);
-    }
-    for (SolrDocument doc : two) {
-      map.put(doc.getFieldValue(idField).toString(), doc);
-    }
-    
-    l = new ArrayList<>(map.values());
-    
-    // Comparator that sorts docs by descending score; missing docs or scores
-    // are treated as 0. Note that scores coming from different shard
-    // responses are not strictly comparable, so this ordering is approximate.
-    Comparator<SolrDocument> c = new Comparator<SolrDocument>() {
-      @Override
-      public int compare(SolrDocument o1, SolrDocument o2) {
-        Float f1 = getFloat(o1);
-        Float f2 = getFloat(o2);
-        return f2.compareTo(f1);
-      }
-      
-      private Float getFloat(SolrDocument doc) {
-        Float f = 0f;
-        if (doc != null) {
-          Object o = doc.getFieldValue("score");
-          if (o instanceof Float) {
-            f = (Float) o;
-          }
-        }
-        return f;
-      }
-    };
-    
-    Collections.sort(l, c);
-    
-    // Truncate list to maxSize
-    if (l.size() > maxSize) {
-      l = l.subList(0, maxSize);
-    }
-    
-    // Build the merged SolrDocumentList, carrying over attributes from the originals
-    SolrDocumentList result = new SolrDocumentList();
-    result.addAll(l);
-    result.setMaxScore(Math.max(one.getMaxScore(), two.getMaxScore()));
-    result.setNumFound(one.getNumFound() + two.getNumFound());
-    result.setStart(Math.min(one.getStart(), two.getStart()));
-
-    return result;
-  }
-  
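-  /**
-   * Builds the follow-up shard request that executes one MLT query as a
-   * normal query: tagged with {@link #DIST_DOC_ID} for correlation, asking
-   * only for the unique key and score, sorted by score descending.
-   */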
-  ShardRequest buildShardQuery(ResponseBuilder rb, String q, String key) {
-    ShardRequest s = new ShardRequest();
-    s.params = new ModifiableSolrParams(rb.req.getParams());
-    s.purpose |= ShardRequest.PURPOSE_GET_MLT_RESULTS;
-    // Maybe unnecessary, but safe.
-    s.purpose |= ShardRequest.PURPOSE_PRIVATE;
-    
-    s.params.remove(ShardParams.SHARDS);
-    // s.params.remove(MoreLikeThisComponent.COMPONENT_NAME);
-    
-    // needed to correlate results
-    s.params.set(MoreLikeThisComponent.DIST_DOC_ID, key);
-    s.params.set(CommonParams.START, 0);
-    int mltcount = s.params.getInt(MoreLikeThisParams.DOC_COUNT, 20); // overrequest
-    s.params.set(CommonParams.ROWS, mltcount);
-    
-    // adding score to rank moreLikeThis
-    s.params.remove(CommonParams.FL);
-    
-    // Should probably add something like this:
-    // String fl = s.params.get(MoreLikeThisParams.RETURN_FL, "*");
-    // if(fl != null){
-    // s.params.set(CommonParams.FL, fl + ",score");
-    // }
-    String id = rb.req.getSchema().getUniqueKeyField().getName();
-    s.params.set(CommonParams.FL, "score," + id);
-    s.params.set(SORT, "score desc");
-    // MLT Query is submitted as normal query to shards.
-    s.params.set(CommonParams.Q, q);
-    
-    return s;
-  }
-  
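-  /**
-   * Builds a minimal shard request that runs the given MLT query against all
-   * shards, returning only the unique key and score.
-   */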
-  ShardRequest buildMLTQuery(ResponseBuilder rb, String q) {
-    ShardRequest s = new ShardRequest();
-    s.params = new ModifiableSolrParams();
-
-    s.params.set(CommonParams.START, 0);
-
-    String id = rb.req.getSchema().getUniqueKeyField().getName();
-
-    s.params.set(CommonParams.FL, "score," + id);
-    // MLT Query is submitted as normal query to shards.
-    s.params.set(CommonParams.Q, q);
-    
-    s.shards = ShardRequest.ALL_SHARDS;
-    return s;
-  }
-  
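-  /**
-   * Runs MLT locally for every document in {@code docs}, keyed by the
-   * document's printable unique key. When debugging is enabled, the raw,
-   * boosted and real MLT queries plus per-document explains are added to the
-   * debug section of the response.
-   */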
-  NamedList<DocList> getMoreLikeThese(ResponseBuilder rb,
-      SolrIndexSearcher searcher, DocList docs, int flags) throws IOException {
-    SolrParams p = rb.req.getParams();
-    IndexSchema schema = searcher.getSchema();
-    MoreLikeThisHandler.MoreLikeThisHelper mltHelper = new MoreLikeThisHandler.MoreLikeThisHelper(
-        p, searcher);
-    NamedList<DocList> mlt = new SimpleOrderedMap<>();
-    DocIterator iterator = docs.iterator();
-    
-    SimpleOrderedMap<Object> dbg = null;
-    if (rb.isDebug()) {
-      dbg = new SimpleOrderedMap<>();
-    }
-    
-    while (iterator.hasNext()) {
-      int id = iterator.nextDoc();
-      int rows = p.getInt(MoreLikeThisParams.DOC_COUNT, 5);
-      DocListAndSet sim = mltHelper.getMoreLikeThis(id, 0, rows, null, null,
-          flags);
-      String name = schema.printableUniqueKey(searcher.doc(id));
-      mlt.add(name, sim.docList);
-      
-      if (dbg != null) {
-        SimpleOrderedMap<Object> docDbg = new SimpleOrderedMap<>();
-        docDbg.add("rawMLTQuery", mltHelper.getRawMLTQuery().toString());
-        docDbg
-            .add("boostedMLTQuery", mltHelper.getBoostedMLTQuery().toString());
-        docDbg.add("realMLTQuery", mltHelper.getRealMLTQuery().toString());
-        SimpleOrderedMap<Object> explains = new SimpleOrderedMap<>();
-        DocIterator mltIte = sim.docList.iterator();
-        while (mltIte.hasNext()) {
-          int mltid = mltIte.nextDoc();
-          String key = schema.printableUniqueKey(searcher.doc(mltid));
-          explains.add(key,
-              searcher.explain(mltHelper.getRealMLTQuery(), mltid));
-        }
-        docDbg.add("explain", explains);
-        dbg.add(name, docDbg);
-      }
-    }
-    
-    // add debug information
-    if (dbg != null) {
-      rb.addDebugInfo("moreLikeThis", dbg);
-    }
-    return mlt;
-  }
-  
-  // ///////////////////////////////////////////
-  // / SolrInfoBean
-  // //////////////////////////////////////////
-  
-  @Override
-  public String getDescription() {
-    return "More Like This";
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.QUERY;
-  }
-}