Posted to commits@solr.apache.org by gu...@apache.org on 2021/11/29 12:08:25 UTC

[solr] branch main updated: SOLR-15590 - Start CoreContainer with Context Listener (#416)

This is an automated email from the ASF dual-hosted git repository.

gus pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/solr.git


The following commit(s) were added to refs/heads/main by this push:
     new 5b6d9db  SOLR-15590 - Start CoreContainer with Context Listener (#416)
5b6d9db is described below

commit 5b6d9dbb3472088be2043b8942bc7b54c25b7305
Author: Gus Heck <46...@users.noreply.github.com>
AuthorDate: Mon Nov 29 07:08:17 2021 -0500

    SOLR-15590 - Start CoreContainer with Context Listener (#416)
---
 .../client/solrj/embedded/JettySolrRunner.java     |  83 ++-
 .../src/java/org/apache/solr/core/NodeConfig.java  |  39 ++
 .../apache/solr/servlet/CoreContainerProvider.java | 476 +++++++++++++
 ...ervletUtils.java => ExceptionWhileTracing.java} |  21 +-
 .../java/org/apache/solr/servlet/HttpSolrCall.java |   4 +-
 .../apache/solr/servlet/LoadAdminUiServlet.java    |   4 +-
 .../{ServletUtils.java => PathExcluder.java}       |  21 +-
 .../java/org/apache/solr/servlet/ServletUtils.java | 320 +++++++++
 ...Utils.java => SolrAuthenticationException.java} |  19 +-
 .../apache/solr/servlet/SolrDispatchFilter.java    | 745 +++++----------------
 .../org/apache/solr/cloud/SolrXmlInZkTest.java     |   3 +-
 .../test/org/apache/solr/core/TestLazyCores.java   |   3 +-
 .../solr/servlet/HttpSolrCallGetCoreTest.java      |   3 +-
 .../java/org/apache/solr/util/BaseTestHarness.java |   4 +-
 solr/webapp/web/WEB-INF/web.xml                    |   4 +-
 15 files changed, 1055 insertions(+), 694 deletions(-)

diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
index 5f481c4..ec8d3c7 100644
--- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
+++ b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
@@ -23,6 +23,7 @@ import javax.servlet.FilterConfig;
 import javax.servlet.ServletException;
 import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
+import javax.servlet.UnavailableException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
@@ -69,6 +70,7 @@ import org.apache.solr.core.SolrCore;
 import org.apache.solr.handler.admin.CoreAdminOperation;
 import org.apache.solr.handler.admin.LukeRequestHandler;
 import org.apache.solr.metrics.SolrMetricManager;
+import org.apache.solr.servlet.CoreContainerProvider;
 import org.apache.solr.servlet.SolrDispatchFilter;
 import org.apache.solr.util.TimeOut;
 import org.eclipse.jetty.alpn.server.ALPNServerConnectionFactory;
@@ -113,7 +115,7 @@ public class JettySolrRunner {
   // NOTE: needs to be larger than SolrHttpClient.threadPoolSweeperMaxIdleTime
   private static final int THREAD_POOL_MAX_IDLE_TIME_MS = 260000;
 
-  Server server;
+  private Server server;
 
   volatile FilterHolder dispatchFilter;
   volatile FilterHolder debugFilter;
@@ -143,6 +145,8 @@ public class JettySolrRunner {
 
   private volatile boolean started = false;
 
+  private CoreContainerProvider coreContainerProvider;
+
   public static class DebugFilter implements Filter {
     private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
@@ -364,7 +368,9 @@ public class JettySolrRunner {
       }
 
       @Override
-      public void lifeCycleStopped(LifeCycle arg0) {}
+      public synchronized void lifeCycleStopped(LifeCycle arg0) {
+        coreContainerProvider.close();
+      }
 
       @Override
       public void lifeCycleStarting(LifeCycle arg0) {
@@ -372,8 +378,7 @@ public class JettySolrRunner {
       }
 
       @Override
-      public void lifeCycleStarted(LifeCycle arg0) {
-
+      public synchronized void lifeCycleStarted(LifeCycle arg0) {
         jettyPort = getFirstConnectorPort();
         int port = jettyPort;
         if (proxyPort != -1) port = proxyPort;
@@ -382,7 +387,8 @@ public class JettySolrRunner {
 
         root.getServletContext().setAttribute(SolrDispatchFilter.PROPERTIES_ATTRIBUTE, nodeProperties);
         root.getServletContext().setAttribute(SolrDispatchFilter.SOLRHOME_ATTRIBUTE, solrHome);
-
+        coreContainerProvider = new CoreContainerProvider();
+        coreContainerProvider.init(root.getServletContext());
         log.info("Jetty properties: {}", nodeProperties);
 
         debugFilter = root.addFilter(DebugFilter.class, "/*", EnumSet.of(DispatcherType.REQUEST) );
@@ -455,10 +461,16 @@ public class JettySolrRunner {
    * @return the {@link CoreContainer} for this node
    */
   public CoreContainer getCoreContainer() {
-    if (getSolrDispatchFilter() == null || getSolrDispatchFilter().getCores() == null) {
-      return null;
+    try {
+      if (getSolrDispatchFilter() == null || getSolrDispatchFilter().getCores() == null) {
+        return null;
+      }
+      return getSolrDispatchFilter().getCores();
+    } catch (UnavailableException e) {
+      // Since this is only used in tests, this is just a straight-up failure
+      // If this is converted for other use something else might be better here
+      throw new RuntimeException(e);
     }
-    return getSolrDispatchFilter().getCores();
   }
 
   public String getNodeName() {
@@ -500,7 +512,7 @@ public class JettySolrRunner {
    *
    * @throws Exception if an error occurs on startup
    */
-  public void start(boolean reusePort) throws Exception {
+  public synchronized void start(boolean reusePort) throws Exception {
     // Do not let Jetty/Solr pollute the MDC for this thread
     Map<String, String> prevContext = MDC.getCopyOfContextMap();
     MDC.clear();
@@ -527,7 +539,7 @@ public class JettySolrRunner {
       }
       synchronized (JettySolrRunner.this) {
         int cnt = 0;
-        while (!waitOnSolr || !dispatchFilter.isRunning() || getCoreContainer() == null) {
+        while (!waitOnSolr || !dispatchFilter.isRunning() ) {
           this.wait(100);
           if (cnt++ == 15) {
             throw new RuntimeException("Jetty/Solr unresponsive");
@@ -561,7 +573,7 @@ public class JettySolrRunner {
 
 
   private void setProtocolAndHost() {
-    String protocol = null;
+    String protocol;
 
     Connector[] conns = server.getConnectors();
     if (0 == conns.length) {
@@ -575,7 +587,7 @@ public class JettySolrRunner {
     this.host = c.getHost();
   }
 
-  private void retryOnPortBindFailure(int portRetryTime, int port) throws Exception, InterruptedException {
+  private void retryOnPortBindFailure(int portRetryTime, int port) throws Exception {
     TimeOut timeout = new TimeOut(portRetryTime, TimeUnit.SECONDS, TimeSource.NANO_TIME);
     int tryCnt = 1;
     while (true) {
@@ -625,34 +637,12 @@ public class JettySolrRunner {
    *
    * @throws Exception if an error occurs on shutdown
    */
-  public void stop() throws Exception {
+  public synchronized void  stop() throws Exception {
     // Do not let Jetty/Solr pollute the MDC for this thread
     Map<String,String> prevContext = MDC.getCopyOfContextMap();
     MDC.clear();
     try {
       Filter filter = dispatchFilter.getFilter();
-
-      // we want to shutdown outside of jetty cutting us off
-      SolrDispatchFilter sdf = getSolrDispatchFilter();
-      ExecutorService customThreadPool = null;
-      if (sdf != null) {
-        customThreadPool = ExecutorUtil.newMDCAwareCachedThreadPool(new SolrNamedThreadFactory("jettyShutDown"));
-
-        sdf.closeOnDestroy(false);
-//        customThreadPool.submit(() -> {
-//          try {
-//            sdf.close();
-//          } catch (Throwable t) {
-//            log.error("Error shutting down Solr", t);
-//          }
-//        });
-        try {
-          sdf.close();
-        } catch (Throwable t) {
-          log.error("Error shutting down Solr", t);
-        }
-      }
-
       QueuedThreadPool qtp = (QueuedThreadPool) server.getThreadPool();
       ReservedThreadExecutor rte = qtp.getBean(ReservedThreadExecutor.class);
 
@@ -687,12 +677,13 @@ public class JettySolrRunner {
         rte.stop();
 
         TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-        timeout.waitFor("Timeout waiting for reserved executor to stop.", ()
-            -> rte.isStopped());
+        timeout.waitFor("Timeout waiting for reserved executor to stop.", rte::isStopped);
       }
 
-      if (customThreadPool != null) {
-        ExecutorUtil.shutdownAndAwaitTermination(customThreadPool);
+      // we want to shutdown outside of jetty cutting us off
+      SolrDispatchFilter sdf = getSolrDispatchFilter();
+      if (sdf != null) {
+        ExecutorUtil.shutdownAndAwaitTermination(getJettyShutDownThreadPool());
       }
 
       do {
@@ -716,6 +707,10 @@ public class JettySolrRunner {
     }
   }
 
+  private ExecutorService getJettyShutDownThreadPool() {
+    return ExecutorUtil.newMDCAwareCachedThreadPool(new SolrNamedThreadFactory("jettyShutDown"));
+  }
+
   public void outputMetrics(File outputDirectory, String fileName) throws IOException {
     if (getCoreContainer() != null) {
 
@@ -750,12 +745,11 @@ public class JettySolrRunner {
         NamedList<Object> coreStatus = CoreAdminOperation.getCoreStatus(getCoreContainer(), core.getName(), false);
         core.withSearcher(solrIndexSearcher -> {
           SimpleOrderedMap<Object> lukeIndexInfo = LukeRequestHandler.getIndexInfo(solrIndexSearcher.getIndexReader());
-          @SuppressWarnings({"unchecked", "rawtypes"})
           Map<String,Object> indexInfoMap = coreStatus.toMap(new LinkedHashMap<>());
           indexInfoMap.putAll(lukeIndexInfo.toMap(new LinkedHashMap<>()));
           pw.println(JSONUtil.toJSON(indexInfoMap, 2));
 
-          pw.println("");
+          pw.println();
           return null;
         });
       }
@@ -897,7 +891,12 @@ public class JettySolrRunner {
   private void waitForLoadingCoresToFinish(long timeoutMs) {
     if (dispatchFilter != null) {
       SolrDispatchFilter solrFilter = (SolrDispatchFilter) dispatchFilter.getFilter();
-      CoreContainer cores = solrFilter.getCores();
+      CoreContainer cores;
+      try {
+        cores = solrFilter.getCores();
+      } catch (UnavailableException e) {
+        throw new IllegalStateException("The CoreContainer is unavailable!");
+      }
       if (cores != null) {
         cores.waitForLoadingCoresToFinish(timeoutMs);
       } else {
diff --git a/solr/core/src/java/org/apache/solr/core/NodeConfig.java b/solr/core/src/java/org/apache/solr/core/NodeConfig.java
index cd8bca7..03195c7 100644
--- a/solr/core/src/java/org/apache/solr/core/NodeConfig.java
+++ b/solr/core/src/java/org/apache/solr/core/NodeConfig.java
@@ -16,11 +16,19 @@
  */
 package org.apache.solr.core;
 
+import org.apache.commons.lang3.StringUtils;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.logging.LogWatcherConfig;
+
 import org.apache.solr.update.UpdateShardHandlerConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+import java.io.ByteArrayInputStream;
+import java.lang.invoke.MethodHandles;
 import java.nio.file.Path;
 import java.util.Arrays;
 import java.util.Collections;
@@ -31,6 +39,8 @@ import java.util.Set;
 
 
 public class NodeConfig {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
   // all Path fields here are absolute and normalized.
 
   private final String nodeName;
@@ -149,6 +159,35 @@ public class NodeConfig {
     if (null == this.loader) throw new NullPointerException("loader");
   }
 
+  /**
+   * Get the NodeConfig whether stored on disk, in ZooKeeper, etc.
+   * This may also be used by custom filters to load relevant configuration.
+   * @return the NodeConfig
+   */
+  public static NodeConfig loadNodeConfig(Path solrHome, Properties nodeProperties) {
+    if (!StringUtils.isEmpty(System.getProperty("solr.solrxml.location"))) {
+      log.warn("Solr property solr.solrxml.location is no longer supported. Will automatically load solr.xml from ZooKeeper if it exists");
+    }
+    nodeProperties = SolrXmlConfig.wrapAndSetZkHostFromSysPropIfNeeded(nodeProperties);
+    String zkHost = nodeProperties.getProperty(SolrXmlConfig.ZK_HOST);
+    if (!StringUtils.isEmpty(zkHost)) {
+      int startUpZkTimeOut = Integer.getInteger("waitForZk", 30);
+      startUpZkTimeOut *= 1000;
+      try (SolrZkClient zkClient = new SolrZkClient(zkHost, startUpZkTimeOut, startUpZkTimeOut)) {
+        if (zkClient.exists("/solr.xml", true)) {
+          log.info("solr.xml found in ZooKeeper. Loading...");
+          byte[] data = zkClient.getData("/solr.xml", null, null, true);
+          return SolrXmlConfig.fromInputStream(solrHome, new ByteArrayInputStream(data), nodeProperties, true);
+        }
+      } catch (Exception e) {
+        throw new SolrException(ErrorCode.SERVER_ERROR, "Error occurred while loading solr.xml from zookeeper", e);
+      }
+      log.info("Loading solr.xml from SolrHome (not found in ZooKeeper)");
+    }
+
+    return SolrXmlConfig.fromSolrHome(solrHome, nodeProperties);
+  }
+
   public String getConfigSetServiceClass() {
     return this.configSetServiceClass;
   }
diff --git a/solr/core/src/java/org/apache/solr/servlet/CoreContainerProvider.java b/solr/core/src/java/org/apache/solr/servlet/CoreContainerProvider.java
new file mode 100644
index 0000000..08c05a3
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/servlet/CoreContainerProvider.java
@@ -0,0 +1,476 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.servlet;
+
+import com.codahale.metrics.jvm.ClassLoadingGaugeSet;
+import com.codahale.metrics.jvm.GarbageCollectorMetricSet;
+import com.codahale.metrics.jvm.MemoryUsageGaugeSet;
+import com.codahale.metrics.jvm.ThreadStatesGaugeSet;
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.http.client.HttpClient;
+import org.apache.lucene.util.Version;
+import org.apache.solr.cloud.ZkController;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.NodeConfig;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.core.SolrInfoBean.Group;
+import org.apache.solr.core.SolrXmlConfig;
+import org.apache.solr.metrics.AltBufferPoolMetricSet;
+import org.apache.solr.metrics.MetricsMap;
+import org.apache.solr.metrics.OperatingSystemMetricSet;
+import org.apache.solr.metrics.SolrMetricManager;
+import org.apache.solr.metrics.SolrMetricManager.ResolutionStrategy;
+import org.apache.solr.metrics.SolrMetricProducer;
+import org.apache.solr.servlet.RateLimitManager.Builder;
+import org.apache.solr.util.StartupLoggingUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.naming.Context;
+import javax.naming.InitialContext;
+import javax.naming.NamingException;
+import javax.naming.NoInitialContextException;
+import javax.servlet.ServletContext;
+import javax.servlet.ServletContextEvent;
+import javax.servlet.ServletContextListener;
+import javax.servlet.UnavailableException;
+import java.lang.invoke.MethodHandles;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.Instant;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.Set;
+import java.util.WeakHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
+
+import static org.apache.solr.core.NodeConfig.loadNodeConfig;
+import static org.apache.solr.servlet.SolrDispatchFilter.PROPERTIES_ATTRIBUTE;
+import static org.apache.solr.servlet.SolrDispatchFilter.SOLRHOME_ATTRIBUTE;
+import static org.apache.solr.servlet.SolrDispatchFilter.SOLR_INSTALL_DIR_ATTRIBUTE;
+import static org.apache.solr.servlet.SolrDispatchFilter.SOLR_LOG_LEVEL;
+import static org.apache.solr.servlet.SolrDispatchFilter.SOLR_LOG_MUTECONSOLE;
+
+/**
+ * A service that can provide access to solr cores. This allows us to have multiple filters and
+ * servlets that depend on SolrCore and CoreContainer, while still only having one CoreContainer per
+ * instance of solr.
+ */
+public class CoreContainerProvider implements ServletContextListener {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final String metricTag = SolrMetricProducer.getUniqueMetricTag(this, null);
+  private CoreContainer cores;
+  private Properties extraProperties;
+  private HttpClient httpClient;
+  private SolrMetricManager metricManager;
+  private RateLimitManager rateLimitManager;
+  private final CountDownLatch init = new CountDownLatch(1);
+  private String registryName;
+  private final boolean isV2Enabled = !"true".equals(System.getProperty("disable.v2.api", "false"));
+  // AFAIK the only reason we need this is to support JettySolrRunner for tests. In tests we might have
+  // multiple CoreContainers in the same JVM, but I *think* that doesn't happen in a real server.
+  private static final Map<ContextInitializationKey, ServiceHolder> services =
+      Collections.synchronizedMap(new WeakHashMap<>());
+
+  // todo: dependency injection instead, but for now this method and the associated  map will have to suffice.
+  //  Note that this relies on ServletContext.equals() not implementing anything significantly different
+  //  than Object.equals for its .equals method (I've found no implementation that even implements it).
+  public static ServiceHolder serviceForContext(ServletContext ctx) throws InterruptedException {
+    ContextInitializationKey key = new ContextInitializationKey(ctx);
+    return services.computeIfAbsent(key, ServiceHolder::new);
+  }
+
+  @Override
+  public void contextInitialized(ServletContextEvent sce) {
+    init(sce.getServletContext());
+  }
+
+  @Override
+  public void contextDestroyed(ServletContextEvent sce) {
+      close();
+  }
+
+  CoreContainer getCoreContainer() throws UnavailableException {
+    waitForCoreContainer(() -> cores,init);
+    return cores;
+  }
+  HttpClient getHttpClient() throws UnavailableException {
+    waitForCoreContainer(() -> cores,init);
+    return httpClient;
+  }
+
+  private static void waitForCoreContainer(Supplier<CoreContainer> provider, CountDownLatch latch) throws UnavailableException {
+    CoreContainer cores = provider.get();
+    if (cores == null || cores.isShutDown()) {
+      long startWait = System.nanoTime();
+      try {
+        while (!latch.await(10, TimeUnit.SECONDS)) {
+          long now = System.nanoTime();
+          if (log.isInfoEnabled()) {
+            log.info("Still waiting for CoreContainerStartup ({} seconds elapsed)", (now - startWait) / 1_000_000_000);
+          }
+        }
+      } catch (InterruptedException e) { //well, no wait then
+        Thread.currentThread().interrupt();
+      }
+      cores = provider.get();
+      if (cores == null || cores.isShutDown()) {
+        final String msg = "Error processing the request. CoreContainer is either not initialized or shutting down.";
+        log.error(msg);
+        throw new UnavailableException(msg);
+      }
+    }
+  }
+
+  public void close() {
+    CoreContainer cc = cores;
+//    if (cc != null) {
+//      ZkController zkController = cc.getZkController();
+//      if (zkController != null) {
+//
+//        // Mark Miller suggested that we should be publishing that we are down before anything else which makes
+//        // good sense, but the following causes test failures, so that improvement can be the subject of another
+//        // PR/issue. Also, jetty might already be refusing requests by this point so that's a potential issue too.
+//        // Digging slightly I see that there's a whole mess of code looking up collections and calculating state
+//        // changes associated with this call, which smells a lot like we're duplicating node state in collection
+//        // stuff, but it will take a lot of code reading to figure out if that's really what it is, why we
+//        // did it and if there's room for improvement.
+//
+//        zkController.publishNodeAsDown(zkController.getNodeName());
+//      }
+//    }
+    cores = null;
+    try {
+      if (metricManager != null) {
+        try {
+          metricManager.unregisterGauges(registryName, metricTag);
+        } catch (NullPointerException e) {
+          // okay
+        } catch (Exception e) {
+          log.warn("Exception closing FileCleaningTracker", e);
+        } finally {
+          metricManager = null;
+        }
+      }
+    } finally {
+      if (cc != null) {
+        httpClient = null;
+        cc.shutdown();
+      }
+    }
+  }
+
+  public void init(ServletContext servletContext)  {
+    if (log.isTraceEnabled()) {
+      log.trace("CoreService.init(): {}", this.getClass().getClassLoader());
+    }
+    CoreContainer coresInit = null;
+    try {
+      // "extra" properties must be initialized first, so we know things like "do we have a zkHost"
+      // wrap as defaults (if set) so we can modify w/o polluting the Properties provided by our caller
+      this.extraProperties = SolrXmlConfig.wrapAndSetZkHostFromSysPropIfNeeded
+          ((Properties) servletContext.getAttribute(PROPERTIES_ATTRIBUTE));
+
+      StartupLoggingUtils.checkLogDir();
+      if (log.isInfoEnabled()) {
+        log.info("Using logger factory {}", StartupLoggingUtils.getLoggerImplStr());
+      }
+
+      logWelcomeBanner();
+
+      String muteConsole = System.getProperty(SOLR_LOG_MUTECONSOLE);
+      if (muteConsole != null && !Arrays.asList("false","0","off","no").contains(muteConsole.toLowerCase(Locale.ROOT))) {
+        StartupLoggingUtils.muteConsole();
+      }
+      String logLevel = System.getProperty(SOLR_LOG_LEVEL);
+      if (logLevel != null) {
+        log.info("Log level override, property solr.log.level={}", logLevel);
+        StartupLoggingUtils.changeLogLevel(logLevel);
+      }
+
+      coresInit = createCoreContainer(computeSolrHome(servletContext), extraProperties);
+      this.httpClient = coresInit.getUpdateShardHandler().getDefaultHttpClient();
+      setupJvmMetrics(coresInit);
+
+      SolrZkClient zkClient = null;
+      ZkController zkController = coresInit.getZkController();
+
+      if (zkController != null) {
+        zkClient = zkController.getZkClient();
+      }
+
+      Builder builder = new Builder(zkClient);
+
+      this.rateLimitManager = builder.build();
+
+      if (zkController != null) {
+        zkController.zkStateReader.registerClusterPropertiesListener(this.rateLimitManager);
+      }
+
+      if (log.isDebugEnabled()) {
+        log.debug("user.dir={}", System.getProperty("user.dir"));
+      }
+    } catch( Throwable t ) {
+      // catch this so our filter still works
+      log.error( "Could not start Solr. Check solr/home property and the logs");
+      SolrCore.log( t );
+      if (t instanceof Error) {
+        throw (Error) t;
+      }
+    } finally{
+      log.trace("SolrDispatchFilter.init() done");
+      this.cores = coresInit; // crucially final assignment
+      services.computeIfAbsent(new ContextInitializationKey(servletContext), ServiceHolder::new)
+          .setService(this);
+      init.countDown();
+    }
+  }
+
+
+  private void logWelcomeBanner() {
+    // _Really_ sorry about how clumsy this is as a result of the logging call checker, but this is the only one
+    // that's so ugly so far.
+    if (log.isInfoEnabled()) {
+      log.info(" ___      _       Welcome to Apache Solr™ version {}", solrVersion());
+    }
+    if (log.isInfoEnabled()) {
+      log.info("/ __| ___| |_ _   Starting in {} mode on port {}", isCloudMode() ? "cloud" : "standalone", getSolrPort());
+    }
+    if (log.isInfoEnabled()) {
+      log.info("\\__ \\/ _ \\ | '_|  Install dir: {}", System.getProperty(SOLR_INSTALL_DIR_ATTRIBUTE));
+    }
+    if (log.isInfoEnabled()) {
+      log.info("|___/\\___/_|_|    Start time: {}", Instant.now());
+    }
+  }
+  private String solrVersion() {
+    String specVer = Version.LATEST.toString();
+    try {
+      String implVer = SolrCore.class.getPackage().getImplementationVersion();
+      return (specVer.equals(implVer.split(" ")[0])) ? specVer : implVer;
+    } catch (Exception e) {
+      return specVer;
+    }
+  }
+
+  private String getSolrPort() {
+    return System.getProperty("jetty.port");
+  }
+
+  /**
+   * We are in cloud mode if Java option zkRun exists OR zkHost exists and is non-empty
+   * @see SolrXmlConfig#wrapAndSetZkHostFromSysPropIfNeeded
+   * @see #extraProperties
+   * @see #init
+   */
+  private boolean isCloudMode() {
+    assert null != extraProperties; // we should never be called w/o this being initialized
+    return (null != extraProperties.getProperty(SolrXmlConfig.ZK_HOST)) || (null != System.getProperty("zkRun"));
+  }
+
+  /**
+   * Returns the effective Solr Home to use for this node, based on looking up the value in this order:
+   * <ol>
+   * <li>attribute in the FilterConfig</li>
+   * <li>JNDI: via java:comp/env/solr/home</li>
+   * <li>The system property solr.solr.home</li>
+   * <li>Look in the current working directory for a solr/ directory</li>
+   * </ol>
+   * <p>
+   *
+   * @return the Solr home, absolute and normalized.
+   */
+  private static Path computeSolrHome(ServletContext servletContext) {
+
+    // start with explicit check of servlet config...
+    String source = "servlet config: " + SOLRHOME_ATTRIBUTE;
+    String home = (String) servletContext.getAttribute(SOLRHOME_ATTRIBUTE);
+
+    if (null == home) {
+      final String lookup = "java:comp/env/solr/home";
+      // Try JNDI
+      source = "JNDI: " + lookup;
+      try {
+        Context c = new InitialContext();
+        home = (String) c.lookup(lookup);
+      } catch (NoInitialContextException e) {
+        log.debug("JNDI not configured for solr (NoInitialContextEx)");
+      } catch (NamingException e) {
+        log.debug("No /solr/home in JNDI");
+      } catch (RuntimeException ex) {
+        log.warn("Odd RuntimeException while testing for JNDI: ", ex);
+      }
+    }
+
+    if (null == home) {
+      // Now try system property
+      final String prop = "solr.solr.home";
+      source = "system property: " + prop;
+      home = System.getProperty(prop);
+    }
+
+    if (null == home) {
+      // if all else fails, assume default dir
+      home = "solr/";
+      source = "defaulted to '" + home + "' ... could not find system property or JNDI";
+    }
+    final Path solrHome = Paths.get(home).toAbsolutePath().normalize();
+    log.info("Solr Home: {} (source: {})", solrHome, source);
+
+    return solrHome;
+  }
+
+  /**
+   * CoreContainer initialization
+   * @return a CoreContainer to hold this server's cores
+   */
+  protected CoreContainer createCoreContainer(Path solrHome, Properties nodeProps) {
+    NodeConfig nodeConfig = loadNodeConfig(solrHome, nodeProps);
+    final CoreContainer coreContainer = new CoreContainer(nodeConfig, true);
+    coreContainer.load();
+    return coreContainer;
+  }
+
+
+
+  private void setupJvmMetrics(CoreContainer coresInit)  {
+    metricManager = coresInit.getMetricManager();
+    registryName = SolrMetricManager.getRegistryName(Group.jvm);
+    final Set<String> hiddenSysProps = coresInit.getConfig().getMetricsConfig().getHiddenSysProps();
+    try {
+      metricManager.registerAll(registryName, new AltBufferPoolMetricSet(), ResolutionStrategy.IGNORE, "buffers");
+      metricManager.registerAll(registryName, new ClassLoadingGaugeSet(), ResolutionStrategy.IGNORE, "classes");
+      metricManager.registerAll(registryName, new OperatingSystemMetricSet(), ResolutionStrategy.IGNORE, "os");
+      metricManager.registerAll(registryName, new GarbageCollectorMetricSet(), ResolutionStrategy.IGNORE, "gc");
+      metricManager.registerAll(registryName, new MemoryUsageGaugeSet(), ResolutionStrategy.IGNORE, "memory");
+      metricManager.registerAll(registryName, new ThreadStatesGaugeSet(), ResolutionStrategy.IGNORE, "threads"); // todo should we use CachedThreadStatesGaugeSet instead?
+      MetricsMap sysprops = new MetricsMap(map -> System.getProperties().forEach((k, v) -> {
+        //noinspection SuspiciousMethodCalls
+        if (!hiddenSysProps.contains(k)) {
+          map.putNoEx(String.valueOf(k), v);
+        }
+      }));
+      metricManager.registerGauge(null, registryName, sysprops, metricTag, ResolutionStrategy.IGNORE, "properties", "system");
+      MetricsMap sysenv = new MetricsMap(map -> System.getenv().forEach((k, v) -> {
+        if (!hiddenSysProps.contains(k)) {
+          map.putNoEx(String.valueOf(k), v);
+        }
+      }));
+      metricManager.registerGauge(null, registryName, sysenv, metricTag, ResolutionStrategy.IGNORE, "env", "system");
+    } catch (Exception e) {
+      log.warn("Error registering JVM metrics", e);
+    }
+  }
+
+  public RateLimitManager getRateLimitManager() {
+    return rateLimitManager;
+  }
+
+  @VisibleForTesting
+  void setRateLimitManager(RateLimitManager rateLimitManager) {
+    this.rateLimitManager = rateLimitManager;
+  }
+
+  public boolean isV2Enabled() {
+    return isV2Enabled;
+  }
+
+  private static class ContextInitializationKey {
+    private final ServletContext ctx;
+    private final CountDownLatch initializing = new CountDownLatch(1);
+
+    private ContextInitializationKey(ServletContext ctx) {
+      if (ctx == null) {
+        throw new IllegalArgumentException("Context must not be null");
+      }
+      // if one of these is reachable both must be to avoid collection from weak hashmap, so
+      // set an attribute holding this object to ensure we never get collected until the ServletContext
+      // is eligible for collection too.
+      ctx.setAttribute(this.getClass().getName(), this);
+      this.ctx = ctx;
+    }
+
+    public synchronized ServletContext getCtx() {
+      return ctx;
+    }
+
+    synchronized void makeReady() {
+      this.initializing.countDown();
+    }
+
+    // NOT synchronized :)
+    public void waitForReadyService() throws InterruptedException {
+      initializing.await();
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+      ContextInitializationKey that = (ContextInitializationKey) o;
+      return ctx.equals(that.ctx);
+    }
+
+    @Override
+    public int hashCode() {
+      return Objects.hash(ctx);
+    }
+  }
+
+
+  static class ServiceHolder {
+    private volatile CoreContainerProvider service;
+    private volatile ContextInitializationKey key;
+
+    private ServiceHolder(ContextInitializationKey key) {
+      if (key == null) {
+        throw new IllegalArgumentException("Key for accessing this service holder must be supplied");
+      }
+      this.key = key;
+    }
+
+    public void setService(CoreContainerProvider service) {
+      this.service = service;
+      key.makeReady();
+      key = null; // be sure not to hold a reference to the context via the key
+    }
+
+    public CoreContainerProvider getService()  {
+      try {
+        if (key != null) {
+          try {
+            key.waitForReadyService();
+          } catch (NullPointerException e) {
+            // ignore, means we raced with set service and lost, but that's fine since null implies we are ready.
+          }
+        }
+      } catch (InterruptedException e) {
+        throw new SolrException(ErrorCode.SERVER_ERROR,"Interrupted while obtaining reference to CoreService");
+      }
+      return service;
+    }
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/servlet/ServletUtils.java b/solr/core/src/java/org/apache/solr/servlet/ExceptionWhileTracing.java
similarity index 55%
copy from solr/core/src/java/org/apache/solr/servlet/ServletUtils.java
copy to solr/core/src/java/org/apache/solr/servlet/ExceptionWhileTracing.java
index e61831a..c01d3d0 100644
--- a/solr/core/src/java/org/apache/solr/servlet/ServletUtils.java
+++ b/solr/core/src/java/org/apache/solr/servlet/ExceptionWhileTracing.java
@@ -14,24 +14,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.solr.servlet;
 
-import javax.servlet.http.HttpServletRequest;
-
-/**
- * Various Util methods for interaction on servlet level, i.e. HttpServletRequest
+/*
+ * This is not pretty, hope to remove it when tracing becomes a filter.
  */
-public abstract class ServletUtils {
-  private ServletUtils() { /* only static methods in this class */ }
+public class ExceptionWhileTracing extends RuntimeException {
+  public Exception e;
 
-  /**
-   * Use this to get the full path after context path "/solr", which is a combination of
-   * servletPath and pathInfo.
-   * @param request the HttpServletRequest object
-   * @return String with path starting with a "/", or empty string if no path
-   */
-  public static String getPathAfterContext(HttpServletRequest request) {
-    return request.getServletPath() + (request.getPathInfo() != null ? request.getPathInfo() : "");
+  public ExceptionWhileTracing(Exception e) {
+    this.e = e;
   }
 }
diff --git a/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java b/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java
index e932184..8536207 100644
--- a/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java
+++ b/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java
@@ -458,7 +458,7 @@ public class HttpSolrCall {
 
     if (statusCode == AuthorizationResponse.PROMPT.statusCode) {
       @SuppressWarnings({"unchecked"})
-      Map<String, String> headers = (Map) getReq().getAttribute(AuthenticationPlugin.class.getName());
+      Map<String, String> headers = (Map<String, String>) getReq().getAttribute(AuthenticationPlugin.class.getName());
       if (headers != null) {
         for (Map.Entry<String, String> e : headers.entrySet()) response.setHeader(e.getKey(), e.getValue());
       }
@@ -725,7 +725,7 @@ public class HttpSolrCall {
       }
 
       final HttpResponse response
-          = solrDispatchFilter.httpClient.execute(method, HttpClientUtil.createNewHttpClientRequestContext());
+          = solrDispatchFilter.getHttpClient().execute(method, HttpClientUtil.createNewHttpClientRequestContext());
       int httpStatus = response.getStatusLine().getStatusCode();
       httpEntity = response.getEntity();
 
diff --git a/solr/core/src/java/org/apache/solr/servlet/LoadAdminUiServlet.java b/solr/core/src/java/org/apache/solr/servlet/LoadAdminUiServlet.java
index 54d5924..42103f2 100644
--- a/solr/core/src/java/org/apache/solr/servlet/LoadAdminUiServlet.java
+++ b/solr/core/src/java/org/apache/solr/servlet/LoadAdminUiServlet.java
@@ -48,8 +48,8 @@ public final class LoadAdminUiServlet extends BaseSolrServlet {
           "ENABLED in bin/solr.in.sh or solr.in.cmd.");
       return;
     }
-    HttpServletRequest request = SolrDispatchFilter.closeShield(_request, false);
-    HttpServletResponse response = SolrDispatchFilter.closeShield(_response, false);
+    HttpServletRequest request = ServletUtils.closeShield(_request, false);
+    HttpServletResponse response = ServletUtils.closeShield(_response, false);
 
 
     response.addHeader("X-Frame-Options", "DENY"); // security: SOLR-7966 - avoid clickjacking for admin interface
diff --git a/solr/core/src/java/org/apache/solr/servlet/ServletUtils.java b/solr/core/src/java/org/apache/solr/servlet/PathExcluder.java
similarity index 55%
copy from solr/core/src/java/org/apache/solr/servlet/ServletUtils.java
copy to solr/core/src/java/org/apache/solr/servlet/PathExcluder.java
index e61831a..fef696d 100644
--- a/solr/core/src/java/org/apache/solr/servlet/ServletUtils.java
+++ b/solr/core/src/java/org/apache/solr/servlet/PathExcluder.java
@@ -14,24 +14,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.solr.servlet;
 
-import javax.servlet.http.HttpServletRequest;
+import java.util.List;
+import java.util.regex.Pattern;
 
 /**
- * Various Util methods for interaction on servlet level, i.e. HttpServletRequest
+ * Denotes an object, usually a servlet that denies access to some paths based on the supplied patterns.
+ * Typically, this would be implemented via compiled regular expressions.
  */
-public abstract class ServletUtils {
-  private ServletUtils() { /* only static methods in this class */ }
-
-  /**
-   * Use this to get the full path after context path "/solr", which is a combination of
-   * servletPath and pathInfo.
-   * @param request the HttpServletRequest object
-   * @return String with path starting with a "/", or empty string if no path
-   */
-  public static String getPathAfterContext(HttpServletRequest request) {
-    return request.getServletPath() + (request.getPathInfo() != null ? request.getPathInfo() : "");
-  }
+public interface PathExcluder {
+  void setExcludePatterns(List<Pattern> excludePatterns);
 }
diff --git a/solr/core/src/java/org/apache/solr/servlet/ServletUtils.java b/solr/core/src/java/org/apache/solr/servlet/ServletUtils.java
index e61831a..8fd6e47 100644
--- a/solr/core/src/java/org/apache/solr/servlet/ServletUtils.java
+++ b/solr/core/src/java/org/apache/solr/servlet/ServletUtils.java
@@ -17,12 +17,52 @@
 
 package org.apache.solr.servlet;
 
+import io.opentracing.Span;
+import io.opentracing.Tracer;
+import io.opentracing.noop.NoopSpan;
+import io.opentracing.noop.NoopTracer;
+import io.opentracing.propagation.Format;
+import io.opentracing.tag.Tags;
+import org.apache.http.HttpHeaders;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.logging.MDCLoggingContext;
+import org.apache.solr.request.SolrRequestInfo;
+import org.apache.solr.util.tracing.HttpServletCarrier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.FilterChain;
+import javax.servlet.ReadListener;
+import javax.servlet.ServletException;
+import javax.servlet.ServletInputStream;
+import javax.servlet.ServletOutputStream;
+import javax.servlet.WriteListener;
 import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletRequestWrapper;
+import javax.servlet.http.HttpServletResponse;
+import javax.servlet.http.HttpServletResponseWrapper;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 /**
  * Various Util methods for interaction on servlet level, i.e. HttpServletRequest
  */
 public abstract class ServletUtils {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  static String CLOSE_STREAM_MSG = "Attempted close of http request or response stream - in general you should not do this, "
+      + "you may spoil connection reuse and possibly disrupt a client. If you must close without actually needing to close, "
+      + "use a CloseShield*Stream. Closing or flushing the response stream commits the response and prevents us from modifying it. "
+      + "Closing the request stream prevents us from guaranteeing ourselves that streams are fully read for proper connection reuse."
+      + "Let the container manage the lifecycle of these streams when possible.";
+
   private ServletUtils() { /* only static methods in this class */ }
 
   /**
@@ -34,4 +74,284 @@ public abstract class ServletUtils {
   public static String getPathAfterContext(HttpServletRequest request) {
     return request.getServletPath() + (request.getPathInfo() != null ? request.getPathInfo() : "");
   }
+
+  /**
+   * Wrap the request's input stream with a close shield. If this is a
+   * retry, we will assume that the stream has already been wrapped and do nothing.
+   *
+   * Only the container should ever actually close the servlet output stream. This method possibly
+   * should be turned into a servlet filter
+   *
+   * @param request The request to wrap.
+   * @param retry If this is an original request or a retry.
+   * @return A request object with an {@link InputStream} that will ignore calls to close.
+   */
+  public static HttpServletRequest closeShield(HttpServletRequest request, boolean retry) {
+    if (!retry) {
+      return new HttpServletRequestWrapper(request) {
+
+        @Override
+        public ServletInputStream getInputStream() throws IOException {
+
+          return new ServletInputStreamWrapper(super.getInputStream()) {
+            @Override
+            public void close() {
+              // even though we skip closes, we let local tests know not to close so that a full understanding can take
+              // place
+              assert !Thread.currentThread().getStackTrace()[2].getClassName().matches(
+                  "org\\.apache\\.(?:solr|lucene).*") : CLOSE_STREAM_MSG;
+              this.stream = ClosedServletInputStream.CLOSED_SERVLET_INPUT_STREAM;
+            }
+          };
+
+        }
+      };
+    } else {
+      return request;
+    }
+  }
+
+  /**
+   * Wrap the response's output stream with a close shield. If this is a
+   * retry, we will assume that the stream has already been wrapped and do nothing.
+   *
+   * Only the container should ever actually close the servlet request stream.
+   *
+   * @param response The response to wrap.
+   * @param retry If this response corresponds to an original request or a retry.
+   * @return A response object with an {@link OutputStream} that will ignore calls to close.
+   */
+  public static HttpServletResponse closeShield(HttpServletResponse response, boolean retry) {
+    if (!retry) {
+      return new HttpServletResponseWrapper(response) {
+
+        @Override
+        public ServletOutputStream getOutputStream() throws IOException {
+
+          return new ServletOutputStreamWrapper(super.getOutputStream()) {
+            @Override
+            public void close() {
+              // even though we skip closes, we let local tests know not to close so that a full understanding can take
+              // place
+              assert !Thread.currentThread().getStackTrace()[2].getClassName().matches(
+                  "org\\.apache\\.(?:solr|lucene).*") : CLOSE_STREAM_MSG;
+              stream = ClosedServletOutputStream.CLOSED_SERVLET_OUTPUT_STREAM;
+            }
+          };
+        }
+
+      };
+    } else {
+      return response;
+    }
+  }
+
+  static boolean excludedPath(List<Pattern> excludePatterns, HttpServletRequest request, HttpServletResponse response, FilterChain chain) throws IOException, ServletException {
+    String requestPath = getPathAfterContext(request);
+    // No need to even create the HttpSolrCall object if this path is excluded.
+    if (excludePatterns != null) {
+      for (Pattern p : excludePatterns) {
+        Matcher matcher = p.matcher(requestPath);
+        if (matcher.lookingAt()) {
+          if (chain != null) {
+            chain.doFilter(request, response);
+          }
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
+  static boolean excludedPath(List<Pattern> excludePatterns, HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException {
+    return excludedPath(excludePatterns,request,response, null);
+  }
+
+  static void configExcludes(PathExcluder excluder, String patternConfig) {
+    if(patternConfig != null) {
+      String[] excludeArray = patternConfig.split(",");
+      List<Pattern> patterns = new ArrayList<>();
+      excluder.setExcludePatterns(patterns);
+      for (String element : excludeArray) {
+        patterns.add(Pattern.compile(element));
+      }
+    }
+  }
+
+  /**
+   * Enforces rate limiting for a request. Should be converted to a servlet filter at some point. Currently,
+   * this is tightly coupled with request tracing which is not ideal either.
+   *
+   * @param request The request to limit
+   * @param response The associated response
+   * @param limitedExecution code that will be traced
+   * @param trace a boolean that turns tracing on or off
+   */
+  static void rateLimitRequest(HttpServletRequest request, HttpServletResponse response, Runnable limitedExecution, boolean trace) throws ServletException, IOException {
+    boolean accepted = false;
+    RateLimitManager rateLimitManager = getRateLimitManager(request);
+    try {
+      try {
+        accepted = rateLimitManager.handleRequest(request);
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+        throw new SolrException(ErrorCode.SERVER_ERROR, e.getMessage());
+      }
+
+      if (!accepted) {
+        String errorMessage = "Too many requests for this request type." +
+            "Please try after some time or increase the quota for this request type";
+
+        response.sendError(429, errorMessage);
+      }
+      // todo: this shouldn't be required, tracing and rate limiting should be independently composable
+      traceHttpRequestExecution2(request, response, limitedExecution, trace);
+    } finally {
+      if (accepted) {
+        rateLimitManager.decrementActiveRequests(request);
+      }
+    }
+  }
+
+  /**
+   * Sets up tracing for an HTTP request. Perhaps should be converted to a servlet filter at some point.
+   *
+   * @param tracedExecution the executed code
+   */
+  private static void traceHttpRequestExecution2(HttpServletRequest request, HttpServletResponse response, Runnable tracedExecution, boolean required) throws ServletException, IOException {
+    Tracer tracer = getTracer(request);
+    if (tracer != null) {
+      Span span = buildSpan(tracer, request);
+
+      request.setAttribute(Span.class.getName(), span);
+      try (var scope = tracer.scopeManager().activate(span)) {
+
+        assert scope != null; // prevent javac warning about scope being unused
+        MDCLoggingContext.setTracerId(span.context().toTraceId()); // handles empty string
+        try {
+          tracedExecution.run();
+        } catch (ExceptionWhileTracing e) {
+          if (e.e instanceof SolrAuthenticationException) {
+            throw (SolrAuthenticationException) e.e;
+          }
+          if (e.e instanceof ServletException) {
+            throw (ServletException) e.e;
+          }
+          if (e.e instanceof IOException) {
+            throw (IOException) e.e;
+          }
+          if (e.e instanceof RuntimeException) {
+            throw (RuntimeException) e.e;
+          } else {
+            throw new RuntimeException(e.e);
+          }
+        }
+      } catch (SolrAuthenticationException e) {
+        // done, the response and status code have already been sent
+      } finally {
+        consumeInputFully(request, response);
+        SolrRequestInfo.reset();
+        SolrRequestParsers.cleanupMultipartFiles(request);
+
+
+        span.setTag(Tags.HTTP_STATUS, response.getStatus());
+        span.finish();
+      }
+    } else {
+      if (required) {
+        throw new IllegalStateException("Tracing required, but could not find Tracer in request attribute:" + SolrDispatchFilter.ATTR_TRACING_TRACER);
+      } else {
+        tracedExecution.run();
+      }
+    }
+  }
+
+  private static Tracer getTracer(HttpServletRequest req) {
+    return (Tracer) req.getAttribute(SolrDispatchFilter.ATTR_TRACING_TRACER);
+  }
+
+  private static RateLimitManager getRateLimitManager(HttpServletRequest req) {
+    return (RateLimitManager) req.getAttribute(SolrDispatchFilter.ATTR_RATELIMIT_MANAGER);
+  }
+
+  protected static Span buildSpan(Tracer tracer, HttpServletRequest request) {
+    if (tracer instanceof NoopTracer) {
+      return NoopSpan.INSTANCE;
+    }
+    Tracer.SpanBuilder spanBuilder = tracer.buildSpan("http.request") // will be changed later
+        .asChildOf(tracer.extract(Format.Builtin.HTTP_HEADERS, new HttpServletCarrier(request)))
+        .withTag(Tags.SPAN_KIND, Tags.SPAN_KIND_SERVER)
+        .withTag(Tags.HTTP_METHOD, request.getMethod())
+        .withTag(Tags.HTTP_URL, request.getRequestURL().toString());
+    if (request.getQueryString() != null) {
+      spanBuilder.withTag("http.params", request.getQueryString());
+    }
+    spanBuilder.withTag(Tags.DB_TYPE, "solr");
+    return spanBuilder.start();
+  }
+
+  // we make sure we read the full client request so that the client does
+  // not hit a connection reset and we can reuse the
+  // connection - see SOLR-8453 and SOLR-8683
+  private static void consumeInputFully(HttpServletRequest req, HttpServletResponse response) {
+    try {
+      ServletInputStream is = req.getInputStream();
+      //noinspection StatementWithEmptyBody
+      while (!is.isFinished() && is.read() != -1) {}
+    } catch (IOException e) {
+      if (req.getHeader(HttpHeaders.EXPECT) != null && response.isCommitted()) {
+        log.debug("No input stream to consume from client");
+      } else {
+        log.info("Could not consume full client request", e);
+      }
+    }
+  }
+
+  public static class ClosedServletInputStream extends ServletInputStream {
+
+    public static final ClosedServletInputStream CLOSED_SERVLET_INPUT_STREAM = new ClosedServletInputStream();
+
+    @Override
+    public int read() {
+      return -1;
+    }
+
+    @Override
+    public boolean isFinished() {
+      return false;
+    }
+
+    @Override
+    public boolean isReady() {
+      return false;
+    }
+
+    @Override
+    public void setReadListener(ReadListener arg0) {}
+  }
+
+  public static class ClosedServletOutputStream extends ServletOutputStream {
+
+    public static final ClosedServletOutputStream CLOSED_SERVLET_OUTPUT_STREAM = new ClosedServletOutputStream();
+
+    @Override
+    public void write(final int b) throws IOException {
+      throw new IOException("write(" + b + ") failed: stream is closed");
+    }
+
+    @Override
+    public void flush() throws IOException {
+      throw new IOException("flush() failed: stream is closed");
+    }
+
+    @Override
+    public boolean isReady() {
+      return false;
+    }
+
+    @Override
+    public void setWriteListener(WriteListener arg0) {
+      throw new RuntimeException("setWriteListener() failed: stream is closed");
+    }
+  }
 }
diff --git a/solr/core/src/java/org/apache/solr/servlet/ServletUtils.java b/solr/core/src/java/org/apache/solr/servlet/SolrAuthenticationException.java
similarity index 55%
copy from solr/core/src/java/org/apache/solr/servlet/ServletUtils.java
copy to solr/core/src/java/org/apache/solr/servlet/SolrAuthenticationException.java
index e61831a..d59f84b 100644
--- a/solr/core/src/java/org/apache/solr/servlet/ServletUtils.java
+++ b/solr/core/src/java/org/apache/solr/servlet/SolrAuthenticationException.java
@@ -14,24 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.solr.servlet;
 
-import javax.servlet.http.HttpServletRequest;
-
-/**
- * Various Util methods for interaction on servlet level, i.e. HttpServletRequest
- */
-public abstract class ServletUtils {
-  private ServletUtils() { /* only static methods in this class */ }
-
-  /**
-   * Use this to get the full path after context path "/solr", which is a combination of
-   * servletPath and pathInfo.
-   * @param request the HttpServletRequest object
-   * @return String with path starting with a "/", or empty string if no path
-   */
-  public static String getPathAfterContext(HttpServletRequest request) {
-    return request.getServletPath() + (request.getPathInfo() != null ? request.getPathInfo() : "");
-  }
+public class SolrAuthenticationException extends Exception{
 }
diff --git a/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java b/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
index 93a5897..632e0db 100644
--- a/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
+++ b/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
@@ -16,115 +16,89 @@
  */
 package org.apache.solr.servlet;
 
-import javax.naming.Context;
-import javax.naming.InitialContext;
-import javax.naming.NamingException;
-import javax.naming.NoInitialContextException;
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.ReadListener;
-import javax.servlet.ServletException;
-import javax.servlet.ServletInputStream;
-import javax.servlet.ServletOutputStream;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.UnavailableException;
-import javax.servlet.WriteListener;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletRequestWrapper;
-import javax.servlet.http.HttpServletResponse;
-import javax.servlet.http.HttpServletResponseWrapper;
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.lang.invoke.MethodHandles;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.time.Instant;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Locale;
-import java.util.Properties;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import com.codahale.metrics.jvm.ClassLoadingGaugeSet;
-import com.codahale.metrics.jvm.GarbageCollectorMetricSet;
-import com.codahale.metrics.jvm.MemoryUsageGaugeSet;
-import com.codahale.metrics.jvm.ThreadStatesGaugeSet;
 import com.google.common.annotations.VisibleForTesting;
 import io.opentracing.Span;
 import io.opentracing.Tracer;
-import io.opentracing.noop.NoopSpan;
-import io.opentracing.noop.NoopTracer;
-import io.opentracing.propagation.Format;
 import io.opentracing.tag.Tags;
 import io.opentracing.util.GlobalTracer;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.http.HttpHeaders;
 import org.apache.http.client.HttpClient;
-import org.apache.lucene.util.Version;
 import org.apache.solr.api.V2HttpCall;
-import org.apache.solr.cloud.ZkController;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.util.ExecutorUtil;
 import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.NodeConfig;
 import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.SolrInfoBean;
-import org.apache.solr.core.SolrXmlConfig;
 import org.apache.solr.logging.MDCLoggingContext;
 import org.apache.solr.logging.MDCSnapshot;
-import org.apache.solr.metrics.AltBufferPoolMetricSet;
-import org.apache.solr.metrics.MetricsMap;
-import org.apache.solr.metrics.OperatingSystemMetricSet;
-import org.apache.solr.metrics.SolrMetricManager;
-import org.apache.solr.metrics.SolrMetricProducer;
-import org.apache.solr.request.SolrRequestInfo;
 import org.apache.solr.security.AuditEvent;
 import org.apache.solr.security.AuthenticationPlugin;
 import org.apache.solr.security.PKIAuthenticationPlugin;
 import org.apache.solr.security.PublicKeyHandler;
-import org.apache.solr.util.StartupLoggingUtils;
+import org.apache.solr.servlet.CoreContainerProvider.ServiceHolder;
 import org.apache.solr.util.configuration.SSLConfigurationsFactory;
-import org.apache.solr.util.tracing.HttpServletCarrier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.UnavailableException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.regex.Pattern;
+
 import static org.apache.solr.security.AuditEvent.EventType;
+import static org.apache.solr.servlet.ServletUtils.closeShield;
+import static org.apache.solr.servlet.ServletUtils.configExcludes;
+import static org.apache.solr.servlet.ServletUtils.excludedPath;
 
 /**
 * This filter looks at the incoming URL and maps it to handlers defined in solrconfig.xml
  *
  * @since solr 1.2
  */
-public class SolrDispatchFilter extends BaseSolrFilter {
+// todo: get rid of this class entirely! Request dispatch is the container's responsibility. Much of what we have here
+//  should be several separate but composable servlet Filters, wrapping multiple servlets that are more focused in
+//  scope. This should become possible now that we have a ServletContextListener for startup/shutdown of CoreContainer
+//  that sets up a service from which things like CoreContainer can be requested. (or better yet injected)
+public class SolrDispatchFilter extends BaseSolrFilter implements PathExcluder {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  public static final String ATTR_TRACING_SPAN = Span.class.getName();
+  public static final String ATTR_TRACING_TRACER = Tracer.class.getName();
+  public static final String ATTR_RATELIMIT_MANAGER = RateLimitManager.class.getName();
+
+  // TODO: see if we can get rid of the holder here (the Servlet spec actually guarantees that ContextListeners run
+  //  before filter init, but the JettySolrRunner we use for tests is complicated)
+  private ServiceHolder coreService;
 
-  protected volatile CoreContainer cores;
   protected final CountDownLatch init = new CountDownLatch(1);
 
   protected String abortErrorMessage = null;
-  //TODO using Http2Client
-  protected HttpClient httpClient;
-  private ArrayList<Pattern> excludePatterns;
-  
-  private boolean isV2Enabled = !"true".equals(System.getProperty("disable.v2.api", "false"));
 
-  private final String metricTag = SolrMetricProducer.getUniqueMetricTag(this, null);
-  private SolrMetricManager metricManager;
-  private String registryName;
-  private volatile boolean closeOnDestroy = true;
-  private Properties extraProperties;
+  @Override
+  public void setExcludePatterns(List<Pattern> excludePatterns) {
+    this.excludePatterns = excludePatterns;
+  }
+
+  private List<Pattern> excludePatterns;
 
-  private RateLimitManager rateLimitManager;
+  private final boolean isV2Enabled = !"true".equals(System.getProperty("disable.v2.api", "false"));
+
+  public HttpClient getHttpClient() {
+    try {
+      return coreService.getService().getHttpClient();
+    } catch (UnavailableException e) {
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Internal Http Client Unavailable, startup may have failed");
+    }
+  }
 
   /**
    * Enum to define action that needs to be processed.
@@ -155,288 +129,46 @@ public class SolrDispatchFilter extends BaseSolrFilter {
 
   @Override
   public void init(FilterConfig config) throws ServletException {
-    SSLConfigurationsFactory.current().init();
-    if (log.isTraceEnabled()) {
-      log.trace("SolrDispatchFilter.init(): {}", this.getClass().getClassLoader());
-    }
-    CoreContainer coresInit = null;
     try {
-      // "extra" properties must be init'ed first so we know things like "do we have a zkHost"
-      // wrap as defaults (if set) so we can modify w/o polluting the Properties provided by our caller
-      this.extraProperties = SolrXmlConfig.wrapAndSetZkHostFromSysPropIfNeeded
-        ((Properties) config.getServletContext().getAttribute(PROPERTIES_ATTRIBUTE));
-      
-      StartupLoggingUtils.checkLogDir();
-      if (log.isInfoEnabled()) {
-        log.info("Using logger factory {}", StartupLoggingUtils.getLoggerImplStr());
-      }
-      
-      logWelcomeBanner();
-      
-      String muteConsole = System.getProperty(SOLR_LOG_MUTECONSOLE);
-      if (muteConsole != null && !Arrays.asList("false","0","off","no").contains(muteConsole.toLowerCase(Locale.ROOT))) {
-        StartupLoggingUtils.muteConsole();
-      }
-      String logLevel = System.getProperty(SOLR_LOG_LEVEL);
-      if (logLevel != null) {
-        log.info("Log level override, property solr.log.level={}", logLevel);
-        StartupLoggingUtils.changeLogLevel(logLevel);
-      }
-      
-      String exclude = config.getInitParameter("excludePatterns");
-      if(exclude != null) {
-        String[] excludeArray = exclude.split(",");
-        excludePatterns = new ArrayList<>();
-        for (String element : excludeArray) {
-          excludePatterns.add(Pattern.compile(element));
-        }
+      coreService = CoreContainerProvider.serviceForContext(config.getServletContext());
+      SSLConfigurationsFactory.current().init();
+      if (log.isTraceEnabled()) {
+        log.trace("SolrDispatchFilter.init(): {}", this.getClass().getClassLoader());
       }
 
-      coresInit = createCoreContainer(computeSolrHome(config), extraProperties);
-      this.httpClient = coresInit.getUpdateShardHandler().getDefaultHttpClient();
-      setupJvmMetrics(coresInit);
-      
-      SolrZkClient zkClient = null;
-      ZkController zkController = coresInit.getZkController();
-      
-      if (zkController != null) {
-        zkClient = zkController.getZkClient();
-      }
-      
-      RateLimitManager.Builder builder = new RateLimitManager.Builder(zkClient);
-      
-      this.rateLimitManager = builder.build();
-      
-      if (zkController != null) {
-        zkController.zkStateReader.registerClusterPropertiesListener(this.rateLimitManager);
-      }
-      
-      if (log.isDebugEnabled()) {
-        log.debug("user.dir={}", System.getProperty("user.dir"));
-      }
-    } catch( Throwable t ) {
+      configExcludes(this, config.getInitParameter("excludePatterns"));
+    } catch (InterruptedException e) {
+      throw new ServletException("Interrupted while fetching core service", e);
+
+    } catch (Throwable t) {
       // catch this so our filter still works
-      log.error( "Could not start Solr. Check solr/home property and the logs");
-      SolrCore.log( t );
+      log.error("Could not start Dispatch Filter.");
+      SolrCore.log(t);
       if (t instanceof Error) {
         throw (Error) t;
       }
-      
-    } finally{
+    } finally {
       log.trace("SolrDispatchFilter.init() done");
-      this.cores = coresInit; // crucially final assignment 
       init.countDown();
     }
   }
 
-  private void setupJvmMetrics(CoreContainer coresInit)  {
-    metricManager = coresInit.getMetricManager();
-    registryName = SolrMetricManager.getRegistryName(SolrInfoBean.Group.jvm);
-    final Set<String> hiddenSysProps = coresInit.getConfig().getMetricsConfig().getHiddenSysProps();
-    try {
-      metricManager.registerAll(registryName, new AltBufferPoolMetricSet(), SolrMetricManager.ResolutionStrategy.IGNORE, "buffers");
-      metricManager.registerAll(registryName, new ClassLoadingGaugeSet(), SolrMetricManager.ResolutionStrategy.IGNORE, "classes");
-      metricManager.registerAll(registryName, new OperatingSystemMetricSet(), SolrMetricManager.ResolutionStrategy.IGNORE, "os");
-      metricManager.registerAll(registryName, new GarbageCollectorMetricSet(), SolrMetricManager.ResolutionStrategy.IGNORE, "gc");
-      metricManager.registerAll(registryName, new MemoryUsageGaugeSet(), SolrMetricManager.ResolutionStrategy.IGNORE, "memory");
-      metricManager.registerAll(registryName, new ThreadStatesGaugeSet(), SolrMetricManager.ResolutionStrategy.IGNORE, "threads"); // todo should we use CachedThreadStatesGaugeSet instead?
-      MetricsMap sysprops = new MetricsMap(map -> {
-        System.getProperties().forEach((k, v) -> {
-          if (!hiddenSysProps.contains(k)) {
-            map.putNoEx(String.valueOf(k), v);
-          }
-        });
-      });
-      metricManager.registerGauge(null, registryName, sysprops, metricTag, SolrMetricManager.ResolutionStrategy.IGNORE, "properties", "system");
-      MetricsMap sysenv = new MetricsMap(map -> {
-        System.getenv().forEach((k, v) -> {
-          if (!hiddenSysProps.contains(k)) {
-            map.putNoEx(String.valueOf(k), v);
-          }
-        });
-      });
-      metricManager.registerGauge(null, registryName, sysenv, metricTag, SolrMetricManager.ResolutionStrategy.IGNORE, "env", "system");
-    } catch (Exception e) {
-      log.warn("Error registering JVM metrics", e);
-    }
-  }
-
-  private void logWelcomeBanner() {
-    // _Really_ sorry about how clumsy this is as a result of the logging call checker, but this is the only one
-    // that's so ugly so far.
-    if (log.isInfoEnabled()) {
-      log.info(" ___      _       Welcome to Apache Solr™ version {}", solrVersion());
-    }
-    if (log.isInfoEnabled()) {
-      log.info("/ __| ___| |_ _   Starting in {} mode on port {}", isCloudMode() ? "cloud" : "standalone", getSolrPort());
-    }
-    if (log.isInfoEnabled()) {
-      log.info("\\__ \\/ _ \\ | '_|  Install dir: {}", System.getProperty(SOLR_INSTALL_DIR_ATTRIBUTE));
-    }
-    if (log.isInfoEnabled()) {
-      log.info("|___/\\___/_|_|    Start time: {}", Instant.now());
-    }
-  }
-
-  private String solrVersion() {
-    String specVer = Version.LATEST.toString();
-    try {
-      String implVer = SolrCore.class.getPackage().getImplementationVersion();
-      return (specVer.equals(implVer.split(" ")[0])) ? specVer : implVer;
-    } catch (Exception e) {
-      return specVer;
-    }
-  }
-
-  private String getSolrPort() {
-    return System.getProperty("jetty.port");
-  }
-
-  /** 
-   * We are in cloud mode if Java option zkRun exists OR zkHost exists and is non-empty 
-   * @see SolrXmlConfig#wrapAndSetZkHostFromSysPropIfNeeded
-   * @see #extraProperties
-   * @see #init
-   */
-  private boolean isCloudMode() {
-    assert null != extraProperties; // we should never be called w/o this being initialized
-    return (null != extraProperties.getProperty(SolrXmlConfig.ZK_HOST)) || (null != System.getProperty("zkRun"));
+  public CoreContainer getCores() throws UnavailableException {
+    return coreService.getService().getCoreContainer();
   }
 
-  /**
-   * Returns the effective Solr Home to use for this node, based on looking up the value in this order:
-   * <ol>
-   * <li>attribute in the FilterConfig</li>
-   * <li>JNDI: via java:comp/env/solr/home</li>
-   * <li>The system property solr.solr.home</li>
-   * <li>Look in the current working directory for a solr/ directory</li>
-   * </ol>
-   * <p>
-   *
-   * @return the Solr home, absolute and normalized.
-   * @see #SOLRHOME_ATTRIBUTE
-   */
-  private static Path computeSolrHome(FilterConfig config) {
-
-    // start with explicit check of servlet config...
-    String source = "servlet config: " + SOLRHOME_ATTRIBUTE;
-    String home = (String) config.getServletContext().getAttribute(SOLRHOME_ATTRIBUTE);
-
-    if (null == home) {
-      final String lookup = "java:comp/env/solr/home";
-      // Try JNDI
-      source = "JNDI: " + lookup;
-      try {
-        Context c = new InitialContext();
-        home = (String) c.lookup(lookup);
-      } catch (NoInitialContextException e) {
-        log.debug("JNDI not configured for solr (NoInitialContextEx)");
-      } catch (NamingException e) {
-        log.debug("No /solr/home in JNDI");
-      } catch (RuntimeException ex) {
-        log.warn("Odd RuntimeException while testing for JNDI: ", ex);
-      }
-    }
-
-    if (null == home) {
-      // Now try system property
-      final String prop = "solr.solr.home";
-      source = "system property: " + prop;
-      home = System.getProperty(prop);
-    }
-
-    if (null == home) {
-      // if all else fails, assume default dir
-      home = "solr/";
-      source = "defaulted to '" + home + "' ... could not find system property or JNDI";
-    }
-    final Path solrHome = Paths.get(home).toAbsolutePath().normalize();
-    log.info("Solr Home: {} (source: {})", solrHome, source);
-    
-    return solrHome;
-  }
-  
-  /**
-   * Override this to change CoreContainer initialization
-   * @return a CoreContainer to hold this server's cores
-   */
-  protected CoreContainer createCoreContainer(Path solrHome, Properties nodeProps) {
-    NodeConfig nodeConfig = loadNodeConfig(solrHome, nodeProps);
-    final CoreContainer coreContainer = new CoreContainer(nodeConfig, true);
-    coreContainer.load();
-    return coreContainer;
-  }
-
-  /**
-   * Get the NodeConfig whether stored on disk, in ZooKeeper, etc.
-   * This may also be used by custom filters to load relevant configuration.
-   * @return the NodeConfig
-   */
-  public static NodeConfig loadNodeConfig(Path solrHome, Properties nodeProperties) {
-    if (!StringUtils.isEmpty(System.getProperty("solr.solrxml.location"))) {
-      log.warn("Solr property solr.solrxml.location is no longer supported. Will automatically load solr.xml from ZooKeeper if it exists");
-    }
-    nodeProperties = SolrXmlConfig.wrapAndSetZkHostFromSysPropIfNeeded(nodeProperties);
-    String zkHost = nodeProperties.getProperty(SolrXmlConfig.ZK_HOST);
-    if (!StringUtils.isEmpty(zkHost)) {
-      int startUpZkTimeOut = Integer.getInteger("waitForZk", 30);
-      startUpZkTimeOut *= 1000;
-      try (SolrZkClient zkClient = new SolrZkClient(zkHost, startUpZkTimeOut, startUpZkTimeOut)) {
-        if (zkClient.exists("/solr.xml", true)) {
-          log.info("solr.xml found in ZooKeeper. Loading...");
-          byte[] data = zkClient.getData("/solr.xml", null, null, true);
-          return SolrXmlConfig.fromInputStream(solrHome, new ByteArrayInputStream(data), nodeProperties, true);
-        }
-      } catch (Exception e) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Error occurred while loading solr.xml from zookeeper", e);
-      }
-      log.info("Loading solr.xml from SolrHome (not found in ZooKeeper)");
-    }
-
-    return SolrXmlConfig.fromSolrHome(solrHome, nodeProperties);
-  }
-  
-  public CoreContainer getCores() {
-    return cores;
-  }
-  
   @Override
   public void destroy() {
-    if (closeOnDestroy) {
-      close();
-    }
-  }
-  
-  public void close() {
-    CoreContainer cc = cores;
-    cores = null;
-    try {
-      if (metricManager != null) {
-        try {
-          metricManager.unregisterGauges(registryName, metricTag);
-        } catch (NullPointerException e) {
-          // okay
-        } catch (Exception e) {
-          log.warn("Exception closing FileCleaningTracker", e);
-        } finally {
-          metricManager = null;
-        }
-      }
-    } finally {
-      if (cc != null) {
-        httpClient = null;
-        cc.shutdown();
-      }
-    }
+    // CoreService shuts itself down as a ContextListener. The filter does not own anything with a lifecycle anymore! Yay!
   }
-  
+
   @Override
   public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
     try (var mdcSnapshot = MDCSnapshot.create()) {
       assert null != mdcSnapshot; // prevent compiler warning
       MDCLoggingContext.reset();
-      MDCLoggingContext.setNode(cores);
-      
+      MDCLoggingContext.setNode(getCores());
+
       doFilter(request, response, chain, false);
     }
   }
@@ -446,141 +178,70 @@ public class SolrDispatchFilter extends BaseSolrFilter {
     HttpServletRequest request = closeShield((HttpServletRequest)_request, retry);
     HttpServletResponse response = closeShield((HttpServletResponse)_response, retry);
 
-    String requestPath = ServletUtils.getPathAfterContext(request);
-    // No need to even create the HttpSolrCall object if this path is excluded.
-    if (excludePatterns != null) {
-      for (Pattern p : excludePatterns) {
-        Matcher matcher = p.matcher(requestPath);
-        if (matcher.lookingAt()) {
-          chain.doFilter(request, response);
-          return;
-        }
-      }
+    if (excludedPath(excludePatterns, request, response, chain)) {
+      return;
     }
-
-    Tracer tracer = cores == null ? GlobalTracer.get() : cores.getTracer();
-    Span span = buildSpan(tracer, request);
-    request.setAttribute(Tracer.class.getName(), tracer);
-    request.setAttribute(Span.class.getName(), span);
-    boolean accepted = false;
-    try (var scope = tracer.scopeManager().activate(span)) {
-      assert scope != null; // prevent javac warning about scope being unused
-      MDCLoggingContext.setTracerId(span.context().toTraceId()); // handles empty string
-
-      if (cores == null || cores.isShutDown()) {
-        try {
-          init.await();
-        } catch (InterruptedException e) { //well, no wait then
-        }
-        final String msg = "Error processing the request. CoreContainer is either not initialized or shutting down.";
-        if (cores == null || cores.isShutDown()) {
-          log.error(msg);
-          throw new UnavailableException(msg);
-        }
-      }
-
+    Tracer t = getCores() == null ? GlobalTracer.get() : getCores().getTracer();
+    request.setAttribute(Tracer.class.getName(), t);
+    RateLimitManager rateLimitManager = coreService.getService().getRateLimitManager();
+    request.setAttribute(RateLimitManager.class.getName(), rateLimitManager);
+    ServletUtils.rateLimitRequest(request, response, () -> {
       try {
-        accepted = rateLimitManager.handleRequest(request);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        throw new SolrException(ErrorCode.SERVER_ERROR, e.getMessage());
-      }
-
-      if (!accepted) {
-        String errorMessage = "Too many requests for this request type." +
-            "Please try after some time or increase the quota for this request type";
-
-        response.sendError(429, errorMessage);
-      }
-
-      AtomicReference<HttpServletRequest> wrappedRequest = new AtomicReference<>();
-      if (!authenticateRequest(request, response, wrappedRequest)) { // the response and status code have already been sent
-        return;
-      }
-
-      if (wrappedRequest.get() != null) {
-        request = wrappedRequest.get();
+        dispatch(chain, request, response, retry);
+      } catch (IOException | ServletException | SolrAuthenticationException e) {
+        throw new ExceptionWhileTracing(e);
       }
+    }, true);
+  }
 
-      if (cores.getAuthenticationPlugin() != null) {
-        if (log.isDebugEnabled()) {
-          log.debug("User principal: {}", request.getUserPrincipal());
-        }
-        span.setTag(Tags.DB_USER, String.valueOf(request.getUserPrincipal()));
-      }
+  private static Span getSpan(HttpServletRequest req) {
+    return (Span) req.getAttribute(ATTR_TRACING_SPAN);
+  }
 
-      HttpSolrCall call = getHttpSolrCall(request, response, retry);
-      ExecutorUtil.setServerThreadFlag(Boolean.TRUE);
-      try {
-        Action result = call.call();
-        switch (result) {
-          case PASSTHROUGH:
-            span.log("SolrDispatchFilter PASSTHROUGH");
-            chain.doFilter(request, response);
-            break;
-          case RETRY:
-            span.log("SolrDispatchFilter RETRY");
-            doFilter(request, response, chain, true); // RECURSION
-            break;
-          case FORWARD:
-            span.log("SolrDispatchFilter FORWARD");
-            request.getRequestDispatcher(call.getPath()).forward(request, response);
-            break;
-          case ADMIN:
-          case PROCESS:
-          case REMOTEQUERY:
-          case RETURN:
-            break;
-        }
-      } finally {
-        call.destroy();
-        ExecutorUtil.setServerThreadFlag(null);
-      }
-    } finally {
-      consumeInputFully(request, response);
-      SolrRequestInfo.reset();
-      SolrRequestParsers.cleanupMultipartFiles(request);
+  private void dispatch(FilterChain chain, HttpServletRequest request, HttpServletResponse response, boolean retry) throws IOException, ServletException, SolrAuthenticationException {
 
-      if (accepted) {
-        rateLimitManager.decrementActiveRequests(request);
-      }
-      span.setTag(Tags.HTTP_STATUS, response.getStatus());
-      span.finish();
+    AtomicReference<HttpServletRequest> wrappedRequest = new AtomicReference<>();
+    authenticateRequest(request, response, wrappedRequest);
+    if (wrappedRequest.get() != null) {
+      request = wrappedRequest.get();
     }
-  }
 
-  protected Span buildSpan(Tracer tracer, HttpServletRequest request) {
-    if (tracer instanceof NoopTracer) {
-      return NoopSpan.INSTANCE;
-    }
-    Tracer.SpanBuilder spanBuilder = tracer.buildSpan("http.request") // will be changed later
-        .asChildOf(tracer.extract(Format.Builtin.HTTP_HEADERS, new HttpServletCarrier(request)))
-        .withTag(Tags.SPAN_KIND, Tags.SPAN_KIND_SERVER)
-        .withTag(Tags.HTTP_METHOD, request.getMethod())
-        .withTag(Tags.HTTP_URL, request.getRequestURL().toString());
-    if (request.getQueryString() != null) {
-      spanBuilder.withTag("http.params", request.getQueryString());
+    if (getCores().getAuthenticationPlugin() != null) {
+      if (log.isDebugEnabled()) {
+        log.debug("User principal: {}", request.getUserPrincipal());
+      }
+      getSpan(request).setTag(Tags.DB_USER, String.valueOf(request.getUserPrincipal()));
     }
-    spanBuilder.withTag(Tags.DB_TYPE, "solr");
-    return spanBuilder.start();
-  }
 
-  // we make sure we read the full client request so that the client does
-  // not hit a connection reset and we can reuse the 
-  // connection - see SOLR-8453 and SOLR-8683
-  private void consumeInputFully(HttpServletRequest req, HttpServletResponse response) {
+    HttpSolrCall call = getHttpSolrCall(request, response, retry);
+    ExecutorUtil.setServerThreadFlag(Boolean.TRUE);
     try {
-      ServletInputStream is = req.getInputStream();
-      while (!is.isFinished() && is.read() != -1) {}
-    } catch (IOException e) {
-      if (req.getHeader(HttpHeaders.EXPECT) != null && response.isCommitted()) {
-        log.debug("No input stream to consume from client");
-      } else {
-        log.info("Could not consume full client request", e);
+      Action result = call.call();
+      switch (result) {
+        case PASSTHROUGH:
+          getSpan(request).log("SolrDispatchFilter PASSTHROUGH");
+          chain.doFilter(request, response);
+          break;
+        case RETRY:
+          getSpan(request).log("SolrDispatchFilter RETRY");
+          doFilter(request, response, chain, true); // RECURSION
+          break;
+        case FORWARD:
+          getSpan(request).log("SolrDispatchFilter FORWARD");
+          request.getRequestDispatcher(call.getPath()).forward(request, response);
+          break;
+        case ADMIN:
+        case PROCESS:
+        case REMOTEQUERY:
+        case RETURN:
+          break;
       }
+    } finally {
+      call.destroy();
+      ExecutorUtil.setServerThreadFlag(null);
     }
   }
-  
+
   /**
    * Allow a subclass to modify the HttpSolrCall.  In particular, subclasses may
    * want to add attributes to the request and send errors differently
@@ -588,6 +249,12 @@ public class SolrDispatchFilter extends BaseSolrFilter {
   protected HttpSolrCall getHttpSolrCall(HttpServletRequest request, HttpServletResponse response, boolean retry) {
     String path = ServletUtils.getPathAfterContext(request);
 
+    CoreContainer cores;
+    try {
+      cores = getCores();
+    } catch (UnavailableException e) {
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Core Container Unavailable", e);
+    }
     if (isV2Enabled && (path.startsWith("/____v2/") || path.equals("/____v2"))) {
       return new V2HttpCall(this, cores, request, response, false);
     } else {
@@ -595,26 +262,33 @@ public class SolrDispatchFilter extends BaseSolrFilter {
     }
   }
 
-  private boolean authenticateRequest(HttpServletRequest request, HttpServletResponse response, final AtomicReference<HttpServletRequest> wrappedRequest) throws IOException {
-    boolean requestContinues = false;
+  // TODO: make this a servlet filter
+  private void authenticateRequest(HttpServletRequest request, HttpServletResponse response, final AtomicReference<HttpServletRequest> wrappedRequest) throws IOException, SolrAuthenticationException {
+    boolean requestContinues;
     final AtomicBoolean isAuthenticated = new AtomicBoolean(false);
+    CoreContainer cores;
+    try {
+      cores = getCores();
+    } catch (UnavailableException e) {
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Core Container Unavailable", e);
+    }
     AuthenticationPlugin authenticationPlugin = cores.getAuthenticationPlugin();
     if (authenticationPlugin == null) {
       if (shouldAudit(EventType.ANONYMOUS)) {
         cores.getAuditLoggerPlugin().doAudit(new AuditEvent(EventType.ANONYMOUS, request));
       }
-      return true;
+      return;
     } else {
       // /admin/info/key must be always open. see SOLR-9188
       String requestPath = ServletUtils.getPathAfterContext(request);
       if (PublicKeyHandler.PATH.equals(requestPath)) {
         log.debug("Pass through PKI authentication endpoint");
-        return true;
+        return;
       }
       // /solr/ (Admin UI) must be always open to allow displaying Admin UI with login page  
       if ("/solr/".equals(requestPath) || "/".equals(requestPath)) {
         log.debug("Pass through Admin UI entry point");
-        return true;
+        return;
       }
       String header = request.getHeader(PKIAuthenticationPlugin.HEADER);
       if (header != null && cores.getPkiAuthenticationPlugin() != null)
@@ -623,7 +297,17 @@ public class SolrDispatchFilter extends BaseSolrFilter {
         if (log.isDebugEnabled()) {
           log.debug("Request to authenticate: {}, domain: {}, port: {}", request, request.getLocalName(), request.getLocalPort());
         }
-        // upon successful authentication, this should call the chain's next filter.
+        // For legacy reasons, upon successful authentication this wants to call the chain's next filter, which
+        // obfuscates the layout of the code since one usually expects to be able to find the call to doFilter()
+        // in the implementation of javax.servlet.Filter. Supplying a trivial impl here to keep existing code happy
+        // while making the flow clearer. Chain will be called after this method completes. Eventually auth all
+        // moves to its own filter (hopefully). Most auth plugins simply return true after calling this anyway,
+        // so they obviously don't care. Kerberos plugins seem to mostly use it to satisfy the api of a wrapped
+        // instance of javax.servlet.Filter and neither of those seem to be doing anything fancy with the filter chain,
+        // so this would seem to be a hack brought on by the fact that our auth code has been forced to be code
+        // within dispatch filter, rather than being a filter itself. The HadoopAuthPlugin has a suspicious amount
+        // of code after the call to doFilter() which seems to imply that anything in this chain can get executed before
+        // authentication completes, and I can't figure out how that's a good idea in the first place.
         requestContinues = authenticationPlugin.authenticate(request, response, (req, rsp) -> {
           isAuthenticated.set(true);
           wrappedRequest.set((HttpServletRequest) req);
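
    To make the contract described in the comment above concrete, here is a hedged plugin-side
    sketch. This is not Solr's AuthenticationPlugin API: the class name, the header check, and the
    exact signature are invented purely to illustrate the callback shape, i.e. the plugin calls the
    supplied FilterChain on success and returns whether the request should continue.

        // Illustrative only: not part of this commit and not Solr's real plugin API.
        import javax.servlet.FilterChain;
        import javax.servlet.http.HttpServletRequest;
        import javax.servlet.http.HttpServletResponse;

        public class ExampleCallbackAuth {
          public boolean authenticate(HttpServletRequest request, HttpServletResponse response,
                                      FilterChain filterChain) throws Exception {
            if (request.getHeader("Authorization") == null) { // stand-in for a real credential check
              response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "authentication required");
              return false; // response already committed; the dispatch filter stops processing
            }
            // On success the plugin invokes the supplied chain. In SolrDispatchFilter that chain is
            // now the trivial impl shown above, which only records the (possibly wrapped) request.
            filterChain.doFilter(request, response);
            return true; // request continues through the dispatch filter
          }
        }
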
@@ -644,154 +328,31 @@ public class SolrDispatchFilter extends BaseSolrFilter {
       if (shouldAudit(EventType.REJECTED)) {
         cores.getAuditLoggerPlugin().doAudit(new AuditEvent(EventType.REJECTED, request));
       }
-      return false;
+      throw new SolrAuthenticationException();
     }
     if (shouldAudit(EventType.AUTHENTICATED)) {
       cores.getAuditLoggerPlugin().doAudit(new AuditEvent(EventType.AUTHENTICATED, request));
     }
-    return true;
-  }
-  
-  public static class ClosedServletInputStream extends ServletInputStream {
-    
-    public static final ClosedServletInputStream CLOSED_SERVLET_INPUT_STREAM = new ClosedServletInputStream();
-
-    @Override
-    public int read() {
-      return -1;
-    }
-
-    @Override
-    public boolean isFinished() {
-      return false;
-    }
-
-    @Override
-    public boolean isReady() {
-      return false;
-    }
-
-    @Override
-    public void setReadListener(ReadListener arg0) {}
+    // Auth Success
   }
-  
-  public static class ClosedServletOutputStream extends ServletOutputStream {
-    
-    public static final ClosedServletOutputStream CLOSED_SERVLET_OUTPUT_STREAM = new ClosedServletOutputStream();
-    
-    @Override
-    public void write(final int b) throws IOException {
-      throw new IOException("write(" + b + ") failed: stream is closed");
-    }
-    
-    @Override
-    public void flush() throws IOException {
-      throw new IOException("flush() failed: stream is closed");
-    }
 
-    @Override
-    public boolean isReady() {
-      return false;
-    }
-
-    @Override
-    public void setWriteListener(WriteListener arg0) {
-      throw new RuntimeException("setWriteListener() failed: stream is closed");
-    }
-  }
-
-  private static String CLOSE_STREAM_MSG = "Attempted close of http request or response stream - in general you should not do this, "
-      + "you may spoil connection reuse and possibly disrupt a client. If you must close without actually needing to close, "
-      + "use a CloseShield*Stream. Closing or flushing the response stream commits the response and prevents us from modifying it. "
-      + "Closing the request stream prevents us from gauranteeing ourselves that streams are fully read for proper connection reuse."
-      + "Let the container manage the lifecycle of these streams when possible.";
- 
 
   /**
    * Check if audit logging is enabled and should happen for given event type
    * @param eventType the audit event
    */
   private boolean shouldAudit(AuditEvent.EventType eventType) {
-    return cores.getAuditLoggerPlugin() != null && cores.getAuditLoggerPlugin().shouldLog(eventType);
-  }
-  
-  /**
-   * Wrap the request's input stream with a close shield. If this is a
-   * retry, we will assume that the stream has already been wrapped and do nothing.
-   *
-   * Only the container should ever actually close the servlet output stream.
-   *
-   * @param request The request to wrap.
-   * @param retry If this is an original request or a retry.
-   * @return A request object with an {@link InputStream} that will ignore calls to close.
-   */
-  public static HttpServletRequest closeShield(HttpServletRequest request, boolean retry) {
-    if (!retry) {
-      return new HttpServletRequestWrapper(request) {
-
-        @Override
-        public ServletInputStream getInputStream() throws IOException {
-
-          return new ServletInputStreamWrapper(super.getInputStream()) {
-            @Override
-            public void close() {
-              // even though we skip closes, we let local tests know not to close so that a full understanding can take
-              // place
-              assert Thread.currentThread().getStackTrace()[2].getClassName().matches(
-                  "org\\.apache\\.(?:solr|lucene).*") ? false : true : CLOSE_STREAM_MSG;
-              this.stream = ClosedServletInputStream.CLOSED_SERVLET_INPUT_STREAM;
-            }
-          };
-
-        }
-      };
-    } else {
-      return request;
-    }
-  }
-  
-  /**
-   * Wrap the response's output stream with a close shield. If this is a
-   * retry, we will assume that the stream has already been wrapped and do nothing.
-   *
-   * Only the container should ever actually close the servlet request stream.
-   *
-   * @param response The response to wrap.
-   * @param retry If this response corresponds to an original request or a retry.
-   * @return A response object with an {@link OutputStream} that will ignore calls to close.
-   */
-  public static HttpServletResponse closeShield(HttpServletResponse response, boolean retry) {
-    if (!retry) {
-      return new HttpServletResponseWrapper(response) {
-
-        @Override
-        public ServletOutputStream getOutputStream() throws IOException {
-
-          return new ServletOutputStreamWrapper(super.getOutputStream()) {
-            @Override
-            public void close() {
-              // even though we skip closes, we let local tests know not to close so that a full understanding can take
-              // place
-              assert Thread.currentThread().getStackTrace()[2].getClassName().matches(
-                  "org\\.apache\\.(?:solr|lucene).*") ? false
-                      : true : CLOSE_STREAM_MSG;
-              stream = ClosedServletOutputStream.CLOSED_SERVLET_OUTPUT_STREAM;
-            }
-          };
-        }
-
-      };
-    } else {
-      return response;
+    CoreContainer cores;
+    try {
+      cores = getCores();
+    } catch (UnavailableException e) {
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Core Container Unavailable", e);
     }
-  }
-
-  public void closeOnDestroy(boolean closeOnDestroy) {
-    this.closeOnDestroy = closeOnDestroy;
+    return cores.getAuditLoggerPlugin() != null && cores.getAuditLoggerPlugin().shouldLog(eventType);
   }
 
   @VisibleForTesting
   void replaceRateLimitManager(RateLimitManager rateLimitManager) {
-    this.rateLimitManager = rateLimitManager;
+    coreService.getService().setRateLimitManager(rateLimitManager);
   }
 }
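
For reference, the pattern this file now relies on is: CoreContainerProvider (a ServletContextListener
registered in web.xml below) owns the CoreContainer lifecycle, and filters obtain it through a
context-scoped ServiceHolder. A minimal sketch of a filter written against that pattern follows; only
the CoreContainerProvider and ServiceHolder calls that appear in this patch are real, while the filter
itself (ExampleFilter) and its error handling are hypothetical and not part of this commit.

    import javax.servlet.Filter;
    import javax.servlet.FilterChain;
    import javax.servlet.FilterConfig;
    import javax.servlet.ServletException;
    import javax.servlet.ServletRequest;
    import javax.servlet.ServletResponse;
    import java.io.IOException;

    import org.apache.solr.core.CoreContainer;
    import org.apache.solr.servlet.CoreContainerProvider;
    import org.apache.solr.servlet.CoreContainerProvider.ServiceHolder;

    // Hypothetical filter, shown only to illustrate the ServiceHolder lookup pattern.
    public class ExampleFilter implements Filter {
      private ServiceHolder coreService;

      @Override
      public void init(FilterConfig config) throws ServletException {
        try {
          // CoreContainerProvider is registered as a ServletContextListener, so the holder
          // is normally available by the time filters initialize.
          coreService = CoreContainerProvider.serviceForContext(config.getServletContext());
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw new ServletException("Interrupted while fetching core service", e);
        }
      }

      @Override
      public void doFilter(ServletRequest req, ServletResponse rsp, FilterChain chain)
          throws IOException, ServletException {
        // May throw UnavailableException (a ServletException) if startup failed.
        CoreContainer cores = coreService.getService().getCoreContainer();
        // ... use cores as needed, then continue the chain ...
        chain.doFilter(req, rsp);
      }

      @Override
      public void destroy() {
        // Nothing to close here: CoreContainerProvider owns the CoreContainer lifecycle.
      }
    }
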
diff --git a/solr/core/src/test/org/apache/solr/cloud/SolrXmlInZkTest.java b/solr/core/src/test/org/apache/solr/cloud/SolrXmlInZkTest.java
index 8e1358c..55fcab8 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SolrXmlInZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SolrXmlInZkTest.java
@@ -29,7 +29,6 @@ import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.core.NodeConfig;
-import org.apache.solr.servlet.SolrDispatchFilter;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.RuleChain;
@@ -90,7 +89,7 @@ public class SolrXmlInZkTest extends SolrTestCaseJ4 {
     props.setProperty("solr.test.sys.prop1", "propone");
     props.setProperty("solr.test.sys.prop2", "proptwo");
 
-    cfg = SolrDispatchFilter.loadNodeConfig(solrHome, props);
+    cfg = NodeConfig.loadNodeConfig(solrHome, props);
     if (log.isInfoEnabled()) {
       log.info("####SETUP_END {}", getTestName());
     }
diff --git a/solr/core/src/test/org/apache/solr/core/TestLazyCores.java b/solr/core/src/test/org/apache/solr/core/TestLazyCores.java
index 11aa246..81889a8 100644
--- a/solr/core/src/test/org/apache/solr/core/TestLazyCores.java
+++ b/solr/core/src/test/org/apache/solr/core/TestLazyCores.java
@@ -40,7 +40,6 @@ import org.apache.solr.handler.admin.MetricsHandler;
 import org.apache.solr.request.LocalSolrQueryRequest;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.servlet.SolrDispatchFilter;
 import org.apache.solr.update.AddUpdateCommand;
 import org.apache.solr.update.CommitUpdateCommand;
 import org.apache.solr.update.UpdateHandler;
@@ -94,7 +93,7 @@ public class TestLazyCores extends SolrTestCaseJ4 {
       copyMinConf(new File(solrHomeDirectory, "collection" + idx));
     }
 
-    NodeConfig cfg = SolrDispatchFilter.loadNodeConfig(solrHomeDirectory.toPath(), null);
+    NodeConfig cfg = NodeConfig.loadNodeConfig(solrHomeDirectory.toPath(), null);
     return createCoreContainer(cfg, testCores);
   }
   
diff --git a/solr/core/src/test/org/apache/solr/servlet/HttpSolrCallGetCoreTest.java b/solr/core/src/test/org/apache/solr/servlet/HttpSolrCallGetCoreTest.java
index 34eb344..7c06028 100644
--- a/solr/core/src/test/org/apache/solr/servlet/HttpSolrCallGetCoreTest.java
+++ b/solr/core/src/test/org/apache/solr/servlet/HttpSolrCallGetCoreTest.java
@@ -20,6 +20,7 @@ package org.apache.solr.servlet;
 import javax.servlet.ReadListener;
 import javax.servlet.ServletInputStream;
 import javax.servlet.ServletOutputStream;
+import javax.servlet.UnavailableException;
 import javax.servlet.WriteListener;
 import java.io.IOException;
 import java.util.HashSet;
@@ -60,7 +61,7 @@ public class HttpSolrCallGetCoreTest extends SolrCloudTestCase {
     assertCoreChosen(NUM_SHARD * REPLICA_FACTOR, new TestRequest("/collection1/select"));
   }
 
-  private void assertCoreChosen(int numCores, TestRequest testRequest) {
+  private void assertCoreChosen(int numCores, TestRequest testRequest) throws UnavailableException {
     JettySolrRunner jettySolrRunner = cluster.getJettySolrRunner(0);
     Set<String> coreNames = new HashSet<>();
     SolrDispatchFilter dispatchFilter = jettySolrRunner.getSolrDispatchFilter();
diff --git a/solr/test-framework/src/java/org/apache/solr/util/BaseTestHarness.java b/solr/test-framework/src/java/org/apache/solr/util/BaseTestHarness.java
index ad1d38e..2f825e3 100644
--- a/solr/test-framework/src/java/org/apache/solr/util/BaseTestHarness.java
+++ b/solr/test-framework/src/java/org/apache/solr/util/BaseTestHarness.java
@@ -77,7 +77,7 @@ abstract public class BaseTestHarness {
 
     if (tests==null || tests.length == 0) return null;
 
-    Document document = null;
+    Document document;
     try {
       document = getXmlDocumentBuilder().parse(new ByteArrayInputStream
           (xml.getBytes(StandardCharsets.UTF_8)));
@@ -102,7 +102,7 @@ abstract public class BaseTestHarness {
     throws XPathExpressionException, SAXException {
     if (null == xpath) return null;
 
-    Document document = null;
+    Document document;
     try {
       document = getXmlDocumentBuilder().parse(new ByteArrayInputStream
           (xml.getBytes(StandardCharsets.UTF_8)));
diff --git a/solr/webapp/web/WEB-INF/web.xml b/solr/webapp/web/WEB-INF/web.xml
index 2599420..33cad1f 100644
--- a/solr/webapp/web/WEB-INF/web.xml
+++ b/solr/webapp/web/WEB-INF/web.xml
@@ -21,7 +21,9 @@
          version="2.5"
          metadata-complete="true"
 >
-
+  <listener>
+   <listener-class>org.apache.solr.servlet.CoreContainerProvider</listener-class>
+  </listener>
   <!-- Any path (name) registered in solrconfig.xml will be sent to that filter -->
   <filter>
     <filter-name>SolrRequestFilter</filter-name>