Posted to commits@solr.apache.org by ge...@apache.org on 2023/03/24 18:49:59 UTC

[solr] branch main updated: SOLR-16697: Add API to install indices into specific shards (#1458)

This is an automated email from the ASF dual-hosted git repository.

gerlowskija pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/solr.git


The following commit(s) were added to refs/heads/main by this push:
     new 81fe0045aaa SOLR-16697: Add API to install indices into specific shards (#1458)
81fe0045aaa is described below

commit 81fe0045aaa63b115808a8c3d87ce96dc7921e8b
Author: Jason Gerlowski <ge...@apache.org>
AuthorDate: Fri Mar 24 14:49:50 2023 -0400

    SOLR-16697: Add API to install indices into specific shards (#1458)
    
    API is available at POST /collections/collName/shards/shardName/install
    
    All of the "meat" of this API already existed in some of the utilities used
    by the RESTORE API.  This commit largely just adds API bindings to
    expose an interface suitable for the single-shard installation use-case.
    
    Callers are expected to put the involved collection into "read only" mode
    prior to installing data to shards, and must ensure that the data being
    installed is compatible with the collection's schema and config.
---
 solr/CHANGES.txt                                   |   4 +-
 .../solr/cloud/api/collections/CollApiCmds.java    |   2 +
 .../cloud/api/collections/InstallShardDataCmd.java | 127 ++++++++
 .../solr/handler/admin/CollectionsHandler.java     |  22 ++
 .../solr/handler/admin/CoreAdminHandler.java       |   3 +-
 .../solr/handler/admin/CoreAdminOperation.java     |   2 +
 .../solr/handler/admin/InstallCoreDataOp.java      |  53 +++
 .../solr/handler/admin/api/InstallCoreDataAPI.java | 135 ++++++++
 .../handler/admin/api/InstallShardDataAPI.java     | 157 +++++++++
 .../api/collections/LocalFSInstallShardTest.java   |  54 ++++
 .../org/apache/solr/gcs/GCSInstallShardTest.java   |  65 ++++
 .../org/apache/solr/s3/S3InstallShardTest.java     |  81 +++++
 .../deployment-guide/pages/shard-management.adoc   | 126 ++++++++
 .../pages/major-changes-in-solr-9.adoc             |   4 +
 .../solrj/request/CollectionAdminRequest.java      |  59 +++-
 .../solr/common/params/CollectionParams.java       |   1 +
 .../apache/solr/common/params/CoreAdminParams.java |   1 +
 .../api/collections/AbstractInstallShardTest.java  | 356 +++++++++++++++++++++
 18 files changed, 1246 insertions(+), 6 deletions(-)
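
For orientation, a rough SolrJ sketch of the workflow this commit enables is shown
below. It is illustrative only: the collection, shard, location, and repository names
are placeholders, and the read-only toggle uses the pre-existing MODIFYCOLLECTION
support (nothing added by this commit), so the exact readOnly semantics follow that API.

    import java.util.Map;
    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class InstallShardWorkflowSketch {
      public static void installShardOne(SolrClient client) throws Exception {
        // 1. Put the collection into read-only mode (required before installing data).
        CollectionAdminRequest.modifyCollection("techproducts", Map.of("readOnly", true))
            .process(client);

        // 2. Install the offline-built index into the target shard via the new request type.
        CollectionAdminRequest.installDataToShard(
                "techproducts", "shard1", "/mounts/myNFSDrive/tech/shard1/data/index", "localfs")
            .process(client);

        // 3. Leave read-only mode once every shard has had its data installed
        //    (per MODIFYCOLLECTION semantics, readOnly can be set to false or removed).
        CollectionAdminRequest.modifyCollection("techproducts", Map.of("readOnly", false))
            .process(client);
      }
    }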

diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 9ae2a93a339..5f4d5db8ea3 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -42,7 +42,9 @@ Other Changes
 
 New Features
 ---------------------
-(No changes)
+* Solr now provides an "Install Shard" API that allows users who have built (per-shard) indices offline to import
+  them into SolrCloud shards.  The API is available at `POST /api/collections/collName/shards/shardName/install`
+  (v2), or at `GET /solr/admin/collections?action=INSTALLSHARD` (v1). (Jason Gerlowski)
 
 Improvements
 ---------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CollApiCmds.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CollApiCmds.java
index 8989b29907e..3bc0226c2aa 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/CollApiCmds.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CollApiCmds.java
@@ -47,6 +47,7 @@ import static org.apache.solr.common.params.CollectionParams.CollectionAction.DE
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICAPROP;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESNAPSHOT;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.INSTALLSHARDDATA;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.MAINTAINROUTEDALIAS;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.MIGRATE;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOCK_COLL_TASK;
@@ -146,6 +147,7 @@ public class CollApiCmds {
               .put(DELETENODE, new DeleteNodeCmd(ccc))
               .put(BACKUP, new BackupCmd(ccc))
               .put(RESTORE, new RestoreCmd(ccc))
+              .put(INSTALLSHARDDATA, new InstallShardDataCmd(ccc))
               .put(DELETEBACKUP, new DeleteBackupCmd(ccc))
               .put(CREATESNAPSHOT, new CreateSnapshotCmd(ccc))
               .put(DELETESNAPSHOT, new DeleteSnapshotCmd(ccc))
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/InstallShardDataCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/InstallShardDataCmd.java
new file mode 100644
index 00000000000..f3c9cb1dec2
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/InstallShardDataCmd.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import static org.apache.solr.cloud.Overseer.QUEUE_OPERATION;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.lang.invoke.MethodHandles;
+import java.util.HashMap;
+import java.util.Locale;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.handler.component.ShardHandler;
+import org.apache.solr.jersey.JacksonReflectMapWriter;
+import org.apache.zookeeper.common.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Overseer processing for the "install shard data" API.
+ *
+ * <p>This overseer processing largely consists of ensuring that read-only mode is enabled for the
+ * specified collection, identifying the core hosting the shard leader, and sending it a
+ * core-admin 'install' request.
+ */
+public class InstallShardDataCmd implements CollApiCmds.CollectionApiCommand {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private final CollectionCommandContext ccc;
+
+  public InstallShardDataCmd(CollectionCommandContext ccc) {
+    this.ccc = ccc;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList<Object> results)
+      throws Exception {
+    final RemoteMessage typedMessage =
+        new ObjectMapper().convertValue(message.getProperties(), RemoteMessage.class);
+    final CollectionHandlingUtils.ShardRequestTracker shardRequestTracker =
+        CollectionHandlingUtils.asyncRequestTracker(typedMessage.asyncId, ccc);
+    final ClusterState clusterState = ccc.getZkStateReader().getClusterState();
+    typedMessage.validate();
+
+    // Fetch the specified Slice
+    final DocCollection installCollection = clusterState.getCollection(typedMessage.collection);
+    final Slice installSlice = installCollection.getSlice(typedMessage.shard);
+    if (installSlice == null) {
+      throw new SolrException(
+          SolrException.ErrorCode.BAD_REQUEST,
+          "The specified shard [" + typedMessage.shard + "] does not exist.");
+    }
+
+    // Build the core-admin request
+    final ModifiableSolrParams coreApiParams = new ModifiableSolrParams();
+    coreApiParams.set(
+        CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.INSTALLCOREDATA.toString());
+    typedMessage.toMap(new HashMap<>()).forEach((k, v) -> coreApiParams.set(k, v.toString()));
+
+    // Send the core-admin request to each replica in the slice
+    final ShardHandler shardHandler = ccc.newShardHandler();
+    shardRequestTracker.sliceCmd(clusterState, coreApiParams, null, installSlice, shardHandler);
+    final String errorMessage =
+        String.format(
+            Locale.ROOT,
+            "Could not install data to collection [%s] and shard [%s]",
+            typedMessage.collection,
+            typedMessage.shard);
+    shardRequestTracker.processResponses(new NamedList<>(), shardHandler, true, errorMessage);
+  }
+
+  /** A value-type representing the message received by {@link InstallShardDataCmd} */
+  public static class RemoteMessage implements JacksonReflectMapWriter {
+
+    @JsonProperty(QUEUE_OPERATION)
+    public String operation = CollectionParams.CollectionAction.INSTALLSHARDDATA.toLower();
+
+    @JsonProperty public String collection;
+
+    @JsonProperty public String shard;
+
+    @JsonProperty public String repository;
+
+    @JsonProperty public String location;
+
+    @JsonProperty(ASYNC)
+    public String asyncId;
+
+    public void validate() {
+      if (StringUtils.isBlank(collection)) {
+        throw new SolrException(
+            SolrException.ErrorCode.BAD_REQUEST,
+            "The 'Install Shard Data' API requires a valid collection name to be provided");
+      }
+      if (StringUtils.isBlank(shard)) {
+        throw new SolrException(
+            SolrException.ErrorCode.BAD_REQUEST,
+            "The 'Install Shard Data' API requires a valid shard name to be provided");
+      }
+    }
+  }
+}
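
The overseer command above is driven entirely by the property map carried in the
ZkNodeProps message. A hypothetical sketch of that map and of the conversion the
command performs (field names come from RemoteMessage; all values are placeholders):

    import com.fasterxml.jackson.databind.ObjectMapper;
    import java.util.Map;
    import org.apache.solr.cloud.api.collections.InstallShardDataCmd;

    public class RemoteMessageShapeSketch {
      public static void main(String[] args) {
        // Placeholder values; the "operation" key corresponds to Overseer.QUEUE_OPERATION.
        Map<String, Object> overseerProps =
            Map.of(
                "operation", "installsharddata",
                "collection", "techproducts",
                "shard", "shard1",
                "location", "/mounts/myNFSDrive/tech/shard1/data/index",
                "repository", "localfs");

        // The same Jackson conversion InstallShardDataCmd applies to the received message.
        InstallShardDataCmd.RemoteMessage msg =
            new ObjectMapper().convertValue(overseerProps, InstallShardDataCmd.RemoteMessage.class);
        msg.validate(); // throws a SolrException if collection or shard is blank
      }
    }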
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
index b18c74e7026..fd7d1f2b1c7 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
@@ -53,6 +53,7 @@ import static org.apache.solr.common.params.CollectionAdminParams.PER_REPLICA_ST
 import static org.apache.solr.common.params.CollectionAdminParams.PROPERTY_NAME;
 import static org.apache.solr.common.params.CollectionAdminParams.PROPERTY_PREFIX;
 import static org.apache.solr.common.params.CollectionAdminParams.PROPERTY_VALUE;
+import static org.apache.solr.common.params.CollectionAdminParams.SHARD;
 import static org.apache.solr.common.params.CollectionAdminParams.SKIP_NODE_ASSIGNMENT;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICAPROP;
@@ -79,6 +80,7 @@ import static org.apache.solr.common.params.CollectionParams.CollectionAction.DE
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESTATUS;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.DISTRIBUTEDAPIPROCESSING;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.FORCELEADER;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.INSTALLSHARDDATA;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.LIST;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.LISTALIASES;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.LISTBACKUP;
@@ -218,6 +220,7 @@ import org.apache.solr.handler.admin.api.DeleteReplicaAPI;
 import org.apache.solr.handler.admin.api.DeleteReplicaPropertyAPI;
 import org.apache.solr.handler.admin.api.DeleteShardAPI;
 import org.apache.solr.handler.admin.api.ForceLeaderAPI;
+import org.apache.solr.handler.admin.api.InstallShardDataAPI;
 import org.apache.solr.handler.admin.api.ListAliasesAPI;
 import org.apache.solr.handler.admin.api.ListCollectionsAPI;
 import org.apache.solr.handler.admin.api.MigrateDocsAPI;
@@ -1548,6 +1551,24 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
           copyPropertiesWithPrefix(req.getParams(), params, PROPERTY_PREFIX);
           return params;
         }),
+    INSTALLSHARDDATA_OP(
+        INSTALLSHARDDATA,
+        (req, rsp, h) -> {
+          req.getParams().required().check(COLLECTION, SHARD);
+          final String collectionName = req.getParams().get(COLLECTION);
+          final String shardName = req.getParams().get(SHARD);
+          final InstallShardDataAPI.InstallShardRequestBody reqBody =
+              new InstallShardDataAPI.InstallShardRequestBody();
+          reqBody.asyncId = req.getParams().get(ASYNC);
+          reqBody.repository = req.getParams().get(BACKUP_REPOSITORY);
+          reqBody.location = req.getParams().get(BACKUP_LOCATION);
+
+          final InstallShardDataAPI installApi = new InstallShardDataAPI(h.coreContainer, req, rsp);
+          final SolrJerseyResponse installResponse =
+              installApi.installShardData(collectionName, shardName, reqBody);
+          V2ApiUtils.squashIntoSolrResponseWithoutHeader(rsp, installResponse);
+          return null;
+        }),
     DELETEBACKUP_OP(
         DELETEBACKUP,
         (req, rsp, h) -> {
@@ -2092,6 +2113,7 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
         DeleteAliasAPI.class,
         DeleteCollectionAPI.class,
         DeleteReplicaPropertyAPI.class,
+        InstallShardDataAPI.class,
         ListCollectionsAPI.class,
         ReplaceNodeAPI.class,
         CollectionPropertyAPI.class,
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
index 3f50824b3f2..c9420dfe1d1 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
@@ -57,6 +57,7 @@ import org.apache.solr.handler.RequestHandlerBase;
 import org.apache.solr.handler.admin.api.AllCoresStatusAPI;
 import org.apache.solr.handler.admin.api.CoreSnapshotAPI;
 import org.apache.solr.handler.admin.api.CreateCoreAPI;
+import org.apache.solr.handler.admin.api.InstallCoreDataAPI;
 import org.apache.solr.handler.admin.api.MergeIndexesAPI;
 import org.apache.solr.handler.admin.api.OverseerOperationAPI;
 import org.apache.solr.handler.admin.api.PrepareCoreRecoveryAPI;
@@ -380,7 +381,7 @@ public class CoreAdminHandler extends RequestHandlerBase implements PermissionNa
 
   @Override
   public Collection<Class<? extends JerseyResource>> getJerseyResources() {
-    return List.of(CoreSnapshotAPI.class);
+    return List.of(CoreSnapshotAPI.class, InstallCoreDataAPI.class);
   }
 
   static {
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java
index 6d16bdd5fbd..dc557a5fa28 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java
@@ -22,6 +22,7 @@ import static org.apache.solr.common.params.CoreAdminParams.CoreAdminAction.BACK
 import static org.apache.solr.common.params.CoreAdminParams.CoreAdminAction.CREATE;
 import static org.apache.solr.common.params.CoreAdminParams.CoreAdminAction.CREATESNAPSHOT;
 import static org.apache.solr.common.params.CoreAdminParams.CoreAdminAction.DELETESNAPSHOT;
+import static org.apache.solr.common.params.CoreAdminParams.CoreAdminAction.INSTALLCOREDATA;
 import static org.apache.solr.common.params.CoreAdminParams.CoreAdminAction.LISTSNAPSHOTS;
 import static org.apache.solr.common.params.CoreAdminParams.CoreAdminAction.MERGEINDEXES;
 import static org.apache.solr.common.params.CoreAdminParams.CoreAdminAction.OVERSEEROP;
@@ -273,6 +274,7 @@ public enum CoreAdminOperation implements CoreAdminOp {
       }),
   BACKUPCORE_OP(BACKUPCORE, new BackupCoreOp()),
   RESTORECORE_OP(RESTORECORE, new RestoreCoreOp()),
+  INSTALLCOREDATA_OP(INSTALLCOREDATA, new InstallCoreDataOp()),
   CREATESNAPSHOT_OP(CREATESNAPSHOT, new CreateSnapshotOp()),
   DELETESNAPSHOT_OP(DELETESNAPSHOT, new DeleteSnapshotOp()),
   @SuppressWarnings({"unchecked"})
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/InstallCoreDataOp.java b/solr/core/src/java/org/apache/solr/handler/admin/InstallCoreDataOp.java
new file mode 100644
index 00000000000..c115739f6a8
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/handler/admin/InstallCoreDataOp.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.handler.admin;
+
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CoreAdminParams.BACKUP_LOCATION;
+import static org.apache.solr.common.params.CoreAdminParams.BACKUP_REPOSITORY;
+
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.handler.admin.api.InstallCoreDataAPI;
+import org.apache.solr.handler.api.V2ApiUtils;
+
+/**
+ * v1 shim implementation of the "Install Core Data" API, a core-admin API used to implement the
+ * "Install Shard Data" Collection-Admin functionality
+ *
+ * <p>Converts v1-style query parameters into a v2-style request body and delegates to {@link
+ * InstallCoreDataAPI}.
+ */
+public class InstallCoreDataOp implements CoreAdminHandler.CoreAdminOp {
+  @Override
+  public void execute(CoreAdminHandler.CallInfo it) throws Exception {
+    final SolrParams params = it.req.getParams();
+    final String coreName = params.required().get(CoreAdminParams.CORE);
+
+    final InstallCoreDataAPI api =
+        new InstallCoreDataAPI(
+            it.handler.getCoreContainer(), it.handler.getCoreAdminAsyncTracker(), it.req, it.rsp);
+    final InstallCoreDataAPI.InstallCoreDataRequestBody requestBody =
+        new InstallCoreDataAPI.InstallCoreDataRequestBody();
+    requestBody.repository = params.get(BACKUP_REPOSITORY);
+    requestBody.location = params.get(BACKUP_LOCATION);
+    requestBody.asyncId = params.get(ASYNC);
+    V2ApiUtils.squashIntoSolrResponseWithoutHeader(
+        it.rsp, api.installCoreData(coreName, requestBody));
+  }
+}
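
For reference, the shim above corresponds to a v1 core-admin request along the lines
sketched below. INSTALLCOREDATA is an internal action invoked on shard leaders by the
collection-level command, so this is purely illustrative; the core name and data
location are placeholders, and the parameter names mirror those read in InstallCoreDataOp.

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.SolrRequest;
    import org.apache.solr.client.solrj.request.GenericSolrRequest;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class InstallCoreDataV1Sketch {
      public static void send(SolrClient client) throws Exception {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("action", "INSTALLCOREDATA");               // CoreAdminAction added by this commit
        params.set("core", "techproducts_shard1_replica_n1");  // placeholder core name
        params.set("location", "/mounts/myNFSDrive/tech/shard1/data/index");
        params.set("repository", "localfs");

        // Roughly equivalent to /solr/admin/cores?action=INSTALLCOREDATA&core=...&location=...
        client.request(new GenericSolrRequest(SolrRequest.METHOD.POST, "/admin/cores", params));
      }
    }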
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/api/InstallCoreDataAPI.java b/solr/core/src/java/org/apache/solr/handler/admin/api/InstallCoreDataAPI.java
new file mode 100644
index 00000000000..53c47824f13
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/handler/admin/api/InstallCoreDataAPI.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.handler.admin.api;
+
+import static org.apache.solr.client.solrj.impl.BinaryResponseParser.BINARY_CONTENT_TYPE_V2;
+import static org.apache.solr.security.PermissionNameProvider.Name.CORE_EDIT_PERM;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.lang.invoke.MethodHandles;
+import java.net.URI;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import org.apache.solr.cloud.CloudDescriptor;
+import org.apache.solr.cloud.ZkController;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.core.backup.repository.BackupRepository;
+import org.apache.solr.handler.RestoreCore;
+import org.apache.solr.handler.admin.CoreAdminHandler;
+import org.apache.solr.jersey.JacksonReflectMapWriter;
+import org.apache.solr.jersey.PermissionName;
+import org.apache.solr.jersey.SolrJerseyResponse;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.SolrQueryResponse;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * v2 implementation of the "Install Core Data" Core-Admin API
+ *
+ * <p>This is an internal API intended for use only by the Collection Admin "Install Shard Data"
+ * API.
+ */
+@Path("/cores/{coreName}/install")
+public class InstallCoreDataAPI extends CoreAdminAPIBase {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  public InstallCoreDataAPI(
+      CoreContainer coreContainer,
+      CoreAdminHandler.CoreAdminAsyncTracker coreAdminAsyncTracker,
+      SolrQueryRequest req,
+      SolrQueryResponse rsp) {
+    super(coreContainer, coreAdminAsyncTracker, req, rsp);
+  }
+
+  @POST
+  @Produces({"application/json", "application/xml", BINARY_CONTENT_TYPE_V2})
+  @PermissionName(CORE_EDIT_PERM)
+  public SolrJerseyResponse installCoreData(
+      @PathParam("coreName") String coreName, InstallCoreDataRequestBody requestBody)
+      throws Exception {
+    final SolrJerseyResponse response = instantiateJerseyResponse(SolrJerseyResponse.class);
+
+    if (requestBody == null) {
+      throw new SolrException(
+          SolrException.ErrorCode.BAD_REQUEST, "Required request body is missing");
+    }
+
+    final ZkController zkController = coreContainer.getZkController();
+    if (zkController == null) {
+      throw new SolrException(
+          SolrException.ErrorCode.BAD_REQUEST,
+          "'Install Core Data' API only supported in SolrCloud clusters");
+    }
+
+    try (BackupRepository repository = coreContainer.newBackupRepository(requestBody.repository);
+        SolrCore core = coreContainer.getCore(coreName)) {
+      String location = repository.getBackupLocation(requestBody.location);
+      if (location == null) {
+        throw new SolrException(
+            SolrException.ErrorCode.BAD_REQUEST,
+            "'location' is not specified as a" + " parameter or as a default repository property");
+      }
+
+      final URI locationUri = repository.createDirectoryURI(location);
+      final CloudDescriptor cd = core.getCoreDescriptor().getCloudDescriptor();
+      if (!core.readOnly) {
+        throw new SolrException(
+            SolrException.ErrorCode.SERVER_ERROR,
+            "Failed to install data to core core="
+                + core.getName()
+                + "; collection must be in read-only mode prior to installing data to a core");
+      }
+
+      final RestoreCore restoreCore = RestoreCore.create(repository, core, locationUri, "");
+      boolean success = restoreCore.doRestore();
+      if (!success) {
+        throw new SolrException(
+            SolrException.ErrorCode.SERVER_ERROR,
+            "Failed to install data to core=" + core.getName());
+      }
+
+      // other replicas to-be-created will know that they are out of date by
+      // looking at their term : 0 compare to term of this core : 1
+      zkController
+          .getShardTerms(cd.getCollectionName(), cd.getShardId())
+          .ensureHighestTermsAreNotZero();
+    }
+
+    return response;
+  }
+
+  public static class InstallCoreDataRequestBody implements JacksonReflectMapWriter {
+    // Expected to point to an index directory (e.g. data/techproducts_shard1_replica_n1/data/index)
+    // for a single core that has previously been uploaded to the backup
+    // repository.
+    @JsonProperty("location")
+    public String location;
+
+    @JsonProperty("repository")
+    public String repository;
+
+    @JsonProperty("async")
+    public String asyncId;
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/api/InstallShardDataAPI.java b/solr/core/src/java/org/apache/solr/handler/admin/api/InstallShardDataAPI.java
new file mode 100644
index 00000000000..10c50c05396
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/handler/admin/api/InstallShardDataAPI.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.handler.admin.api;
+
+import static org.apache.solr.client.solrj.impl.BinaryResponseParser.BINARY_CONTENT_TYPE_V2;
+import static org.apache.solr.handler.admin.CollectionsHandler.DEFAULT_COLLECTION_OP_TIMEOUT;
+import static org.apache.solr.security.PermissionNameProvider.Name.COLL_EDIT_PERM;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.lang.invoke.MethodHandles;
+import java.util.HashMap;
+import javax.inject.Inject;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import org.apache.solr.client.solrj.SolrResponse;
+import org.apache.solr.cloud.api.collections.InstallShardDataCmd;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.core.CoreContainer;
+import org.apache.solr.handler.admin.CollectionsHandler;
+import org.apache.solr.jersey.JacksonReflectMapWriter;
+import org.apache.solr.jersey.PermissionName;
+import org.apache.solr.jersey.SolrJerseyResponse;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.SolrQueryResponse;
+import org.apache.zookeeper.common.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A V2 API that allows users to import an index constructed offline into a shard of their
+ * collection.
+ *
+ * <p>Particularly useful for installing (per-shard) indices constructed offline into a SolrCloud
+ * deployment. Callers are required to put the collection into read-only mode prior to installing
+ * data into any shards of that collection, and should exit read-only mode when completed.
+ */
+@Path("/collections/{collName}/shards/{shardName}/install")
+public class InstallShardDataAPI extends AdminAPIBase {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  @Inject
+  public InstallShardDataAPI(
+      CoreContainer coreContainer,
+      SolrQueryRequest solrQueryRequest,
+      SolrQueryResponse solrQueryResponse) {
+    super(coreContainer, solrQueryRequest, solrQueryResponse);
+  }
+
+  @POST
+  @Produces({"application/json", "application/xml", BINARY_CONTENT_TYPE_V2})
+  @PermissionName(COLL_EDIT_PERM)
+  public SolrJerseyResponse installShardData(
+      @PathParam("collName") String collName,
+      @PathParam("shardName") String shardName,
+      InstallShardRequestBody requestBody)
+      throws Exception {
+    final SolrJerseyResponse response = instantiateJerseyResponse(SolrJerseyResponse.class);
+    final CoreContainer coreContainer = fetchAndValidateZooKeeperAwareCoreContainer();
+    recordCollectionForLogAndTracing(collName, solrQueryRequest);
+    if (requestBody == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Required request body missing");
+    }
+
+    if (StringUtils.isBlank(requestBody.location)) {
+      throw new SolrException(
+          SolrException.ErrorCode.BAD_REQUEST,
+          "The Install Shard Data API requires a 'location' indicating the index data to install");
+    }
+
+    final ClusterState clusterState =
+        coreContainer.getZkController().getZkStateReader().getClusterState();
+    ensureCollectionAndShardExist(clusterState, collName, shardName);
+
+    // Only install data to shards which belong to a collection in read-only mode
+    final DocCollection dc =
+        coreContainer.getZkController().getZkStateReader().getCollection(collName);
+    if (!dc.isReadOnly()) {
+      throw new SolrException(
+          SolrException.ErrorCode.BAD_REQUEST,
+          "Collection must be in readOnly mode before installing data to shard");
+    }
+
+    final ZkNodeProps remoteMessage = createRemoteMessage(collName, shardName, requestBody);
+    final SolrResponse remoteResponse =
+        CollectionsHandler.submitCollectionApiCommand(
+            coreContainer,
+            coreContainer.getDistributedCollectionCommandRunner(),
+            remoteMessage,
+            CollectionParams.CollectionAction.INSTALLSHARDDATA,
+            DEFAULT_COLLECTION_OP_TIMEOUT);
+    if (remoteResponse.getException() != null) {
+      throw remoteResponse.getException();
+    }
+
+    return response;
+  }
+
+  public static void ensureCollectionAndShardExist(
+      ClusterState clusterState, String collectionName, String shardName) {
+    final DocCollection installCollection = clusterState.getCollection(collectionName);
+    final Slice installSlice = installCollection.getSlice(shardName);
+    if (installSlice == null) {
+      throw new SolrException(
+          SolrException.ErrorCode.BAD_REQUEST,
+          "The specified shard [" + shardName + "] does not exist.");
+    }
+  }
+
+  public static ZkNodeProps createRemoteMessage(
+      String collectionName, String shardName, InstallShardRequestBody requestBody) {
+    final InstallShardDataCmd.RemoteMessage messageTyped = new InstallShardDataCmd.RemoteMessage();
+    messageTyped.collection = collectionName;
+    messageTyped.shard = shardName;
+    if (requestBody != null) {
+      messageTyped.location = requestBody.location;
+      messageTyped.repository = requestBody.repository;
+      messageTyped.asyncId = requestBody.asyncId;
+    }
+
+    messageTyped.validate();
+    return new ZkNodeProps(messageTyped.toMap(new HashMap<>()));
+  }
+
+  public static class InstallShardRequestBody implements JacksonReflectMapWriter {
+    @JsonProperty(defaultValue = "location", required = true)
+    public String location;
+
+    @JsonProperty("repository")
+    public String repository;
+
+    @JsonProperty("async")
+    public String asyncId;
+  }
+}
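
A hedged sketch of calling the endpoint defined above directly over the v2 API using
SolrJ's generic V2Request helper; the path segments and JSON body fields come from the
class above, while the concrete collection, shard, location, and repository values are
placeholders.

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.SolrRequest;
    import org.apache.solr.client.solrj.request.V2Request;

    public class InstallShardV2Sketch {
      public static void send(SolrClient client) throws Exception {
        // POST /api/collections/techproducts/shards/shard1/install
        final V2Request req =
            new V2Request.Builder("/collections/techproducts/shards/shard1/install")
                .withMethod(SolrRequest.METHOD.POST)
                .withPayload(
                    "{\"location\":\"/mounts/myNFSDrive/tech/shard1/data/index\","
                        + "\"repository\":\"localfs\"}")
                .build();
        req.process(client); // the collection must already be in read-only mode
      }
    }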
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/LocalFSInstallShardTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/LocalFSInstallShardTest.java
new file mode 100644
index 00000000000..690ff447194
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/LocalFSInstallShardTest.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import org.apache.lucene.tests.util.LuceneTestCase;
+import org.junit.BeforeClass;
+
+// Backups do checksum validation against a footer value not present in 'SimpleText'
+@LuceneTestCase.SuppressCodecs({"SimpleText"})
+public class LocalFSInstallShardTest extends AbstractInstallShardTest {
+
+  private static final String BACKUP_REPOSITORY_XML =
+      "  <str name=\"allowPaths\">ALLOWPATHS_TEMPLATE_VAL</str>\n"
+          + "  <backup>\n"
+          + "    <repository name=\"trackingBackupRepository\" class=\"org.apache.solr.core.TrackingBackupRepository\"> \n"
+          + "      <str name=\"delegateRepoName\">localfs</str>\n"
+          + "    </repository>\n"
+          + "    <repository name=\"localfs\" class=\"org.apache.solr.core.backup.repository.LocalFileSystemRepository\"> \n"
+          + "    </repository>\n"
+          + "  </backup>\n";
+
+  private static final String SOLR_XML =
+      AbstractInstallShardTest.defaultSolrXmlTextWithBackupRepository(BACKUP_REPOSITORY_XML);
+
+  @BeforeClass
+  public static void setupClass() throws Exception {
+    boolean whitespacesInPath = random().nextBoolean();
+    final String tmpDirPrefix = whitespacesInPath ? "my install" : "myinstall";
+    final String backupLocation = createTempDir(tmpDirPrefix).toAbsolutePath().toString();
+
+    configureCluster(1) // nodes
+        .addConfig(
+            "conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
+        .withSolrXml(SOLR_XML.replace("ALLOWPATHS_TEMPLATE_VAL", backupLocation))
+        .configure();
+
+    bootstrapBackupRepositoryData(backupLocation);
+  }
+}
diff --git a/solr/modules/gcs-repository/src/test/org/apache/solr/gcs/GCSInstallShardTest.java b/solr/modules/gcs-repository/src/test/org/apache/solr/gcs/GCSInstallShardTest.java
new file mode 100644
index 00000000000..51dc59d2ea0
--- /dev/null
+++ b/solr/modules/gcs-repository/src/test/org/apache/solr/gcs/GCSInstallShardTest.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.gcs;
+
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering;
+import org.apache.lucene.tests.util.LuceneTestCase;
+import org.apache.solr.cloud.api.collections.AbstractInstallShardTest;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+/**
+ * Tests validating that the 'Install Shard API' works when used with {@link GCSBackupRepository}
+ *
+ * @see org.apache.solr.cloud.api.collections.AbstractInstallShardTest
+ * @see org.apache.solr.handler.admin.api.InstallShardDataAPI
+ */
+// Backups do checksum validation against a footer value not present in 'SimpleText'
+@LuceneTestCase.SuppressCodecs({"SimpleText"})
+@ThreadLeakLingering(linger = 10)
+public class GCSInstallShardTest extends AbstractInstallShardTest {
+
+  private static final String BACKUP_REPOSITORY_XML =
+      "  <backup>\n"
+          + "    <repository name=\"trackingBackupRepository\" class=\"org.apache.solr.core.TrackingBackupRepository\"> \n"
+          + "      <str name=\"delegateRepoName\">localfs</str>\n"
+          + "    </repository>\n"
+          + "    <repository name=\"localfs\" class=\"org.apache.solr.gcs.LocalStorageGCSBackupRepository\"> \n"
+          + "      <str name=\"gcsBucket\">someBucketName</str>\n"
+          + "      <str name=\"location\">backup1</str>\n"
+          + "    </repository>\n"
+          + "  </backup>\n";
+  private static final String SOLR_XML =
+      AbstractInstallShardTest.defaultSolrXmlTextWithBackupRepository(BACKUP_REPOSITORY_XML);
+
+  @BeforeClass
+  public static void setupClass() throws Exception {
+
+    configureCluster(1) // nodes
+        .addConfig("conf1", getFile("conf/solrconfig.xml").getParentFile().toPath())
+        .withSolrXml(SOLR_XML)
+        .configure();
+
+    bootstrapBackupRepositoryData("backup1");
+  }
+
+  @AfterClass
+  public static void tearDownClass() throws Exception {
+    LocalStorageGCSBackupRepository.clearStashedStorage();
+  }
+}
diff --git a/solr/modules/s3-repository/src/test/org/apache/solr/s3/S3InstallShardTest.java b/solr/modules/s3-repository/src/test/org/apache/solr/s3/S3InstallShardTest.java
new file mode 100644
index 00000000000..189928f3dbb
--- /dev/null
+++ b/solr/modules/s3-repository/src/test/org/apache/solr/s3/S3InstallShardTest.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.s3;
+
+import com.adobe.testing.s3mock.junit4.S3MockRule;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering;
+import org.apache.lucene.tests.util.LuceneTestCase;
+import org.apache.solr.cloud.api.collections.AbstractInstallShardTest;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import software.amazon.awssdk.regions.Region;
+
+/**
+ * Tests validating that the 'Install Shard API' works when used with {@link S3BackupRepository}
+ *
+ * @see org.apache.solr.cloud.api.collections.AbstractInstallShardTest
+ * @see org.apache.solr.handler.admin.api.InstallShardDataAPI
+ */
+// Backups do checksum validation against a footer value not present in 'SimpleText'
+@LuceneTestCase.SuppressCodecs({"SimpleText"})
+@ThreadLeakLingering(linger = 10)
+public class S3InstallShardTest extends AbstractInstallShardTest {
+
+  private static final String BUCKET_NAME = S3InstallShardTest.class.getSimpleName();
+
+  private static final String BACKUP_REPOSITORY_XML =
+      "  <backup>\n"
+          + "    <repository name=\"trackingBackupRepository\" class=\"org.apache.solr.core.TrackingBackupRepository\"> \n"
+          + "      <str name=\"delegateRepoName\">s3</str>\n"
+          + "    </repository>\n"
+          + "    <repository name=\"s3\" class=\"org.apache.solr.s3.S3BackupRepository\"> \n"
+          + "      <str name=\"s3.bucket.name\">BUCKET</str>\n"
+          + "      <str name=\"s3.region\">REGION</str>\n"
+          + "      <str name=\"s3.endpoint\">ENDPOINT</str>\n"
+          + "    </repository>\n"
+          + "  </backup>\n";
+  private static final String SOLR_XML =
+      AbstractInstallShardTest.defaultSolrXmlTextWithBackupRepository(BACKUP_REPOSITORY_XML);
+
+  @ClassRule
+  public static final S3MockRule S3_MOCK_RULE =
+      S3MockRule.builder()
+          .silent()
+          .withInitialBuckets(BUCKET_NAME)
+          .withSecureConnection(false)
+          .build();
+
+  @BeforeClass
+  public static void setupClass() throws Exception {
+    System.setProperty("aws.accessKeyId", "foo");
+    System.setProperty("aws.secretAccessKey", "bar");
+
+    AbstractS3ClientTest.setS3ConfFile();
+
+    configureCluster(1) // nodes
+        .addConfig("conf1", getFile("conf/solrconfig.xml").getParentFile().toPath())
+        .withSolrXml(
+            SOLR_XML
+                .replace("BUCKET", BUCKET_NAME)
+                .replace("REGION", Region.US_EAST_1.id())
+                .replace("ENDPOINT", "http://localhost:" + S3_MOCK_RULE.getHttpPort()))
+        .configure();
+
+    bootstrapBackupRepositoryData("/");
+  }
+}
diff --git a/solr/solr-ref-guide/modules/deployment-guide/pages/shard-management.adoc b/solr/solr-ref-guide/modules/deployment-guide/pages/shard-management.adoc
index d2995f014d5..2efad25dea1 100644
--- a/solr/solr-ref-guide/modules/deployment-guide/pages/shard-management.adoc
+++ b/solr/solr-ref-guide/modules/deployment-guide/pages/shard-management.adoc
@@ -630,3 +630,129 @@ This parameter is required.
 
 WARNING: This is an expert level command, and should be invoked only when regular leader election is not working.
 This may potentially lead to loss of data in the event that the new leader doesn't have certain updates, possibly recent ones, which were acknowledged by the old leader before going down.
+
+[[installshard]]
+== INSTALLSHARD: Install/Import Data to Shard
+
+Under normal circumstances, data is added to Solr collections (and the shards that make them up) by xref:indexing-guide:indexing-with-update-handlers.adoc[indexing] documents.
+However some use-cases require constructing per-shard indices offline.
+Often this is done as a means of insulating query traffic from indexing load, or because the ETL pipeline in use is particularly complex.
+The INSTALLSHARD API allows installation of these pre-constructed indices into individual shards within a collection.
+Installation copies the index files into all replicas within the shard, overwriting any preexisting data held by that shard.
+
+To install data into a shard, the collection owning that shard must first be put into "readOnly" mode, using the xref:deployment-guide:collection-management.adoc#modifycollection[MODIFYCOLLECTION API].
+Once in read-only mode, shard installation may be done either serially or in parallel.
+Data can be imported from any `repository` and `location` supported by Solr's pluggable xref:deployment-guide:backup-restore.adoc#backuprestore-storage-repositories[Backup Repository] abstraction.
+
+The specified `location` must contain all files that make up a core's `data/index` directory.
+Users are responsible for ensuring that the index installed to a shard is compatible with the schema and configuration for the collection hosting that shard.
+
+
+[.dynamic-tabs]
+--
+
+[example.tab-pane#v1installshard]
+====
+[.tab-label]*V1 API*
+
+*Input*
+
+[source,text]
+----
+http://localhost:8983/solr/admin/collections?action=INSTALLSHARD&collection=techproducts&shard=shard1&repository=localfs&location=/mounts/myNFSDrive/tech/shard1/data/index
+----
+
+*Output*
+
+[source,json]
+----
+{
+  "responseHeader": {
+    "status": 0,
+    "QTime": 78
+  }
+}
+----
+====
+
+[example.tab-pane#v2installshard]
+====
+[.tab-label]*V2 API*
+*Input*
+
+[source,bash]
+----
+curl -X POST http://localhost:8983/api/collections/techproducts/shards/shard1/install -H 'Content-Type: application/json' -d '
+  {
+    "repository": "localfs",
+    "location": "/mounts/myNFSDrive/tech/shard1/data/index"
+  }
+'
+----
+*Output*
+
+[source,json]
+----
+{
+  "responseHeader": {
+    "status": 0,
+    "QTime": 125
+  }
+}
+----
+====
+--
+
+=== INSTALLSHARD Parameters
+
+`collection`::
++
+[%autowidth,frame=none]
+|===
+s|Required |Default: none
+|===
++
+The name of the collection.
+This parameter is required.
+Specified as a query parameter for v1 requests, and as a path segment for v2 requests.
+
+`shard`::
++
+[%autowidth,frame=none]
+|===
+s|Required |Default: none
+|===
++
+The name of the shard to install data to.
+This parameter is required.
+Specified as a query parameter for v1 requests, and as a path segment for v2 requests.
+
+`location`::
++
+[%autowidth,frame=none]
+|===
+s|Required|Default: none
+|===
++
+The location within the specified backup repository to find the index files to install.
+Specified as a query parameter for v1 requests, and in the request body of v2 requests.
+
+`repository`::
++
+[%autowidth,frame=none]
+|===
+|Optional|Default: none
+|===
++
+The name of the backup repository to look for index files within.
+Specified as a query parameter for v1 requests, and in the request body of v2 requests.
+Solr's default Backup Repository (if one is defined in solr.xml) will be used as a fallback if no repository parameter is provided.
+
+`async`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
+Request ID to track this action which will be xref:configuration-guide:collections-api.adoc#asynchronous-calls[processed asynchronously].
diff --git a/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-9.adoc b/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-9.adoc
index 451cba49174..1c07cd637b4 100644
--- a/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-9.adoc
+++ b/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-9.adoc
@@ -64,6 +64,10 @@ It is always strongly recommended that you fully reindex your documents after a
 In Solr 8, it was possible to add docValues to a schema without re-indexing via `UninvertDocValuesMergePolicy`, an advanced/expert utility.
 Due to changes in Lucene 9, that isn't possible any more.
 
+== Solr 9.3
+=== Shard Management
+* Solr now provides an xref:deployment-guide:shard-management.adoc#installshard[INSTALLSHARD] API to allow users who have built (per-shard) indices offline to import them into SolrCloud shards.
+
 == Solr 9.2
 === Upgrade to Jetty 10.x
 * Solr upgraded to Jetty 10.x from 9.x because Jetty 9.x is now end of life. Jetty 10.x has a Java 11 minimum and matches Solr 9's minimum Java version. Jetty logging has been replaced with slf4j, again matching Solr. See https://webtide.com/jetty-10-and-11-have-arrived/ for additional Jetty 10.x highlights.
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
index fad9264b8d6..2950c9bcb16 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
@@ -22,6 +22,7 @@ import static org.apache.solr.common.params.CollectionAdminParams.CREATE_NODE_SE
 import static org.apache.solr.common.params.CollectionAdminParams.CREATE_NODE_SET_SHUFFLE_PARAM;
 import static org.apache.solr.common.params.CollectionAdminParams.ROUTER_PREFIX;
 import static org.apache.solr.common.params.CollectionAdminParams.SKIP_NODE_ASSIGNMENT;
+import static org.apache.solr.common.params.CoreAdminParams.BACKUP_REPOSITORY;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -1196,7 +1197,7 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
       params.set(CoreAdminParams.NAME, name);
       params.set(CoreAdminParams.BACKUP_LOCATION, location); // note: optional
       if (repositoryName.isPresent()) {
-        params.set(CoreAdminParams.BACKUP_REPOSITORY, repositoryName.get());
+        params.set(BACKUP_REPOSITORY, repositoryName.get());
       }
       if (commitName.isPresent()) {
         params.set(CoreAdminParams.COMMIT_NAME, commitName.get());
@@ -1374,7 +1375,7 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
         addProperties(params, properties);
       }
       if (repositoryName.isPresent()) {
-        params.set(CoreAdminParams.BACKUP_REPOSITORY, repositoryName.get());
+        params.set(BACKUP_REPOSITORY, repositoryName.get());
       }
       if (createNodeSet.isPresent()) {
         params.set(CREATE_NODE_SET_PARAM, createNodeSet.get());
@@ -1390,6 +1391,56 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
     }
   }
 
+  /**
+   * Install index data to a specific shard of a specific collection
+   *
+   * @param collection the collection to install data to
+   * @param shard the specific shard within to install data to
+   * @param location a URI-string pointing to location of the index data within a particular backup
+   *     repository
+   * @param backupRepository the backup repository to lookup and install the index data from
+   */
+  public static InstallShard installDataToShard(
+      String collection, String shard, String location, String backupRepository) {
+    return new InstallShard(collection, shard, location, backupRepository);
+  }
+
+  /**
+   * Install index data to a specific shard of a specific collection
+   *
+   * <p>Will use Solr's "default" backup repository for locating and accessing the index data.
+   *
+   * @param collection the collection to install data to
+   * @param shard the specific shard within to install data to
+   * @param location a URI-string pointing to location of the index data within a particular backup
+   *     repository
+   */
+  public static InstallShard installDataToShard(String collection, String shard, String location) {
+    return new InstallShard(collection, shard, location, null);
+  }
+
+  public static class InstallShard extends AsyncShardSpecificAdminRequest {
+
+    protected String repositoryName;
+    protected String location;
+
+    public InstallShard(String collection, String shard, String location, String backupRepository) {
+      super(CollectionAction.INSTALLSHARDDATA, collection, shard);
+
+      this.repositoryName = backupRepository;
+      this.location = location;
+    }
+
+    @Override
+    public SolrParams getParams() {
+      ModifiableSolrParams params = (ModifiableSolrParams) super.getParams();
+      params.setNonNull(CoreAdminParams.BACKUP_REPOSITORY, repositoryName);
+      params.setNonNull(CoreAdminParams.BACKUP_LOCATION, location);
+
+      return params;
+    }
+  }
+
   // Note : This method is added since solrj module does not use Google
   // guava library. Also changes committed for SOLR-8765 result in wrong
   // error message when "collection" parameter is specified as Null.
@@ -3087,7 +3138,7 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
       ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
       params.set(CoreAdminParams.NAME, deleteBackupPayload.name);
       params.setNonNull(CoreAdminParams.BACKUP_LOCATION, deleteBackupPayload.location);
-      params.setNonNull(CoreAdminParams.BACKUP_REPOSITORY, deleteBackupPayload.repository);
+      params.setNonNull(BACKUP_REPOSITORY, deleteBackupPayload.repository);
       params.setNonNull(CoreAdminParams.BACKUP_ID, deleteBackupPayload.backupId);
       params.setNonNull(
           CoreAdminParams.MAX_NUM_BACKUP_POINTS, deleteBackupPayload.maxNumBackupPoints);
@@ -3155,7 +3206,7 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
       ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
       params.set(CoreAdminParams.NAME, listPayload.name);
       params.setNonNull(CoreAdminParams.BACKUP_LOCATION, listPayload.location);
-      params.setNonNull(CoreAdminParams.BACKUP_REPOSITORY, listPayload.repository);
+      params.setNonNull(BACKUP_REPOSITORY, listPayload.repository);
 
       return params;
     }
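
Because InstallShard extends AsyncShardSpecificAdminRequest, the new SolrJ request type
can also be submitted asynchronously. A small illustrative sketch (placeholder names;
the returned id can then be polled with the usual REQUESTSTATUS machinery):

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class AsyncInstallSketch {
      public static String submit(SolrClient client) throws Exception {
        // Returns the generated async request id rather than blocking on completion.
        return CollectionAdminRequest.installDataToShard(
                "techproducts", "shard1", "/mounts/myNFSDrive/tech/shard1/data/index", "localfs")
            .processAsync(client);
      }
    }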
diff --git a/solr/solrj/src/java/org/apache/solr/common/params/CollectionParams.java b/solr/solrj/src/java/org/apache/solr/common/params/CollectionParams.java
index cc7d20e94ec..a1910d4edbb 100644
--- a/solr/solrj/src/java/org/apache/solr/common/params/CollectionParams.java
+++ b/solr/solrj/src/java/org/apache/solr/common/params/CollectionParams.java
@@ -113,6 +113,7 @@ public interface CollectionParams {
     MODIFYCOLLECTION(true, LockLevel.COLLECTION),
     BACKUP(true, LockLevel.COLLECTION),
     RESTORE(true, LockLevel.COLLECTION),
+    INSTALLSHARDDATA(true, LockLevel.SHARD),
     LISTBACKUP(false, LockLevel.NONE),
     DELETEBACKUP(true, LockLevel.COLLECTION),
     CREATESNAPSHOT(true, LockLevel.COLLECTION),
diff --git a/solr/solrj/src/java/org/apache/solr/common/params/CoreAdminParams.java b/solr/solrj/src/java/org/apache/solr/common/params/CoreAdminParams.java
index 33d908ba4cc..59f184a92c8 100644
--- a/solr/solrj/src/java/org/apache/solr/common/params/CoreAdminParams.java
+++ b/solr/solrj/src/java/org/apache/solr/common/params/CoreAdminParams.java
@@ -179,6 +179,7 @@ public abstract class CoreAdminParams {
     // Internal APIs to backup and restore a core
     BACKUPCORE,
     RESTORECORE,
+    INSTALLCOREDATA,
     CREATESNAPSHOT,
     DELETESNAPSHOT,
     LISTSNAPSHOTS;
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractInstallShardTest.java b/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractInstallShardTest.java
new file mode 100644
index 00000000000..b2e614c55e5
--- /dev/null
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractInstallShardTest.java
@@ -0,0 +1,356 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.net.URI;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import org.apache.lucene.store.Directory;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.impl.BaseHttpSolrClient;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.util.ExecutorUtil;
+import org.apache.solr.common.util.SolrNamedThreadFactory;
+import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.CoreDescriptor;
+import org.apache.solr.core.DirectoryFactory;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.core.backup.repository.BackupRepository;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Base class for testing the "Install Shard API" with various backup repositories.
+ *
+ * <p>Subclasses are expected to bootstrap a Solr cluster with a single configured backup
+ * repository. This base class will populate that backup repository with all of the data necessary
+ * for these tests.
+ *
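+ * <p>A minimal subclass sketch is shown below; the class name, the {@code cloud-minimal}
+ * configset, and the local-filesystem repository wiring are illustrative assumptions rather than
+ * requirements of this base class:
+ *
+ * <pre>{@code
+ * public class MyLocalFsInstallShardTest extends AbstractInstallShardTest {
+ *   // The repository 'name' must match BACKUP_REPO_NAME so the base class can look it up.
+ *   private static final String REPOSITORY_XML =
+ *       "<backup><repository name=\"trackingBackupRepository\""
+ *           + " class=\"org.apache.solr.core.backup.repository.LocalFileSystemRepository\"/>"
+ *           + "</backup>";
+ *
+ *   @BeforeClass
+ *   public static void setupCluster() throws Exception {
+ *     configureCluster(1)
+ *         .addConfig("conf1", configset("cloud-minimal"))
+ *         .withSolrXml(defaultSolrXmlTextWithBackupRepository(REPOSITORY_XML))
+ *         .configure();
+ *     // Index a few collections and copy their per-shard index data into the repository.
+ *     bootstrapBackupRepositoryData(createTempDir().toAbsolutePath().toString());
+ *   }
+ * }
+ * }</pre>
+ *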
+ * @see org.apache.solr.handler.admin.api.InstallShardDataAPI
+ */
+public abstract class AbstractInstallShardTest extends SolrCloudTestCase {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  protected static final String BACKUP_REPO_NAME = "trackingBackupRepository";
+
+  private static long docsSeed; // see indexDocs()
+
+  @BeforeClass
+  public static void seedDocGenerator() {
+    docsSeed = random().nextLong();
+    System.setProperty("solr.directoryFactory", "solr.StandardDirectoryFactory");
+  }
+
+  // Populated by 'bootstrapBackupRepositoryData'
+  private static int singleShardNumDocs = -1;
+  private static int replicasPerShard = -1;
+  private static int multiShardNumDocs = -1;
+  private static URI singleShard1Uri = null;
+  private static URI[] multiShardUris = null;
+
+  public static void bootstrapBackupRepositoryData(String baseRepositoryLocation) throws Exception {
+    final int numShards = random().nextInt(3) + 2;
+    multiShardUris = new URI[numShards];
+    replicasPerShard = random().nextInt(3) + 1;
+    CloudSolrClient solrClient = cluster.getSolrClient();
+
+    // Create collections and index docs
+    final String singleShardCollName = createAndAwaitEmptyCollection(1, replicasPerShard);
+    singleShardNumDocs = indexDocs(singleShardCollName, true);
+    assertCollectionHasNumDocs(singleShardCollName, singleShardNumDocs);
+    final String multiShardCollName = createAndAwaitEmptyCollection(numShards, replicasPerShard);
+    multiShardNumDocs = indexDocs(multiShardCollName, true);
+    assertCollectionHasNumDocs(multiShardCollName, multiShardNumDocs);
+
+    // Upload shard data to BackupRepository - single shard collection
+    singleShard1Uri =
+        createBackupRepoDirectoryForShardData(
+            baseRepositoryLocation, singleShardCollName, "shard1");
+    copyShardDataToBackupRepository(singleShardCollName, "shard1", singleShard1Uri);
+    // Upload shard data to BackupRepository - multi-shard collection
+    for (int i = 0; i < multiShardUris.length; i++) {
+      final String shardName = "shard" + (i + 1);
+      multiShardUris[i] =
+          createBackupRepoDirectoryForShardData(
+              baseRepositoryLocation, multiShardCollName, shardName);
+      copyShardDataToBackupRepository(multiShardCollName, shardName, multiShardUris[i]);
+    }
+
+    // Nuke collections now that we've populated the BackupRepository
+    CollectionAdminRequest.deleteCollection(singleShardCollName).process(solrClient);
+    CollectionAdminRequest.deleteCollection(multiShardCollName).process(solrClient);
+  }
+
+  @Test
+  public void testInstallFailsIfCollectionIsNotInReadOnlyMode() throws Exception {
+    final String collectionName = createAndAwaitEmptyCollection(1, replicasPerShard);
+
+    final String singleShardLocation = singleShard1Uri.toString();
+    final BaseHttpSolrClient.RemoteSolrException rse =
+        expectThrows(
+            BaseHttpSolrClient.RemoteSolrException.class,
+            () -> {
+              CollectionAdminRequest.installDataToShard(
+                      collectionName, "shard1", singleShardLocation, BACKUP_REPO_NAME)
+                  .process(cluster.getSolrClient());
+            });
+    assertEquals(400, rse.code());
+    assertTrue(rse.getMessage().contains("Collection must be in readOnly mode"));
+
+    // Shard-install has failed so collection should still be empty.
+    assertCollectionHasNumDocs(collectionName, 0);
+  }
+
+  @Test
+  public void testInstallToSingleShardCollection() throws Exception {
+    final String collectionName = createAndAwaitEmptyCollection(1, replicasPerShard);
+    enableReadOnly(collectionName);
+
+    final String singleShardLocation = singleShard1Uri.toString();
+    CollectionAdminRequest.installDataToShard(
+            collectionName, "shard1", singleShardLocation, BACKUP_REPO_NAME)
+        .process(cluster.getSolrClient());
+
+    // Shard-install succeeded, so the collection should now contain the installed documents.
+    assertCollectionHasNumDocs(collectionName, singleShardNumDocs);
+  }
+
+  @Test
+  public void testSerialInstallToMultiShardCollection() throws Exception {
+    final String collectionName =
+        createAndAwaitEmptyCollection(multiShardUris.length, replicasPerShard);
+    enableReadOnly(collectionName);
+
+    for (int i = 1; i <= multiShardUris.length; i++) {
+      CollectionAdminRequest.installDataToShard(
+              collectionName, "shard" + i, multiShardUris[i - 1].toString(), BACKUP_REPO_NAME)
+          .process(cluster.getSolrClient());
+    }
+
+    assertCollectionHasNumDocs(collectionName, multiShardNumDocs);
+  }
+
+  @Test
+  public void testParallelInstallToMultiShardCollection() throws Exception {
+    final String collectionName =
+        createAndAwaitEmptyCollection(multiShardUris.length, replicasPerShard);
+    enableReadOnly(collectionName);
+
+    runParallelShardInstalls(collectionName, multiShardUris);
+
+    assertCollectionHasNumDocs(collectionName, multiShardNumDocs);
+  }
+
+  /**
+   * Builds a string representation of a valid solr.xml configuration, with the provided
+   * backup-repository configuration inserted.
+   *
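+   * <p>For example (illustrative only; any configured {@code BackupRepository} implementation
+   * could appear here), the argument might look like:
+   *
+   * <pre>{@code
+   * <backup>
+   *   <repository name="trackingBackupRepository"
+   *               class="org.apache.solr.core.backup.repository.LocalFileSystemRepository"/>
+   * </backup>
+   * }</pre>
+   *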
+   * @param backupRepositoryText a string representing the 'backup' XML tag to put in the
+   *     constructed solr.xml
+   */
+  public static String defaultSolrXmlTextWithBackupRepository(String backupRepositoryText) {
+    return "<solr>\n"
+        + "\n"
+        + "  <str name=\"shareSchema\">${shareSchema:false}</str>\n"
+        + "  <str name=\"configSetBaseDir\">${configSetBaseDir:configsets}</str>\n"
+        + "  <str name=\"coreRootDirectory\">${coreRootDirectory:.}</str>\n"
+        + "\n"
+        + "  <shardHandlerFactory name=\"shardHandlerFactory\" class=\"HttpShardHandlerFactory\">\n"
+        + "    <str name=\"urlScheme\">${urlScheme:}</str>\n"
+        + "    <int name=\"socketTimeout\">${socketTimeout:90000}</int>\n"
+        + "    <int name=\"connTimeout\">${connTimeout:15000}</int>\n"
+        + "  </shardHandlerFactory>\n"
+        + "\n"
+        + "  <solrcloud>\n"
+        + "    <str name=\"host\">127.0.0.1</str>\n"
+        + "    <int name=\"hostPort\">${hostPort:8983}</int>\n"
+        + "    <str name=\"hostContext\">${hostContext:solr}</str>\n"
+        + "    <int name=\"zkClientTimeout\">${solr.zkclienttimeout:30000}</int>\n"
+        + "    <bool name=\"genericCoreNodeNames\">${genericCoreNodeNames:true}</bool>\n"
+        + "    <int name=\"leaderVoteWait\">10000</int>\n"
+        + "    <int name=\"distribUpdateConnTimeout\">${distribUpdateConnTimeout:45000}</int>\n"
+        + "    <int name=\"distribUpdateSoTimeout\">${distribUpdateSoTimeout:340000}</int>\n"
+        + "  </solrcloud>\n"
+        + "  \n"
+        + backupRepositoryText
+        + "  \n"
+        + "</solr>\n";
+  }
+
+  private static void assertCollectionHasNumDocs(String collection, int expectedNumDocs)
+      throws Exception {
+    final SolrClient solrClient = cluster.getSolrClient();
+    assertEquals(
+        expectedNumDocs,
+        solrClient.query(collection, new SolrQuery("*:*")).getResults().getNumFound());
+  }
+
+  private static void copyShardDataToBackupRepository(
+      String collectionName, String shardName, URI destinationUri) throws Exception {
+    final CoreContainer cc = cluster.getJettySolrRunner(0).getCoreContainer();
+    final Collection<String> coreNames = cc.getAllCoreNames();
+    final String coreName =
+        coreNames.stream()
+            .filter(name -> name.contains(collectionName) && name.contains(shardName))
+            .findFirst()
+            .get();
+    final CoreDescriptor cd = cc.getCoreDescriptor(coreName);
+    final Path coreInstanceDir = cd.getInstanceDir();
+    assert coreInstanceDir.toFile().exists();
+    assert coreInstanceDir.toFile().isDirectory();
+
+    final Path coreIndexDir = coreInstanceDir.resolve("data").resolve("index");
+    assert coreIndexDir.toFile().exists();
+    assert coreIndexDir.toFile().isDirectory();
+
+    try (final BackupRepository backupRepository = cc.newBackupRepository(BACKUP_REPO_NAME);
+        final SolrCore core = cc.getCore(coreName)) {
+      final Directory dir =
+          core.getDirectoryFactory()
+              .get(
+                  coreIndexDir.toString(),
+                  DirectoryFactory.DirContext.DEFAULT,
+                  core.getSolrConfig().indexConfig.lockType);
+      try {
+        for (final String dirContent : dir.listAll()) {
+          if (dirContent.contains("write.lock")) continue;
+          backupRepository.copyFileFrom(dir, dirContent, destinationUri);
+        }
+      } finally {
+        core.getDirectoryFactory().release(dir);
+      }
+    }
+  }
+
+  private static URI createBackupRepoDirectoryForShardData(
+      String baseLocation, String collectionName, String shardName) throws Exception {
+    final CoreContainer cc = cluster.getJettySolrRunner(0).getCoreContainer();
+    try (final BackupRepository backupRepository = cc.newBackupRepository(BACKUP_REPO_NAME)) {
+      final URI baseLocationUri = backupRepository.createURI(baseLocation);
+      final URI collectionLocation = backupRepository.resolve(baseLocationUri, collectionName);
+      backupRepository.createDirectory(collectionLocation);
+      final URI shardLocation = backupRepository.resolve(collectionLocation, shardName);
+      backupRepository.createDirectory(shardLocation);
+      return shardLocation;
+    }
+  }
+
+  private static int indexDocs(String collectionName, boolean useUUID) throws Exception {
+    Random random =
+        new Random(
+            docsSeed); // use a constant seed for the whole test run so that we can easily re-index.
+    int numDocs = random.nextInt(100) + 5;
+    indexDocs(collectionName, numDocs, useUUID);
+    return numDocs;
+  }
+
+  private static void indexDocs(String collectionName, int numDocs, boolean useUUID)
+      throws Exception {
+    List<SolrInputDocument> docs = new ArrayList<>(numDocs);
+    for (int i = 0; i < numDocs; i++) {
+      SolrInputDocument doc = new SolrInputDocument();
+      doc.addField("id", (useUUID ? java.util.UUID.randomUUID().toString() : i));
+      doc.addField("val_s", "some value");
+      docs.add(doc);
+    }
+
+    CloudSolrClient client = cluster.getSolrClient();
+    client.add(collectionName, docs); // batch
+    client.commit(collectionName);
+
+    log.info("Indexed {} docs to collection: {}", numDocs, collectionName);
+  }
+
+  private static String createAndAwaitEmptyCollection(int numShards, int replicasPerShard)
+      throws Exception {
+    final SolrClient solrClient = cluster.getSolrClient();
+
+    final String collectionName = UUID.randomUUID().toString().replace("-", "_");
+    CollectionAdminRequest.createCollection(collectionName, "conf1", numShards, replicasPerShard)
+        .process(solrClient);
+    cluster.waitForActiveCollection(collectionName, numShards, numShards * replicasPerShard);
+
+    assertCollectionHasNumDocs(collectionName, 0);
+    return collectionName;
+  }
+
+  private static void enableReadOnly(String collectionName) throws Exception {
+    CollectionAdminRequest.modifyCollection(collectionName, Map.of("readOnly", true))
+        .process(cluster.getSolrClient());
+  }
+
+  private void runParallelShardInstalls(String collectionName, URI[] dataLocations)
+      throws Exception {
+    final SolrClient solrClient = cluster.getSolrClient();
+    final List<Callable<Exception>> tasks = new ArrayList<>();
+    for (int i = 0; i < dataLocations.length; i++) {
+      final String shardName = "shard" + (i + 1);
+      final String dataLocation = dataLocations[i].toString();
+      tasks.add(
+          () -> {
+            try {
+              CollectionAdminRequest.installDataToShard(
+                      collectionName, shardName, dataLocation, BACKUP_REPO_NAME)
+                  .process(solrClient);
+              return null;
+            } catch (Exception e) {
+              return e;
+            }
+          });
+    }
+
+    final ExecutorService executor =
+        ExecutorUtil.newMDCAwareFixedThreadPool(
+            dataLocations.length, new SolrNamedThreadFactory("shardinstall"));
+    final List<Future<Exception>> futures = executor.invokeAll(tasks, 10, TimeUnit.SECONDS);
+    futures.stream()
+        .forEach(
+            future -> {
+              assertTrue("Shard installation exceeded the test timeout", future.isDone());
+              try {
+                assertNull(
+                    "Expected shard installation to complete successfully but failed with exception "
+                        + future.get(),
+                    future.get());
+              } catch (InterruptedException | ExecutionException e) {
+                throw new RuntimeException(e);
+              }
+            });
+
+    executor.shutdown();
+    executor.awaitTermination(10, TimeUnit.SECONDS);
+  }
+}