Posted to commits@lucene.apache.org by da...@apache.org on 2018/01/23 10:30:54 UTC

[24/41] lucene-solr:jira/solr-11702: SOLR-11817: Move Collections API classes to their own package

SOLR-11817: Move Collections API classes to their own package


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a3c4f738
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a3c4f738
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a3c4f738

Branch: refs/heads/jira/solr-11702
Commit: a3c4f7388c13cfdeb66d83b434b991e5e159d4cc
Parents: e2bba98
Author: Varun Thacker <va...@apache.org>
Authored: Mon Jan 15 18:07:34 2018 -0800
Committer: Varun Thacker <va...@apache.org>
Committed: Tue Jan 16 11:03:40 2018 -0800

----------------------------------------------------------------------
 solr/CHANGES.txt                                |    2 +
 .../org/apache/solr/cloud/AddReplicaCmd.java    |  279 -----
 .../src/java/org/apache/solr/cloud/Assign.java  |  483 ---------
 .../java/org/apache/solr/cloud/BackupCmd.java   |  225 ----
 .../solr/cloud/CloudConfigSetService.java       |    1 +
 .../java/org/apache/solr/cloud/CloudUtil.java   |    2 +-
 .../org/apache/solr/cloud/CreateAliasCmd.java   |  101 --
 .../apache/solr/cloud/CreateCollectionCmd.java  |  533 ---------
 .../org/apache/solr/cloud/CreateShardCmd.java   |  191 ----
 .../apache/solr/cloud/CreateSnapshotCmd.java    |  179 ---
 .../org/apache/solr/cloud/DeleteAliasCmd.java   |   43 -
 .../apache/solr/cloud/DeleteCollectionCmd.java  |  141 ---
 .../org/apache/solr/cloud/DeleteNodeCmd.java    |  137 ---
 .../org/apache/solr/cloud/DeleteReplicaCmd.java |  281 -----
 .../org/apache/solr/cloud/DeleteShardCmd.java   |  178 ---
 .../apache/solr/cloud/DeleteSnapshotCmd.java    |  160 ---
 .../solr/cloud/ExclusiveSliceProperty.java      |    5 +-
 .../solr/cloud/LeaderRecoveryWatcher.java       |   88 --
 .../java/org/apache/solr/cloud/MigrateCmd.java  |  337 ------
 .../org/apache/solr/cloud/MoveReplicaCmd.java   |  302 ------
 .../java/org/apache/solr/cloud/Overseer.java    |    1 +
 .../OverseerCollectionConfigSetProcessor.java   |    1 +
 .../cloud/OverseerCollectionMessageHandler.java | 1003 -----------------
 .../org/apache/solr/cloud/OverseerRoleCmd.java  |  102 --
 .../apache/solr/cloud/OverseerStatusCmd.java    |  112 --
 .../org/apache/solr/cloud/ReplaceNodeCmd.java   |  226 ----
 .../java/org/apache/solr/cloud/RestoreCmd.java  |  363 -------
 .../cloud/RoutedAliasCreateCollectionCmd.java   |  182 ----
 .../org/apache/solr/cloud/SplitShardCmd.java    |  542 ----------
 .../org/apache/solr/cloud/UtilizeNodeCmd.java   |  120 ---
 .../cloud/api/collections/AddReplicaCmd.java    |  282 +++++
 .../solr/cloud/api/collections/Assign.java      |  483 +++++++++
 .../solr/cloud/api/collections/BackupCmd.java   |  224 ++++
 .../cloud/api/collections/CreateAliasCmd.java   |  100 ++
 .../api/collections/CreateCollectionCmd.java    |  531 +++++++++
 .../cloud/api/collections/CreateShardCmd.java   |  190 ++++
 .../api/collections/CreateSnapshotCmd.java      |  179 +++
 .../cloud/api/collections/DeleteAliasCmd.java   |   43 +
 .../api/collections/DeleteCollectionCmd.java    |  142 +++
 .../cloud/api/collections/DeleteNodeCmd.java    |  137 +++
 .../cloud/api/collections/DeleteReplicaCmd.java |  280 +++++
 .../cloud/api/collections/DeleteShardCmd.java   |  178 +++
 .../api/collections/DeleteSnapshotCmd.java      |  160 +++
 .../api/collections/LeaderRecoveryWatcher.java  |   88 ++
 .../solr/cloud/api/collections/MigrateCmd.java  |  334 ++++++
 .../cloud/api/collections/MoveReplicaCmd.java   |  303 ++++++
 .../OverseerCollectionMessageHandler.java       | 1011 +++++++++++++++++
 .../cloud/api/collections/OverseerRoleCmd.java  |  102 ++
 .../api/collections/OverseerStatusCmd.java      |  113 ++
 .../cloud/api/collections/ReplaceNodeCmd.java   |  227 ++++
 .../solr/cloud/api/collections/RestoreCmd.java  |  357 ++++++
 .../RoutedAliasCreateCollectionCmd.java         |  184 ++++
 .../cloud/api/collections/SplitShardCmd.java    |  540 ++++++++++
 .../cloud/api/collections/UtilizeNodeCmd.java   |  120 +++
 .../cloud/api/collections/package-info.java     |   23 +
 .../cloud/overseer/ClusterStateMutator.java     |    2 +-
 .../solr/cloud/overseer/ReplicaMutator.java     |   12 +-
 .../solr/cloud/overseer/SliceMutator.java       |   13 +-
 .../solr/handler/admin/CollectionsHandler.java  |   24 +-
 .../TimeRoutedAliasUpdateProcessor.java         |    2 +-
 .../AbstractCloudBackupRestoreTestCase.java     |  346 ------
 .../test/org/apache/solr/cloud/AssignTest.java  |  155 ---
 .../solr/cloud/BasicDistributedZkTest.java      |    1 +
 .../solr/cloud/ChaosMonkeyShardSplitTest.java   |    5 +
 .../apache/solr/cloud/CollectionReloadTest.java |   84 --
 .../cloud/CollectionTooManyReplicasTest.java    |  221 ----
 .../CollectionsAPIAsyncDistributedZkTest.java   |  177 ---
 .../cloud/CollectionsAPIDistributedZkTest.java  |  684 ------------
 ...ConcurrentDeleteAndCreateCollectionTest.java |  226 ----
 .../apache/solr/cloud/CustomCollectionTest.java |  198 ----
 ...verseerCollectionConfigSetProcessorTest.java |   28 +-
 .../solr/cloud/OverseerTaskQueueTest.java       |    1 +
 .../solr/cloud/ReplicaPropertiesBase.java       |  177 ---
 .../org/apache/solr/cloud/ShardSplitTest.java   | 1015 -----------------
 .../cloud/SimpleCollectionCreateDeleteTest.java |   64 --
 .../apache/solr/cloud/TestCollectionAPI.java    |  797 --------------
 .../TestCollectionsAPIViaSolrCloudCluster.java  |  295 -----
 .../solr/cloud/TestHdfsCloudBackupRestore.java  |  203 ----
 .../cloud/TestLocalFSCloudBackupRestore.java    |   57 -
 .../solr/cloud/TestReplicaProperties.java       |  236 ----
 .../cloud/TestRequestStatusCollectionAPI.java   |  197 ----
 .../AbstractCloudBackupRestoreTestCase.java     |  348 ++++++
 .../solr/cloud/api/collections/AssignTest.java  |  156 +++
 .../api/collections/CollectionReloadTest.java   |   85 ++
 .../CollectionTooManyReplicasTest.java          |  222 ++++
 .../CollectionsAPIAsyncDistributedZkTest.java   |  178 +++
 .../CollectionsAPIDistributedZkTest.java        |  686 ++++++++++++
 ...ConcurrentDeleteAndCreateCollectionTest.java |  227 ++++
 .../api/collections/CustomCollectionTest.java   |  199 ++++
 .../HdfsCollectionsAPIDistributedZkTest.java    |  176 +++
 .../api/collections/ReplicaPropertiesBase.java  |  178 +++
 .../cloud/api/collections/ShardSplitTest.java   | 1017 ++++++++++++++++++
 .../SimpleCollectionCreateDeleteTest.java       |   66 ++
 .../api/collections/TestCollectionAPI.java      |  795 ++++++++++++++
 .../TestCollectionsAPIViaSolrCloudCluster.java  |  297 +++++
 .../collections/TestHdfsCloudBackupRestore.java |  207 ++++
 .../TestLocalFSCloudBackupRestore.java          |   57 +
 .../api/collections/TestReplicaProperties.java  |  236 ++++
 .../TestRequestStatusCollectionAPI.java         |  198 ++++
 .../cloud/autoscaling/sim/SimCloudManager.java  |    2 +-
 .../sim/SimClusterStateProvider.java            |   14 +-
 .../cloud/cdcr/BaseCdcrDistributedZkTest.java   |   11 +-
 .../HdfsCollectionsAPIDistributedZkTest.java    |  176 ---
 .../cloud/AbstractFullDistribZkTestBase.java    |   40 +-
 .../apache/solr/cloud/MiniSolrCloudCluster.java |    2 +-
 .../solr/cloud/MiniSolrCloudClusterTest.java    |    2 +-
 106 files changed, 11761 insertions(+), 11685 deletions(-)
----------------------------------------------------------------------
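
The diffstat above is essentially a package move: the Collections API command classes leave org.apache.solr.cloud and reappear under org.apache.solr.cloud.api.collections. For code that referenced them, the visible change is the import path; a minimal illustration (class name taken from the CloudConfigSetService hunk below):

    // before this commit
    import org.apache.solr.cloud.CreateCollectionCmd;
    // after this commit
    import org.apache.solr.cloud.api.collections.CreateCollectionCmd;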


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 45a9a59..187976d 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -150,6 +150,8 @@ Other Changes
 
 * SOLR-11218: Fail and return an error when attempting to delete a collection that's part of an alias (Erick Erickson)
 
+* SOLR-11817: Move Collections API classes to their own package (Varun Thacker)
+
 ==================  7.2.1 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java
deleted file mode 100644
index 71a54c14..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
-import org.apache.solr.common.SolrCloseableLatch;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.component.ShardHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SKIP_CREATE_REPLICA_IN_CLUSTER_STATE;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonAdminParams.TIMEOUT;
-import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
-
-public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public AddReplicaCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    addReplica(state, message, results, null);
-  }
-
-  ZkNodeProps addReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
-      throws IOException, InterruptedException {
-    log.debug("addReplica() : {}", Utils.toJSONString(message));
-    boolean waitForFinalState = message.getBool(WAIT_FOR_FINAL_STATE, false);
-    boolean skipCreateReplicaInClusterState = message.getBool(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, false);
-    final String asyncId = message.getStr(ASYNC);
-
-    AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
-    message = assignReplicaDetails(ocmh.cloudManager, clusterState, message, sessionWrapper);
-
-    String collection = message.getStr(COLLECTION_PROP);
-    DocCollection coll = clusterState.getCollection(collection);
-
-    String node = message.getStr(CoreAdminParams.NODE);
-    String shard = message.getStr(SHARD_ID_PROP);
-    String coreName = message.getStr(CoreAdminParams.NAME);
-    String coreNodeName = message.getStr(CoreAdminParams.CORE_NODE_NAME);
-    int timeout = message.getInt(TIMEOUT, 10 * 60); // 10 minutes
-    Replica.Type replicaType = Replica.Type.valueOf(message.getStr(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT.name()).toUpperCase(Locale.ROOT));
-    boolean parallel = message.getBool("parallel", false);
-
-    ModifiableSolrParams params = new ModifiableSolrParams();
-
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    if (!Overseer.isLegacy(zkStateReader)) {
-      if (!skipCreateReplicaInClusterState) {
-        ZkNodeProps props = new ZkNodeProps(
-            Overseer.QUEUE_OPERATION, ADDREPLICA.toLower(),
-            ZkStateReader.COLLECTION_PROP, collection,
-            ZkStateReader.SHARD_ID_PROP, shard,
-            ZkStateReader.CORE_NAME_PROP, coreName,
-            ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
-            ZkStateReader.BASE_URL_PROP, zkStateReader.getBaseUrlForNodeName(node),
-            ZkStateReader.NODE_NAME_PROP, node,
-            ZkStateReader.REPLICA_TYPE, replicaType.name());
-        if (coreNodeName != null) {
-          props = props.plus(ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
-        }
-        try {
-          Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
-        } catch (Exception e) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Exception updating Overseer state queue", e);
-        }
-      }
-      params.set(CoreAdminParams.CORE_NODE_NAME,
-          ocmh.waitToSeeReplicasInState(collection, Collections.singletonList(coreName)).get(coreName).getName());
-    }
-
-    String configName = zkStateReader.readConfigName(collection);
-    String routeKey = message.getStr(ShardParams._ROUTE_);
-    String dataDir = message.getStr(CoreAdminParams.DATA_DIR);
-    String ulogDir = message.getStr(CoreAdminParams.ULOG_DIR);
-    String instanceDir = message.getStr(CoreAdminParams.INSTANCE_DIR);
-
-    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.CREATE.toString());
-    params.set(CoreAdminParams.NAME, coreName);
-    params.set(COLL_CONF, configName);
-    params.set(CoreAdminParams.COLLECTION, collection);
-    params.set(CoreAdminParams.REPLICA_TYPE, replicaType.name());
-    if (shard != null) {
-      params.set(CoreAdminParams.SHARD, shard);
-    } else if (routeKey != null) {
-      Collection<Slice> slices = coll.getRouter().getSearchSlicesSingle(routeKey, null, coll);
-      if (slices.isEmpty()) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No active shard serving _route_=" + routeKey + " found");
-      } else {
-        params.set(CoreAdminParams.SHARD, slices.iterator().next().getName());
-      }
-    } else {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Specify either 'shard' or _route_ param");
-    }
-    if (dataDir != null) {
-      params.set(CoreAdminParams.DATA_DIR, dataDir);
-    }
-    if (ulogDir != null) {
-      params.set(CoreAdminParams.ULOG_DIR, ulogDir);
-    }
-    if (instanceDir != null) {
-      params.set(CoreAdminParams.INSTANCE_DIR, instanceDir);
-    }
-    if (coreNodeName != null) {
-      params.set(CoreAdminParams.CORE_NODE_NAME, coreNodeName);
-    }
-    ocmh.addPropertyParams(message, params);
-
-    // For tracking async calls.
-    Map<String,String> requestMap = new HashMap<>();
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-
-    ocmh.sendShardRequest(node, params, shardHandler, asyncId, requestMap);
-
-    final String fnode = node;
-    final String fcoreName = coreName;
-
-    Runnable runnable = () -> {
-      ocmh.processResponses(results, shardHandler, true, "ADDREPLICA failed to create replica", asyncId, requestMap);
-      ocmh.waitForCoreNodeName(collection, fnode, fcoreName);
-      if (sessionWrapper.get() != null) {
-        sessionWrapper.get().release();
-      }
-      if (onComplete != null) onComplete.run();
-    };
-
-    if (!parallel || waitForFinalState) {
-      if (waitForFinalState) {
-        SolrCloseableLatch latch = new SolrCloseableLatch(1, ocmh);
-        ActiveReplicaWatcher watcher = new ActiveReplicaWatcher(collection, null, Collections.singletonList(coreName), latch);
-        try {
-          zkStateReader.registerCollectionStateWatcher(collection, watcher);
-          runnable.run();
-          if (!latch.await(timeout, TimeUnit.SECONDS)) {
-            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Timeout waiting " + timeout + " seconds for replica to become active.");
-          }
-        } finally {
-          zkStateReader.removeCollectionStateWatcher(collection, watcher);
-        }
-      } else {
-        runnable.run();
-      }
-    } else {
-      ocmh.tpe.submit(runnable);
-    }
-
-
-    return new ZkNodeProps(
-        ZkStateReader.COLLECTION_PROP, collection,
-        ZkStateReader.SHARD_ID_PROP, shard,
-        ZkStateReader.CORE_NAME_PROP, coreName,
-        ZkStateReader.NODE_NAME_PROP, node
-    );
-  }
-
-  public static ZkNodeProps assignReplicaDetails(SolrCloudManager cloudManager, ClusterState clusterState,
-                                                 ZkNodeProps message, AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException {
-    boolean skipCreateReplicaInClusterState = message.getBool(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, false);
-
-    String collection = message.getStr(COLLECTION_PROP);
-    String node = message.getStr(CoreAdminParams.NODE);
-    String shard = message.getStr(SHARD_ID_PROP);
-    String coreName = message.getStr(CoreAdminParams.NAME);
-    String coreNodeName = message.getStr(CoreAdminParams.CORE_NODE_NAME);
-    Replica.Type replicaType = Replica.Type.valueOf(message.getStr(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT.name()).toUpperCase(Locale.ROOT));
-    if (StringUtils.isBlank(coreName)) {
-      coreName = message.getStr(CoreAdminParams.PROPERTY_PREFIX + CoreAdminParams.NAME);
-    }
-
-    DocCollection coll = clusterState.getCollection(collection);
-    if (coll == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection: " + collection + " does not exist");
-    }
-    if (coll.getSlice(shard) == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "Collection: " + collection + " shard: " + shard + " does not exist");
-    }
-
-    // Kind of unnecessary, but it does put the logic of whether to override maxShardsPerNode in one place.
-    if (!skipCreateReplicaInClusterState) {
-      if (CloudUtil.usePolicyFramework(coll, cloudManager)) {
-        if (node == null) {
-          if(coll.getPolicyName() != null) message.getProperties().put(Policy.POLICY, coll.getPolicyName());
-          node = Assign.identifyNodes(cloudManager,
-              clusterState,
-              Collections.emptyList(),
-              collection,
-              message,
-              Collections.singletonList(shard),
-              replicaType == Replica.Type.NRT ? 0 : 1,
-              replicaType == Replica.Type.TLOG ? 0 : 1,
-              replicaType == Replica.Type.PULL ? 0 : 1
-          ).get(0).node;
-          sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
-        }
-      } else {
-        node = Assign.getNodesForNewReplicas(clusterState, collection, shard, 1, node,
-            cloudManager).get(0).nodeName;// TODO: use replica type in this logic too
-      }
-    }
-    log.info("Node Identified {} for creating new replica", node);
-
-    if (!clusterState.liveNodesContain(node)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Node: " + node + " is not live");
-    }
-    if (coreName == null) {
-      coreName = Assign.buildSolrCoreName(cloudManager.getDistribStateManager(), coll, shard, replicaType);
-    } else if (!skipCreateReplicaInClusterState) {
-      //Validate that the core name is unique in that collection
-      for (Slice slice : coll.getSlices()) {
-        for (Replica replica : slice.getReplicas()) {
-          String replicaCoreName = replica.getStr(CORE_NAME_PROP);
-          if (coreName.equals(replicaCoreName)) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Another replica with the same core name already exists" +
-                " for this collection");
-          }
-        }
-      }
-    }
-    if (coreNodeName != null) {
-      message = message.plus(CoreAdminParams.CORE_NODE_NAME, coreNodeName);
-    }
-    message = message.plus(CoreAdminParams.NAME, coreName);
-    message = message.plus(CoreAdminParams.NODE, node);
-    return message;
-  }
-}
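
For context, the deleted AddReplicaCmd above (recreated under cloud/api/collections) is the Overseer-side handler for the ADDREPLICA action. A client normally reaches it through SolrJ rather than directly; a minimal sketch, assuming a cluster at the illustrative ZooKeeper address and made-up collection/shard names:

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class AddReplicaExample {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder()
            .withZkHost("localhost:9983").build()) {
          // Queues an ADDREPLICA operation; the Overseer dispatches it to AddReplicaCmd.call()
          CollectionAdminRequest.addReplicaToShard("mycoll", "shard1").process(client);
        }
      }
    }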

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/Assign.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/Assign.java b/solr/core/src/java/org/apache/solr/cloud/Assign.java
deleted file mode 100644
index c746c94..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/Assign.java
+++ /dev/null
@@ -1,483 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import com.google.common.collect.ImmutableMap;
-import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
-import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
-import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
-import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
-import org.apache.solr.cloud.rule.ReplicaAssigner;
-import org.apache.solr.cloud.rule.Rule;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ReplicaPosition;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.util.NumberUtils;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.POLICY;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE_DEFAULT;
-import static org.apache.solr.common.cloud.DocCollection.SNITCH;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-
-public class Assign {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static int incAndGetId(DistribStateManager stateManager, String collection, int defaultValue) {
-    String path = "/collections/"+collection;
-    try {
-      if (!stateManager.hasData(path)) {
-        try {
-          stateManager.makePath(path);
-        } catch (AlreadyExistsException e) {
-          // it's okay if another beats us creating the node
-        }
-      }
-      path += "/counter";
-      if (!stateManager.hasData(path)) {
-        try {
-          stateManager.createData(path, NumberUtils.intToBytes(defaultValue), CreateMode.PERSISTENT);
-        } catch (AlreadyExistsException e) {
-          // it's okay if another beats us creating the node
-        }
-      }
-    } catch (InterruptedException e) {
-      Thread.interrupted();
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error creating counter node in Zookeeper for collection:" + collection, e);
-    } catch (IOException | KeeperException e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error creating counter node in Zookeeper for collection:" + collection, e);
-    }
-
-    while (true) {
-      try {
-        int version = 0;
-        int currentId = 0;
-        VersionedData data = stateManager.getData(path, null);
-        if (data != null) {
-          currentId = NumberUtils.bytesToInt(data.getData());
-          version = data.getVersion();
-        }
-        byte[] bytes = NumberUtils.intToBytes(++currentId);
-        stateManager.setData(path, bytes, version);
-        return currentId;
-      } catch (BadVersionException e) {
-        continue;
-      } catch (IOException | KeeperException e) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error inc and get counter from Zookeeper for collection:"+collection, e);
-      } catch (InterruptedException e) {
-        Thread.interrupted();
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error inc and get counter from Zookeeper for collection:" + collection, e);
-      }
-    }
-  }
-
-  public static String assignCoreNodeName(DistribStateManager stateManager, DocCollection collection) {
-    // for backward compatibility;
-    int defaultValue = defaultCounterValue(collection, false);
-    String coreNodeName = "core_node" + incAndGetId(stateManager, collection.getName(), defaultValue);
-    while (collection.getReplica(coreNodeName) != null) {
-      // there is wee chance that, the new coreNodeName id not totally unique,
-      // but this will be guaranteed unique for new collections
-      coreNodeName = "core_node" + incAndGetId(stateManager, collection.getName(), defaultValue);
-    }
-    return coreNodeName;
-  }
-
-  /**
-   * Assign a new unique id up to slices count - then add replicas evenly.
-   *
-   * @return the assigned shard id
-   */
-  public static String assignShard(DocCollection collection, Integer numShards) {
-    if (numShards == null) {
-      numShards = 1;
-    }
-    String returnShardId = null;
-    Map<String, Slice> sliceMap = collection != null ? collection.getActiveSlicesMap() : null;
-
-
-    // TODO: now that we create shards ahead of time, is this code needed?  Esp since hash ranges aren't assigned when creating via this method?
-
-    if (sliceMap == null) {
-      return "shard1";
-    }
-
-    List<String> shardIdNames = new ArrayList<>(sliceMap.keySet());
-
-    if (shardIdNames.size() < numShards) {
-      return "shard" + (shardIdNames.size() + 1);
-    }
-
-    // TODO: don't need to sort to find shard with fewest replicas!
-
-    // else figure out which shard needs more replicas
-    final Map<String, Integer> map = new HashMap<>();
-    for (String shardId : shardIdNames) {
-      int cnt = sliceMap.get(shardId).getReplicasMap().size();
-      map.put(shardId, cnt);
-    }
-
-    Collections.sort(shardIdNames, (String o1, String o2) -> {
-      Integer one = map.get(o1);
-      Integer two = map.get(o2);
-      return one.compareTo(two);
-    });
-
-    returnShardId = shardIdNames.get(0);
-    return returnShardId;
-  }
-
-  private static String buildSolrCoreName(String collectionName, String shard, Replica.Type type, int replicaNum) {
-    // TODO: Adding the suffix is great for debugging, but may be an issue if at some point we want to support a way to change replica type
-    return String.format(Locale.ROOT, "%s_%s_replica_%s%s", collectionName, shard, type.name().substring(0,1).toLowerCase(Locale.ROOT), replicaNum);
-  }
-
-  private static int defaultCounterValue(DocCollection collection, boolean newCollection) {
-    if (newCollection) return 0;
-    int defaultValue = collection.getReplicas().size();
-    if (collection.getReplicationFactor() != null) {
-      // numReplicas and replicationFactor * numSlices can be not equals,
-      // in case of many addReplicas or deleteReplicas are executed
-      defaultValue = Math.max(defaultValue,
-          collection.getReplicationFactor() * collection.getSlices().size());
-    }
-    return defaultValue * 20;
-  }
-
-  public static String buildSolrCoreName(DistribStateManager stateManager, DocCollection collection, String shard, Replica.Type type, boolean newCollection) {
-    Slice slice = collection.getSlice(shard);
-    int defaultValue = defaultCounterValue(collection, newCollection);
-    int replicaNum = incAndGetId(stateManager, collection.getName(), defaultValue);
-    String coreName = buildSolrCoreName(collection.getName(), shard, type, replicaNum);
-    while (existCoreName(coreName, slice)) {
-      replicaNum = incAndGetId(stateManager, collection.getName(), defaultValue);
-      coreName = buildSolrCoreName(collection.getName(), shard, type, replicaNum);
-    }
-    return coreName;
-  }
-
-  public static String buildSolrCoreName(DistribStateManager stateManager, DocCollection collection, String shard, Replica.Type type) {
-    return buildSolrCoreName(stateManager, collection, shard, type, false);
-  }
-
-  private static boolean existCoreName(String coreName, Slice slice) {
-    if (slice == null) return false;
-    for (Replica replica : slice.getReplicas()) {
-      if (coreName.equals(replica.getStr(CORE_NAME_PROP))) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  public static List<String> getLiveOrLiveAndCreateNodeSetList(final Set<String> liveNodes, final ZkNodeProps message, final Random random) {
-    // TODO: add smarter options that look at the current number of cores per
-    // node?
-    // for now we just go random (except when createNodeSet and createNodeSet.shuffle=false are passed in)
-
-    List<String> nodeList;
-
-    final String createNodeSetStr = message.getStr(CREATE_NODE_SET);
-    final List<String> createNodeList = (createNodeSetStr == null) ? null : StrUtils.splitSmart((CREATE_NODE_SET_EMPTY.equals(createNodeSetStr) ? "" : createNodeSetStr), ",", true);
-
-    if (createNodeList != null) {
-      nodeList = new ArrayList<>(createNodeList);
-      nodeList.retainAll(liveNodes);
-      if (message.getBool(CREATE_NODE_SET_SHUFFLE, CREATE_NODE_SET_SHUFFLE_DEFAULT)) {
-        Collections.shuffle(nodeList, random);
-      }
-    } else {
-      nodeList = new ArrayList<>(liveNodes);
-      Collections.shuffle(nodeList, random);
-    }
-
-    return nodeList;
-  }
-
-  public static List<ReplicaPosition> identifyNodes(SolrCloudManager cloudManager,
-                                                    ClusterState clusterState,
-                                                    List<String> nodeList,
-                                                    String collectionName,
-                                                    ZkNodeProps message,
-                                                    List<String> shardNames,
-                                                    int numNrtReplicas,
-                                                    int numTlogReplicas,
-                                                    int numPullReplicas) throws IOException, InterruptedException {
-    List<Map> rulesMap = (List) message.get("rule");
-    String policyName = message.getStr(POLICY);
-    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
-
-    if (rulesMap == null && policyName == null && autoScalingConfig.getPolicy().getClusterPolicy().isEmpty()) {
-      log.debug("Identify nodes using default");
-      int i = 0;
-      List<ReplicaPosition> result = new ArrayList<>();
-      for (String aShard : shardNames)
-        for (Map.Entry<Replica.Type, Integer> e : ImmutableMap.of(Replica.Type.NRT, numNrtReplicas,
-            Replica.Type.TLOG, numTlogReplicas,
-            Replica.Type.PULL, numPullReplicas
-        ).entrySet()) {
-          for (int j = 0; j < e.getValue(); j++){
-            result.add(new ReplicaPosition(aShard, j, e.getKey(), nodeList.get(i % nodeList.size())));
-            i++;
-          }
-        }
-      return result;
-    } else {
-      if (numTlogReplicas + numPullReplicas != 0 && rulesMap != null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            Replica.Type.TLOG + " or " + Replica.Type.PULL + " replica types not supported with placement rules or cluster policies");
-      }
-    }
-
-    if (rulesMap != null && !rulesMap.isEmpty()) {
-      List<Rule> rules = new ArrayList<>();
-      for (Object map : rulesMap) rules.add(new Rule((Map) map));
-      Map<String, Integer> sharVsReplicaCount = new HashMap<>();
-
-      for (String shard : shardNames) sharVsReplicaCount.put(shard, numNrtReplicas);
-      ReplicaAssigner replicaAssigner = new ReplicaAssigner(rules,
-          sharVsReplicaCount,
-          (List<Map>) message.get(SNITCH),
-          new HashMap<>(),//this is a new collection. So, there are no nodes in any shard
-          nodeList,
-          cloudManager,
-          clusterState);
-
-      Map<ReplicaPosition, String> nodeMappings = replicaAssigner.getNodeMappings();
-      return nodeMappings.entrySet().stream()
-          .map(e -> new ReplicaPosition(e.getKey().shard, e.getKey().index, e.getKey().type, e.getValue()))
-          .collect(Collectors.toList());
-    } else  {
-      if (message.getStr(CREATE_NODE_SET) == null)
-        nodeList = Collections.emptyList();// unless explicitly specified do not pass node list to Policy
-      return getPositionsUsingPolicy(collectionName,
-          shardNames, numNrtReplicas, numTlogReplicas, numPullReplicas, policyName, cloudManager, nodeList);
-    }
-  }
-
-  static class ReplicaCount {
-    public final String nodeName;
-    public int thisCollectionNodes = 0;
-    public int totalNodes = 0;
-
-    ReplicaCount(String nodeName) {
-      this.nodeName = nodeName;
-    }
-
-    public int weight() {
-      return (thisCollectionNodes * 100) + totalNodes;
-    }
-  }
-
-  // Only called from createShard and addReplica (so far).
-  //
-  // Gets a list of candidate nodes to put the required replica(s) on. Throws errors if not enough replicas
-  // could be created on live nodes given maxShardsPerNode, Replication factor (if from createShard) etc.
-  public static List<ReplicaCount> getNodesForNewReplicas(ClusterState clusterState, String collectionName,
-                                                          String shard, int nrtReplicas,
-                                                          Object createNodeSet, SolrCloudManager cloudManager) throws IOException, InterruptedException {
-    log.debug("getNodesForNewReplicas() shard: {} , replicas : {} , createNodeSet {}", shard, nrtReplicas, createNodeSet );
-    DocCollection coll = clusterState.getCollection(collectionName);
-    Integer maxShardsPerNode = coll.getMaxShardsPerNode();
-    List<String> createNodeList = null;
-
-    if (createNodeSet instanceof List) {
-      createNodeList = (List) createNodeSet;
-    } else {
-      createNodeList = createNodeSet == null ? null : StrUtils.splitSmart((String) createNodeSet, ",", true);
-    }
-
-     HashMap<String, ReplicaCount> nodeNameVsShardCount = getNodeNameVsShardCount(collectionName, clusterState, createNodeList);
-
-    if (createNodeList == null) { // We only care if we haven't been told to put new replicas on specific nodes.
-      int availableSlots = 0;
-      for (Map.Entry<String, ReplicaCount> ent : nodeNameVsShardCount.entrySet()) {
-        //ADDREPLICA can put more than maxShardsPerNode on an instance, so this test is necessary.
-        if (maxShardsPerNode > ent.getValue().thisCollectionNodes) {
-          availableSlots += (maxShardsPerNode - ent.getValue().thisCollectionNodes);
-        }
-      }
-      if (availableSlots < nrtReplicas) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            String.format(Locale.ROOT, "Cannot create %d new replicas for collection %s given the current number of live nodes and a maxShardsPerNode of %d",
-                nrtReplicas, collectionName, maxShardsPerNode));
-      }
-    }
-
-    List l = (List) coll.get(DocCollection.RULE);
-    List<ReplicaPosition> replicaPositions = null;
-    if (l != null) {
-      // TODO: make it so that this method doesn't require access to CC
-      replicaPositions = getNodesViaRules(clusterState, shard, nrtReplicas, cloudManager, coll, createNodeList, l);
-    }
-    String policyName = coll.getStr(POLICY);
-    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
-    if (policyName != null || !autoScalingConfig.getPolicy().getClusterPolicy().isEmpty()) {
-      replicaPositions = Assign.getPositionsUsingPolicy(collectionName, Collections.singletonList(shard), nrtReplicas, 0, 0,
-          policyName, cloudManager, createNodeList);
-    }
-
-    if(replicaPositions != null){
-      List<ReplicaCount> repCounts = new ArrayList<>();
-      for (ReplicaPosition p : replicaPositions) {
-        repCounts.add(new ReplicaCount(p.node));
-      }
-      return repCounts;
-    }
-
-    ArrayList<ReplicaCount> sortedNodeList = new ArrayList<>(nodeNameVsShardCount.values());
-    Collections.sort(sortedNodeList, (x, y) -> (x.weight() < y.weight()) ? -1 : ((x.weight() == y.weight()) ? 0 : 1));
-    return sortedNodeList;
-
-  }
-
-  public static List<ReplicaPosition> getPositionsUsingPolicy(String collName, List<String> shardNames,
-                                                              int nrtReplicas,
-                                                              int tlogReplicas,
-                                                              int pullReplicas,
-                                                              String policyName, SolrCloudManager cloudManager,
-                                                              List<String> nodesList) throws IOException, InterruptedException {
-    log.debug("shardnames {} NRT {} TLOG {} PULL {} , policy {}, nodeList {}", shardNames, nrtReplicas, tlogReplicas, pullReplicas, policyName, nodesList);
-    List<ReplicaPosition> replicaPositions = null;
-    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
-    try {
-      Map<String, String> kvMap = Collections.singletonMap(collName, policyName);
-      replicaPositions = PolicyHelper.getReplicaLocations(
-          collName,
-          autoScalingConfig,
-          cloudManager,
-          kvMap,
-          shardNames,
-          nrtReplicas,
-          tlogReplicas,
-          pullReplicas,
-          nodesList);
-      return replicaPositions;
-    } catch (Exception e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error getting replica locations", e);
-    } finally {
-      if (log.isTraceEnabled()) {
-        if (replicaPositions != null)
-          log.trace("REPLICA_POSITIONS: " + Utils.toJSONString(Utils.getDeepCopy(replicaPositions, 7, true)));
-        log.trace("AUTOSCALING_CONF: " + Utils.toJSONString(autoScalingConfig));
-      }
-    }
-  }
-
-  private static List<ReplicaPosition> getNodesViaRules(ClusterState clusterState, String shard, int numberOfNodes,
-                                                        SolrCloudManager cloudManager, DocCollection coll, List<String> createNodeList, List l) {
-    ArrayList<Rule> rules = new ArrayList<>();
-    for (Object o : l) rules.add(new Rule((Map) o));
-    Map<String, Map<String, Integer>> shardVsNodes = new LinkedHashMap<>();
-    for (Slice slice : coll.getSlices()) {
-      LinkedHashMap<String, Integer> n = new LinkedHashMap<>();
-      shardVsNodes.put(slice.getName(), n);
-      for (Replica replica : slice.getReplicas()) {
-        Integer count = n.get(replica.getNodeName());
-        if (count == null) count = 0;
-        n.put(replica.getNodeName(), ++count);
-      }
-    }
-    List snitches = (List) coll.get(SNITCH);
-    List<String> nodesList = createNodeList == null ?
-        new ArrayList<>(clusterState.getLiveNodes()) :
-        createNodeList;
-    Map<ReplicaPosition, String> positions = new ReplicaAssigner(
-        rules,
-        Collections.singletonMap(shard, numberOfNodes),
-        snitches,
-        shardVsNodes,
-        nodesList, cloudManager, clusterState).getNodeMappings();
-
-    return positions.entrySet().stream().map(e -> e.getKey().setNode(e.getValue())).collect(Collectors.toList());// getReplicaCounts(positions);
-  }
-
-  private static HashMap<String, ReplicaCount> getNodeNameVsShardCount(String collectionName,
-                                                                       ClusterState clusterState, List<String> createNodeList) {
-    Set<String> nodes = clusterState.getLiveNodes();
-
-    List<String> nodeList = new ArrayList<>(nodes.size());
-    nodeList.addAll(nodes);
-    if (createNodeList != null) nodeList.retainAll(createNodeList);
-
-    HashMap<String, ReplicaCount> nodeNameVsShardCount = new HashMap<>();
-    for (String s : nodeList) {
-      nodeNameVsShardCount.put(s, new ReplicaCount(s));
-    }
-    if (createNodeList != null) { // Overrides petty considerations about maxShardsPerNode
-      if (createNodeList.size() != nodeNameVsShardCount.size()) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "At least one of the node(s) specified " + createNodeList + " are not currently active in "
-                + nodeNameVsShardCount.keySet() + ", no action taken.");
-      }
-      return nodeNameVsShardCount;
-    }
-    DocCollection coll = clusterState.getCollection(collectionName);
-    Integer maxShardsPerNode = coll.getMaxShardsPerNode();
-    Map<String, DocCollection> collections = clusterState.getCollectionsMap();
-    for (Map.Entry<String, DocCollection> entry : collections.entrySet()) {
-      DocCollection c = entry.getValue();
-      //identify suitable nodes  by checking the no:of cores in each of them
-      for (Slice slice : c.getSlices()) {
-        Collection<Replica> replicas = slice.getReplicas();
-        for (Replica replica : replicas) {
-          ReplicaCount count = nodeNameVsShardCount.get(replica.getNodeName());
-          if (count != null) {
-            count.totalNodes++; // Used ot "weigh" whether this node should be used later.
-            if (entry.getKey().equals(collectionName)) {
-              count.thisCollectionNodes++;
-              if (count.thisCollectionNodes >= maxShardsPerNode) nodeNameVsShardCount.remove(replica.getNodeName());
-            }
-          }
-        }
-      }
-    }
-
-    return nodeNameVsShardCount;
-  }
-
-
-}
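
As an aside, the core naming scheme in the private buildSolrCoreName above is easy to reproduce; a minimal illustration with made-up inputs (the real method draws the replica number from the ZooKeeper counter maintained by incAndGetId):

    import java.util.Locale;
    import org.apache.solr.common.cloud.Replica;

    // Mirrors the "%s_%s_replica_%s%s" format used by Assign.buildSolrCoreName
    String coreName = String.format(Locale.ROOT, "%s_%s_replica_%s%s",
        "mycoll", "shard1",
        Replica.Type.NRT.name().substring(0, 1).toLowerCase(Locale.ROOT), 3);
    // coreName is "mycoll_shard1_replica_n3"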

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java b/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java
deleted file mode 100644
index a4012f0..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.time.Instant;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Properties;
-
-import org.apache.lucene.util.Version;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Replica.State;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.backup.BackupManager;
-import org.apache.solr.core.backup.repository.BackupRepository;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.SnapshotStatus;
-import org.apache.solr.core.snapshots.SolrSnapshotManager;
-import org.apache.solr.handler.component.ShardHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public BackupCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    String collectionName = message.getStr(COLLECTION_PROP);
-    String backupName = message.getStr(NAME);
-    String repo = message.getStr(CoreAdminParams.BACKUP_REPOSITORY);
-
-    Instant startTime = Instant.now();
-
-    CoreContainer cc = ocmh.overseer.getZkController().getCoreContainer();
-    BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
-    BackupManager backupMgr = new BackupManager(repository, ocmh.zkStateReader);
-
-    // Backup location
-    URI location = repository.createURI(message.getStr(CoreAdminParams.BACKUP_LOCATION));
-    URI backupPath = repository.resolve(location, backupName);
-
-    //Validating if the directory already exists.
-    if (repository.exists(backupPath)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The backup directory already exists: " + backupPath);
-    }
-
-    // Create a directory to store backup details.
-    repository.createDirectory(backupPath);
-
-    String strategy = message.getStr(CollectionAdminParams.INDEX_BACKUP_STRATEGY, CollectionAdminParams.COPY_FILES_STRATEGY);
-    switch (strategy) {
-      case CollectionAdminParams.COPY_FILES_STRATEGY: {
-        copyIndexFiles(backupPath, message, results);
-        break;
-      }
-      case CollectionAdminParams.NO_INDEX_BACKUP_STRATEGY: {
-        break;
-      }
-    }
-
-    log.info("Starting to backup ZK data for backupName={}", backupName);
-
-    //Download the configs
-    String configName = ocmh.zkStateReader.readConfigName(collectionName);
-    backupMgr.downloadConfigDir(location, backupName, configName);
-
-    //Save the collection's state. Can be part of the monolithic clusterstate.json or a individual state.json
-    //Since we don't want to distinguish we extract the state and back it up as a separate json
-    DocCollection collectionState = ocmh.zkStateReader.getClusterState().getCollection(collectionName);
-    backupMgr.writeCollectionState(location, backupName, collectionName, collectionState);
-
-    Properties properties = new Properties();
-
-    properties.put(BackupManager.BACKUP_NAME_PROP, backupName);
-    properties.put(BackupManager.COLLECTION_NAME_PROP, collectionName);
-    properties.put(COLL_CONF, configName);
-    properties.put(BackupManager.START_TIME_PROP, startTime.toString());
-    properties.put(BackupManager.INDEX_VERSION_PROP, Version.LATEST.toString());
-    //TODO: Add MD5 of the configset. If during restore the same name configset exists then we can compare checksums to see if they are the same.
-    //if they are not the same then we can throw an error or have an 'overwriteConfig' flag
-    //TODO save numDocs for the shardLeader. We can use it to sanity check the restore.
-
-    backupMgr.writeBackupProperties(location, backupName, properties);
-
-    log.info("Completed backing up ZK data for backupName={}", backupName);
-  }
-
-  private Replica selectReplicaWithSnapshot(CollectionSnapshotMetaData snapshotMeta, Slice slice) {
-    // The goal here is to choose the snapshot of the replica which was the leader at the time snapshot was created.
-    // If that is not possible, we choose any other replica for the given shard.
-    Collection<CoreSnapshotMetaData> snapshots = snapshotMeta.getReplicaSnapshotsForShard(slice.getName());
-
-    Optional<CoreSnapshotMetaData> leaderCore = snapshots.stream().filter(x -> x.isLeader()).findFirst();
-    if (leaderCore.isPresent()) {
-      log.info("Replica {} was the leader when snapshot {} was created.", leaderCore.get().getCoreName(), snapshotMeta.getName());
-      Replica r = slice.getReplica(leaderCore.get().getCoreName());
-      if ((r != null) && !r.getState().equals(State.DOWN)) {
-        return r;
-      }
-    }
-
-    Optional<Replica> r = slice.getReplicas().stream()
-                               .filter(x -> x.getState() != State.DOWN && snapshotMeta.isSnapshotExists(slice.getName(), x))
-                               .findFirst();
-
-    if (!r.isPresent()) {
-      throw new SolrException(ErrorCode.SERVER_ERROR,
-          "Unable to find any live replica with a snapshot named " + snapshotMeta.getName() + " for shard " + slice.getName());
-    }
-
-    return r.get();
-  }
-
-  private void copyIndexFiles(URI backupPath, ZkNodeProps request, NamedList results) throws Exception {
-    String collectionName = request.getStr(COLLECTION_PROP);
-    String backupName = request.getStr(NAME);
-    String asyncId = request.getStr(ASYNC);
-    String repoName = request.getStr(CoreAdminParams.BACKUP_REPOSITORY);
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-    Map<String, String> requestMap = new HashMap<>();
-
-    String commitName = request.getStr(CoreAdminParams.COMMIT_NAME);
-    Optional<CollectionSnapshotMetaData> snapshotMeta = Optional.empty();
-    if (commitName != null) {
-      SolrZkClient zkClient = ocmh.overseer.getZkController().getZkClient();
-      snapshotMeta = SolrSnapshotManager.getCollectionLevelSnapshot(zkClient, collectionName, commitName);
-      if (!snapshotMeta.isPresent()) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Snapshot with name " + commitName
-            + " does not exist for collection " + collectionName);
-      }
-      if (snapshotMeta.get().getStatus() != SnapshotStatus.Successful) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Snapshot with name " + commitName + " for collection " + collectionName
-            + " has not completed successfully. The status is " + snapshotMeta.get().getStatus());
-      }
-    }
-
-    log.info("Starting backup of collection={} with backupName={} at location={}", collectionName, backupName,
-        backupPath);
-
-    Collection<String> shardsToConsider = Collections.emptySet();
-    if (snapshotMeta.isPresent()) {
-      shardsToConsider = snapshotMeta.get().getShards();
-    }
-
-    for (Slice slice : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getActiveSlices()) {
-      Replica replica = null;
-
-      if (snapshotMeta.isPresent()) {
-        if (!shardsToConsider.contains(slice.getName())) {
-          log.warn("Skipping the backup for shard {} since it wasn't part of the collection {} when snapshot {} was created.",
-              slice.getName(), collectionName, snapshotMeta.get().getName());
-          continue;
-        }
-        replica = selectReplicaWithSnapshot(snapshotMeta.get(), slice);
-      } else {
-        // Note - Actually this can return a null value when there is no leader for this shard.
-        replica = slice.getLeader();
-        if (replica == null) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "No 'leader' replica available for shard " + slice.getName() + " of collection " + collectionName);
-        }
-      }
-
-      String coreName = replica.getStr(CORE_NAME_PROP);
-
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString());
-      params.set(NAME, slice.getName());
-      params.set(CoreAdminParams.BACKUP_REPOSITORY, repoName);
-      params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.toASCIIString()); // note: index dir will be here then the "snapshot." + slice name
-      params.set(CORE_NAME_PROP, coreName);
-      if (snapshotMeta.isPresent()) {
-        params.set(CoreAdminParams.COMMIT_NAME, snapshotMeta.get().getName());
-      }
-
-      ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap);
-      log.debug("Sent backup request to core={} for backupName={}", coreName, backupName);
-    }
-    log.debug("Sent backup requests to all shard leaders for backupName={}", backupName);
-
-    ocmh.processResponses(results, shardHandler, true, "Could not backup all replicas", asyncId, requestMap);
-  }
-}
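
Likewise, the BACKUP action handled by the deleted BackupCmd is normally invoked through SolrJ; a minimal sketch, reusing the client from the AddReplicaCmd example above, with made-up collection, backup, and location names:

    // Requests a collection-level backup; the Overseer routes it to BackupCmd.call()
    CollectionAdminRequest.backupCollection("mycoll", "myBackup")
        .setLocation("/path/to/shared/backups")
        .process(client);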

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/CloudConfigSetService.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CloudConfigSetService.java b/solr/core/src/java/org/apache/solr/cloud/CloudConfigSetService.java
index 3cdc903..9b16d23 100644
--- a/solr/core/src/java/org/apache/solr/cloud/CloudConfigSetService.java
+++ b/solr/core/src/java/org/apache/solr/cloud/CloudConfigSetService.java
@@ -18,6 +18,7 @@ package org.apache.solr.cloud;
 
 import java.lang.invoke.MethodHandles;
 
+import org.apache.solr.cloud.api.collections.CreateCollectionCmd;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.core.ConfigSetService;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java b/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
index 30de3d4..0d45129 100644
--- a/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
+++ b/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
@@ -132,7 +132,7 @@ public class CloudUtil {
 
   }
 
-  static boolean usePolicyFramework(DocCollection collection, SolrCloudManager cloudManager)
+  public static boolean usePolicyFramework(DocCollection collection, SolrCloudManager cloudManager)
       throws IOException, InterruptedException {
     AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
     return !autoScalingConfig.getPolicy().getClusterPolicy().isEmpty() || collection.getPolicyName() != null;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/CreateAliasCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CreateAliasCmd.java b/solr/core/src/java/org/apache/solr/cloud/CreateAliasCmd.java
deleted file mode 100644
index e10d53e..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/CreateAliasCmd.java
+++ /dev/null
@@ -1,101 +0,0 @@
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Locale;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-
-public class CreateAliasCmd implements Cmd {
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public CreateAliasCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results)
-      throws Exception {
-    final String aliasName = message.getStr(NAME);
-    final List<String> canonicalCollectionList = parseCollectionsParameter(message.get("collections"));
-    final String canonicalCollectionsString = StrUtils.join(canonicalCollectionList, ',');
-
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    validateAllCollectionsExistAndNoDups(canonicalCollectionList, zkStateReader);
-
-    zkStateReader.aliasesHolder.applyModificationAndExportToZk(aliases -> aliases.cloneWithCollectionAlias(aliasName, canonicalCollectionsString));
-
-    // Sleep a bit to allow ZooKeeper state propagation.
-    //
-    // THIS IS A KLUDGE.
-    //
-    // Solr's view of the cluster is eventually consistent. *Eventually* all nodes and CloudSolrClients will be aware of
-    // alias changes, but not immediately. If a newly created alias is queried, things should work right away since Solr
-    // will attempt to see if it needs to get the latest aliases when it can't otherwise resolve the name.  However
-    // modifications to an alias will take some time.
-    //
-    // We could levy this requirement on the client but they would probably always add an obligatory sleep, which is
-    // just kicking the can down the road.  Perhaps ideally at this juncture here we could somehow wait until all
-    // Solr nodes in the cluster have the latest aliases?
-    Thread.sleep(100);
-  }
-
-  private void validateAllCollectionsExistAndNoDups(List<String> collectionList, ZkStateReader zkStateReader) {
-    final String collectionStr = StrUtils.join(collectionList, ',');
-
-    if (new HashSet<>(collectionList).size() != collectionList.size()) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          String.format(Locale.ROOT,  "Can't create collection alias for collections='%s', since it contains duplicates", collectionStr));
-    }
-    ClusterState clusterState = zkStateReader.getClusterState();
-    Set<String> aliasNames = zkStateReader.getAliases().getCollectionAliasListMap().keySet();
-    for (String collection : collectionList) {
-      if (clusterState.getCollectionOrNull(collection) == null && !aliasNames.contains(collection)) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            String.format(Locale.ROOT,  "Can't create collection alias for collections='%s', '%s' is not an existing collection or alias", collectionStr, collection));
-      }
-    }
-  }
-  
-  /**
-   * The v2 API directs that the 'collections' parameter be provided as a JSON array (e.g. ["a", "b"]).  We also
-   * maintain support for the legacy format, a comma-separated list (e.g. a,b).
-   */
-  @SuppressWarnings("unchecked")
-  private List<String> parseCollectionsParameter(Object colls) {
-    if (colls == null) throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "missing collections param");
-    if (colls instanceof List) return (List<String>) colls;
-    return StrUtils.splitSmart(colls.toString(), ",", true).stream()
-        .map(String::trim)
-        .collect(Collectors.toList());
-  }
-
-}
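
Finally, the CREATEALIAS action implemented by the deleted CreateAliasCmd (also recreated under cloud/api/collections) has a matching SolrJ entry point; a minimal sketch, again reusing the client from the earlier example, with illustrative alias and collection names:

    // Points the alias "books" at two existing collections; handled by CreateAliasCmd.call()
    CollectionAdminRequest.createAlias("books", "books2017,books2018")
        .process(client);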