Posted to commits@lucene.apache.org by va...@apache.org on 2018/01/16 19:05:13 UTC
[07/15] lucene-solr:master: SOLR-11817: Move Collections API classes
to their own package
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/RoutedAliasCreateCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/RoutedAliasCreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/RoutedAliasCreateCollectionCmd.java
new file mode 100644
index 0000000..8cfd0bd
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/RoutedAliasCreateCollectionCmd.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TimeZone;
+
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.OverseerSolrResponse;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.Aliases;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.handler.admin.CollectionsHandler;
+import org.apache.solr.request.LocalSolrQueryRequest;
+import org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessor;
+import org.apache.solr.util.TimeZoneUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.COLL_CONF;
+import static org.apache.solr.common.params.CommonParams.NAME;
+import static org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessor.ROUTER_FIELD_METADATA;
+import static org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessor.ROUTER_INTERVAL_METADATA;
+
+/**
+ * For "routed aliases", creates another collection and adds it to the alias. In some cases it will not
+ * add a new collection.
+ * If a collection is created, then collection creation info is returned.
+ *
+ * Note: this logic is within an Overseer because we want to leverage the mutual exclusion
+ * property afforded by the lock it obtains on the alias name.
+ * @since 7.3
+ */
+public class RoutedAliasCreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd {
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+ public static final String IF_MOST_RECENT_COLL_NAME = "ifMostRecentCollName";
+
+ public static final String COLL_METAPREFIX = "collection-create.";
+
+ private final OverseerCollectionMessageHandler ocmh;
+
+ public RoutedAliasCreateCollectionCmd(OverseerCollectionMessageHandler ocmh) {
+ this.ocmh = ocmh;
+ }
+
+ /* TODO:
+ There are a few classes related to time routed alias processing. We need to share some logic better.
+ */
+
+
+ @Override
+ public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+ //---- PARSE PRIMARY MESSAGE PARAMS
+ // important that we use NAME for the alias as that is what the Overseer will get a lock on before calling us
+ final String aliasName = message.getStr(NAME);
+ // the client believes this is the mostRecent collection name. We assert this if provided.
+ final String ifMostRecentCollName = message.getStr(IF_MOST_RECENT_COLL_NAME); // optional
+
+ // TODO collection param (or intervalDateMath override?), useful for data capped collections
+
+ //---- PARSE ALIAS INFO FROM ZK
+ final ZkStateReader.AliasesManager aliasesHolder = ocmh.zkStateReader.aliasesHolder;
+ final Aliases aliases = aliasesHolder.getAliases();
+ final Map<String, String> aliasMetadata = aliases.getCollectionAliasMetadata(aliasName);
+ if (aliasMetadata == null) {
+ throw newAliasMustExistException(aliasName); // if it did exist, we'd have a non-null map
+ }
+
+ String routeField = aliasMetadata.get(ROUTER_FIELD_METADATA);
+ if (routeField == null) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+ "This command only works on time routed aliases. Expected alias metadata not found.");
+ }
+ String intervalDateMath = aliasMetadata.getOrDefault(ROUTER_INTERVAL_METADATA, "+1DAY");
+ TimeZone intervalTimeZone = TimeZoneUtils.parseTimezone(aliasMetadata.get(CommonParams.TZ));
+
+ //TODO this is ugly; how can we organize the code related to this feature better?
+ final List<Map.Entry<Instant, String>> parsedCollections =
+ TimeRoutedAliasUpdateProcessor.parseCollections(aliasName, aliases, () -> newAliasMustExistException(aliasName));
+
+ //---- GET MOST RECENT COLL
+ final Map.Entry<Instant, String> mostRecentEntry = parsedCollections.get(0);
+ final Instant mostRecentCollTimestamp = mostRecentEntry.getKey();
+ final String mostRecentCollName = mostRecentEntry.getValue();
+ if (ifMostRecentCollName != null) {
+ if (!mostRecentCollName.equals(ifMostRecentCollName)) {
+ // Possibly due to race conditions in URPs on multiple leaders calling us at the same time
+ String msg = IF_MOST_RECENT_COLL_NAME + " expected " + ifMostRecentCollName + " but it's " + mostRecentCollName;
+ if (parsedCollections.stream().map(Map.Entry::getValue).noneMatch(ifMostRecentCollName::equals)) {
+ msg += ". Furthermore this collection isn't in the list of collections referenced by the alias.";
+ }
+ log.info(msg);
+ results.add("message", msg);
+ return;
+ }
+ } else if (mostRecentCollTimestamp.isAfter(Instant.now())) {
+ final String msg = "Most recent collection is in the future, so we won't create another.";
+ log.info(msg);
+ results.add("message", msg);
+ return;
+ }
+
+ //---- COMPUTE NEXT COLLECTION NAME
+ final Instant nextCollTimestamp = TimeRoutedAliasUpdateProcessor.computeNextCollTimestamp(mostRecentCollTimestamp, intervalDateMath, intervalTimeZone);
+ assert nextCollTimestamp.isAfter(mostRecentCollTimestamp);
+ final String createCollName = TimeRoutedAliasUpdateProcessor.formatCollectionNameFromInstant(aliasName, nextCollTimestamp);
+
+ //---- CREATE THE COLLECTION
+ // Map alias metadata starting with a prefix to a create-collection API request
+ final ModifiableSolrParams createReqParams = new ModifiableSolrParams();
+ for (Map.Entry<String, String> e : aliasMetadata.entrySet()) {
+ if (e.getKey().startsWith(COLL_METAPREFIX)) {
+ createReqParams.set(e.getKey().substring(COLL_METAPREFIX.length()), e.getValue());
+ }
+ }
+ if (createReqParams.get(COLL_CONF) == null) {
+ throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+ "We require an explicit " + COLL_CONF );
+ }
+ createReqParams.set(NAME, createCollName);
+ createReqParams.set("property." + TimeRoutedAliasUpdateProcessor.TIME_PARTITION_ALIAS_NAME_CORE_PROP, aliasName);
+ // a CollectionOperation reads params and produces a message (Map) that is supposed to be sent to the Overseer.
+ // Although we could create the Map without it, there are a fair number of rules we don't want to reproduce.
+ final Map<String, Object> createMsgMap = CollectionsHandler.CollectionOperation.CREATE_OP.execute(
+ new LocalSolrQueryRequest(null, createReqParams),
+ null,
+ ocmh.overseer.getCoreContainer().getCollectionsHandler());
+ createMsgMap.put(Overseer.QUEUE_OPERATION, "create");
+ // Since we are running in the Overseer here, send the message directly to the Overseer CreateCollectionCmd
+ ocmh.commandMap.get(CollectionParams.CollectionAction.CREATE).call(clusterState, new ZkNodeProps(createMsgMap), results);
+
+ CollectionsHandler.waitForActiveCollection(createCollName, null, ocmh.overseer.getCoreContainer(), new OverseerSolrResponse(results));
+
+ //TODO delete some of the oldest collection(s) ?
+
+ //---- UPDATE THE ALIAS
+ aliasesHolder.applyModificationAndExportToZk(curAliases -> {
+ final List<String> curTargetCollections = curAliases.getCollectionAliasListMap().get(aliasName);
+ if (curTargetCollections.contains(createCollName)) {
+ return curAliases;
+ } else {
+ List<String> newTargetCollections = new ArrayList<>(curTargetCollections.size() + 1);
+ // prepend it on purpose (thus reverse sorted). Solr alias resolution defaults to the first collection in a list
+ newTargetCollections.add(createCollName);
+ newTargetCollections.addAll(curTargetCollections);
+ return curAliases.cloneWithCollectionAlias(aliasName, StrUtils.join(newTargetCollections, ','));
+ }
+ });
+
+ }
+
+ private SolrException newAliasMustExistException(String aliasName) {
+ return new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+ "Alias " + aliasName + " does not exist.");
+ }
+
+}
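
The loop above that maps alias metadata to create-collection parameters is worth seeing in
isolation. Below is a minimal standalone sketch (not part of this commit) of that prefix-stripping
step; the alias metadata keys and values are hypothetical examples:

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class MetaPrefixSketch {
      // mirrors RoutedAliasCreateCollectionCmd.COLL_METAPREFIX
      static final String COLL_METAPREFIX = "collection-create.";

      public static void main(String[] args) {
        // hypothetical metadata stored on a time routed alias
        Map<String, String> aliasMetadata = new LinkedHashMap<>();
        aliasMetadata.put("router.field", "timestamp_dt");                       // routing info; not copied
        aliasMetadata.put("collection-create.collection.configName", "myConf");  // copied, prefix stripped
        aliasMetadata.put("collection-create.numShards", "2");                   // copied, prefix stripped

        // same shape as the loop in RoutedAliasCreateCollectionCmd#call
        Map<String, String> createParams = new LinkedHashMap<>();
        for (Map.Entry<String, String> e : aliasMetadata.entrySet()) {
          if (e.getKey().startsWith(COLL_METAPREFIX)) {
            createParams.put(e.getKey().substring(COLL_METAPREFIX.length()), e.getValue());
          }
        }
        System.out.println(createParams); // {collection.configName=myConf, numShards=2}
      }
    }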
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
new file mode 100644
index 0000000..03e7430
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
@@ -0,0 +1,540 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.solr.client.solrj.cloud.DistributedQueue;
+import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
+import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.overseer.OverseerAction;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.CompositeIdRouter;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.PlainIdRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.ReplicaPosition;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CommonAdminParams;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.handler.component.ShardHandler;
+import org.apache.solr.util.TestInjection;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+
+
+public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+ private final OverseerCollectionMessageHandler ocmh;
+
+ public SplitShardCmd(OverseerCollectionMessageHandler ocmh) {
+ this.ocmh = ocmh;
+ }
+
+ @Override
+ public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+ split(state, message, results);
+ }
+
+ public boolean split(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+ boolean waitForFinalState = message.getBool(CommonAdminParams.WAIT_FOR_FINAL_STATE, false);
+ String collectionName = message.getStr(CoreAdminParams.COLLECTION);
+
+ log.info("Split shard invoked");
+ ZkStateReader zkStateReader = ocmh.zkStateReader;
+ zkStateReader.forceUpdateCollection(collectionName);
+ AtomicReference<String> slice = new AtomicReference<>();
+ slice.set(message.getStr(ZkStateReader.SHARD_ID_PROP));
+
+ String splitKey = message.getStr("split.key");
+ DocCollection collection = clusterState.getCollection(collectionName);
+
+ PolicyHelper.SessionWrapper sessionWrapper = null;
+
+ Slice parentSlice = getParentSlice(clusterState, collectionName, slice, splitKey);
+
+ // find the leader for the shard
+ Replica parentShardLeader = null;
+ try {
+ parentShardLeader = zkStateReader.getLeaderRetry(collectionName, slice.get(), 10000);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+
+ // let's record the ephemeralOwner of the parent leader node
+ Stat leaderZnodeStat = zkStateReader.getZkClient().exists(ZkStateReader.LIVE_NODES_ZKNODE + "/" + parentShardLeader.getNodeName(), null, true);
+ if (leaderZnodeStat == null) {
+ // we just got to know the leader but its live node is gone already!
+ throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "The shard leader node: " + parentShardLeader.getNodeName() + " is not live anymore!");
+ }
+
+ List<DocRouter.Range> subRanges = new ArrayList<>();
+ List<String> subSlices = new ArrayList<>();
+ List<String> subShardNames = new ArrayList<>();
+
+ String rangesStr = fillRanges(ocmh.cloudManager, message, collection, parentSlice, subRanges, subSlices, subShardNames);
+
+ try {
+
+ boolean oldShardsDeleted = false;
+ for (String subSlice : subSlices) {
+ Slice oSlice = collection.getSlice(subSlice);
+ if (oSlice != null) {
+ final Slice.State state = oSlice.getState();
+ if (state == Slice.State.ACTIVE) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+ "Sub-shard: " + subSlice + " exists in active state. Aborting split shard.");
+ } else if (state == Slice.State.CONSTRUCTION || state == Slice.State.RECOVERY) {
+ // delete the shards
+ log.info("Sub-shard: {} already exists therefore requesting its deletion", subSlice);
+ Map<String, Object> propMap = new HashMap<>();
+ propMap.put(Overseer.QUEUE_OPERATION, "deleteshard");
+ propMap.put(COLLECTION_PROP, collectionName);
+ propMap.put(SHARD_ID_PROP, subSlice);
+ ZkNodeProps m = new ZkNodeProps(propMap);
+ try {
+ ocmh.commandMap.get(DELETESHARD).call(clusterState, m, new NamedList());
+ } catch (Exception e) {
+ throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to delete already existing sub shard: " + subSlice,
+ e);
+ }
+
+ oldShardsDeleted = true;
+ }
+ }
+ }
+
+ if (oldShardsDeleted) {
+ // refresh the locally cached cluster state
+ // we know we have the latest because otherwise deleteshard would have failed
+ clusterState = zkStateReader.getClusterState();
+ collection = clusterState.getCollection(collectionName);
+ }
+
+ final String asyncId = message.getStr(ASYNC);
+ Map<String, String> requestMap = new HashMap<>();
+ String nodeName = parentShardLeader.getNodeName();
+
+ for (int i = 0; i < subRanges.size(); i++) {
+ String subSlice = subSlices.get(i);
+ String subShardName = subShardNames.get(i);
+ DocRouter.Range subRange = subRanges.get(i);
+
+ log.info("Creating slice " + subSlice + " of collection " + collectionName + " on " + nodeName);
+
+ Map<String, Object> propMap = new HashMap<>();
+ propMap.put(Overseer.QUEUE_OPERATION, CREATESHARD.toLower());
+ propMap.put(ZkStateReader.SHARD_ID_PROP, subSlice);
+ propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
+ propMap.put(ZkStateReader.SHARD_RANGE_PROP, subRange.toString());
+ propMap.put(ZkStateReader.SHARD_STATE_PROP, Slice.State.CONSTRUCTION.toString());
+ propMap.put(ZkStateReader.SHARD_PARENT_PROP, parentSlice.getName());
+ propMap.put("shard_parent_node", parentShardLeader.getNodeName());
+ propMap.put("shard_parent_zk_session", leaderZnodeStat.getEphemeralOwner());
+ DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
+ inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
+
+ // wait until we are able to see the new shard in cluster state
+ ocmh.waitForNewShard(collectionName, subSlice);
+
+ // refresh cluster state
+ clusterState = zkStateReader.getClusterState();
+
+ log.info("Adding replica " + subShardName + " as part of slice " + subSlice + " of collection " + collectionName
+ + " on " + nodeName);
+ propMap = new HashMap<>();
+ propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
+ propMap.put(COLLECTION_PROP, collectionName);
+ propMap.put(SHARD_ID_PROP, subSlice);
+ propMap.put("node", nodeName);
+ propMap.put(CoreAdminParams.NAME, subShardName);
+ propMap.put(CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
+ // copy over property params:
+ for (String key : message.keySet()) {
+ if (key.startsWith(OverseerCollectionMessageHandler.COLL_PROP_PREFIX)) {
+ propMap.put(key, message.getStr(key));
+ }
+ }
+ // add async param
+ if (asyncId != null) {
+ propMap.put(ASYNC, asyncId);
+ }
+ ocmh.addReplica(clusterState, new ZkNodeProps(propMap), results, null);
+ }
+
+ ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
+
+ ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to create subshard leaders", asyncId, requestMap);
+
+ for (String subShardName : subShardNames) {
+ // wait for parent leader to acknowledge the sub-shard core
+ log.info("Asking parent leader to wait for: " + subShardName + " to be alive on: " + nodeName);
+ String coreNodeName = ocmh.waitForCoreNodeName(collectionName, nodeName, subShardName);
+ CoreAdminRequest.WaitForState cmd = new CoreAdminRequest.WaitForState();
+ cmd.setCoreName(subShardName);
+ cmd.setNodeName(nodeName);
+ cmd.setCoreNodeName(coreNodeName);
+ cmd.setState(Replica.State.ACTIVE);
+ cmd.setCheckLive(true);
+ cmd.setOnlyIfLeader(true);
+
+ ModifiableSolrParams p = new ModifiableSolrParams(cmd.getParams());
+ ocmh.sendShardRequest(nodeName, p, shardHandler, asyncId, requestMap);
+ }
+
+ ocmh.processResponses(results, shardHandler, true, "SPLITSHARD timed out waiting for subshard leaders to come up",
+ asyncId, requestMap);
+
+ log.info("Successfully created all sub-shards for collection " + collectionName + " parent shard: " + slice
+ + " on: " + parentShardLeader);
+
+ log.info("Splitting shard " + parentShardLeader.getName() + " as part of slice " + slice + " of collection "
+ + collectionName + " on " + parentShardLeader);
+
+ ModifiableSolrParams params = new ModifiableSolrParams();
+ params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.SPLIT.toString());
+ params.set(CoreAdminParams.CORE, parentShardLeader.getStr("core"));
+ for (int i = 0; i < subShardNames.size(); i++) {
+ String subShardName = subShardNames.get(i);
+ params.add(CoreAdminParams.TARGET_CORE, subShardName);
+ }
+ params.set(CoreAdminParams.RANGES, rangesStr);
+
+ ocmh.sendShardRequest(parentShardLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
+
+ ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to invoke SPLIT core admin command", asyncId,
+ requestMap);
+
+ log.info("Index on shard: " + nodeName + " split into two successfully");
+
+ // apply buffered updates on sub-shards
+ for (int i = 0; i < subShardNames.size(); i++) {
+ String subShardName = subShardNames.get(i);
+
+ log.info("Applying buffered updates on : " + subShardName);
+
+ params = new ModifiableSolrParams();
+ params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTAPPLYUPDATES.toString());
+ params.set(CoreAdminParams.NAME, subShardName);
+
+ ocmh.sendShardRequest(nodeName, params, shardHandler, asyncId, requestMap);
+ }
+
+ ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed while asking sub shard leaders" +
+ " to apply buffered updates", asyncId, requestMap);
+
+ log.info("Successfully applied buffered updates on : " + subShardNames);
+
+ // Replica creation for the new Slices
+
+ // look at the replication factor and see if it matches reality
+ // if it does not, find best nodes to create more cores
+
+ // TODO: Have replication factor decided in some other way instead of numShards for the parent
+
+ int repFactor = parentSlice.getReplicas().size();
+
+ // we need to look at every node and see how many cores it serves
+ // add our new cores to existing nodes serving the least number of cores
+ // but (for now) require that each core goes on a distinct node.
+
+ // TODO: add smarter options that look at the current number of cores per
+ // node?
+ // for now we just go random
+ Set<String> nodes = clusterState.getLiveNodes();
+ List<String> nodeList = new ArrayList<>(nodes.size());
+ nodeList.addAll(nodes);
+
+ // TODO: Have maxShardsPerNode param for this operation?
+
+ // Remove the node that hosts the parent shard for replica creation.
+ nodeList.remove(nodeName);
+
+ // TODO: change this to handle sharding a slice into > 2 sub-shards.
+
+ List<ReplicaPosition> replicaPositions = Assign.identifyNodes(ocmh.cloudManager,
+ clusterState,
+ new ArrayList<>(clusterState.getLiveNodes()),
+ collectionName,
+ new ZkNodeProps(collection.getProperties()),
+ subSlices, repFactor - 1, 0, 0);
+ sessionWrapper = PolicyHelper.getLastSessionWrapper(true);
+
+ List<Map<String, Object>> replicas = new ArrayList<>((repFactor - 1) * 2);
+
+ for (ReplicaPosition replicaPosition : replicaPositions) {
+ String sliceName = replicaPosition.shard;
+ String subShardNodeName = replicaPosition.node;
+ String solrCoreName = collectionName + "_" + sliceName + "_replica" + (replicaPosition.index);
+
+ log.info("Creating replica shard " + solrCoreName + " as part of slice " + sliceName + " of collection "
+ + collectionName + " on " + subShardNodeName);
+
+ ZkNodeProps props = new ZkNodeProps(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower(),
+ ZkStateReader.COLLECTION_PROP, collectionName,
+ ZkStateReader.SHARD_ID_PROP, sliceName,
+ ZkStateReader.CORE_NAME_PROP, solrCoreName,
+ ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
+ ZkStateReader.BASE_URL_PROP, zkStateReader.getBaseUrlForNodeName(subShardNodeName),
+ ZkStateReader.NODE_NAME_PROP, subShardNodeName,
+ CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
+ Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
+
+ HashMap<String, Object> propMap = new HashMap<>();
+ propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
+ propMap.put(COLLECTION_PROP, collectionName);
+ propMap.put(SHARD_ID_PROP, sliceName);
+ propMap.put("node", subShardNodeName);
+ propMap.put(CoreAdminParams.NAME, solrCoreName);
+ // copy over property params:
+ for (String key : message.keySet()) {
+ if (key.startsWith(OverseerCollectionMessageHandler.COLL_PROP_PREFIX)) {
+ propMap.put(key, message.getStr(key));
+ }
+ }
+ // add async param
+ if (asyncId != null) {
+ propMap.put(ASYNC, asyncId);
+ }
+ // special flag param to instruct addReplica not to create the replica in cluster state again
+ propMap.put(OverseerCollectionMessageHandler.SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, "true");
+
+ propMap.put(CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
+
+ replicas.add(propMap);
+ }
+
+ assert TestInjection.injectSplitFailureBeforeReplicaCreation();
+
+ long ephemeralOwner = leaderZnodeStat.getEphemeralOwner();
+ // compare against the ephemeralOwner of the parent leader node
+ leaderZnodeStat = zkStateReader.getZkClient().exists(ZkStateReader.LIVE_NODES_ZKNODE + "/" + parentShardLeader.getNodeName(), null, true);
+ if (leaderZnodeStat == null || ephemeralOwner != leaderZnodeStat.getEphemeralOwner()) {
+ // put sub-shards in recovery_failed state
+ DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
+ Map<String, Object> propMap = new HashMap<>();
+ propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
+ for (String subSlice : subSlices) {
+ propMap.put(subSlice, Slice.State.RECOVERY_FAILED.toString());
+ }
+ propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
+ ZkNodeProps m = new ZkNodeProps(propMap);
+ inQueue.offer(Utils.toJSON(m));
+
+ if (leaderZnodeStat == null) {
+ // the leader is not live anymore, fail the split!
+ throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "The shard leader node: " + parentShardLeader.getNodeName() + " is not live anymore!");
+ } else if (ephemeralOwner != leaderZnodeStat.getEphemeralOwner()) {
+ // there's a new leader, fail the split!
+ throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+ "The zk session id for the shard leader node: " + parentShardLeader.getNodeName() + " has changed from "
+ + ephemeralOwner + " to " + leaderZnodeStat.getEphemeralOwner() + ". This can cause data loss so we must abort the split");
+ }
+ }
+
+ // we must set the slice state into recovery before actually creating the replica cores
+ // this ensures that the logic inside Overseer to update sub-shard state to 'active'
+ // always gets a chance to execute. See SOLR-7673
+
+ if (repFactor == 1) {
+ // switch sub shard states to 'active'
+ log.info("Replication factor is 1 so switching shard states");
+ DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
+ Map<String, Object> propMap = new HashMap<>();
+ propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
+ propMap.put(slice.get(), Slice.State.INACTIVE.toString());
+ for (String subSlice : subSlices) {
+ propMap.put(subSlice, Slice.State.ACTIVE.toString());
+ }
+ propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
+ ZkNodeProps m = new ZkNodeProps(propMap);
+ inQueue.offer(Utils.toJSON(m));
+ } else {
+ log.info("Requesting shard state be set to 'recovery'");
+ DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
+ Map<String, Object> propMap = new HashMap<>();
+ propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
+ for (String subSlice : subSlices) {
+ propMap.put(subSlice, Slice.State.RECOVERY.toString());
+ }
+ propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
+ ZkNodeProps m = new ZkNodeProps(propMap);
+ inQueue.offer(Utils.toJSON(m));
+ }
+
+ // now actually create replica cores on sub shard nodes
+ for (Map<String, Object> replica : replicas) {
+ ocmh.addReplica(clusterState, new ZkNodeProps(replica), results, null);
+ }
+
+ ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to create subshard replicas", asyncId, requestMap);
+
+ log.info("Successfully created all replica shards for all sub-slices " + subSlices);
+
+ ocmh.commit(results, slice.get(), parentShardLeader);
+
+ return true;
+ } catch (SolrException e) {
+ throw e;
+ } catch (Exception e) {
+ log.error("Error executing split operation for collection: " + collectionName + " parent shard: " + slice, e);
+ throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, e);
+ } finally {
+ if (sessionWrapper != null) sessionWrapper.release();
+ }
+ }
+
+ public static Slice getParentSlice(ClusterState clusterState, String collectionName, AtomicReference<String> slice, String splitKey) {
+ DocCollection collection = clusterState.getCollection(collectionName);
+ DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
+
+ Slice parentSlice;
+
+ if (slice.get() == null) {
+ if (router instanceof CompositeIdRouter) {
+ Collection<Slice> searchSlices = router.getSearchSlicesSingle(splitKey, new ModifiableSolrParams(), collection);
+ if (searchSlices.isEmpty()) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unable to find an active shard for split.key: " + splitKey);
+ }
+ if (searchSlices.size() > 1) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+ "Splitting a split.key: " + splitKey + " which spans multiple shards is not supported");
+ }
+ parentSlice = searchSlices.iterator().next();
+ slice.set(parentSlice.getName());
+ log.info("Split by route.key: {}, parent shard is: {} ", splitKey, slice);
+ } else {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+ "Split by route key can only be used with CompositeIdRouter or subclass. Found router: "
+ + router.getClass().getName());
+ }
+ } else {
+ parentSlice = collection.getSlice(slice.get());
+ }
+
+ if (parentSlice == null) {
+ // no chance of the collection being null because ClusterState#getCollection(String) would have thrown
+ // an exception already
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No shard with the specified name exists: " + slice);
+ }
+ return parentSlice;
+ }
+
+ public static String fillRanges(SolrCloudManager cloudManager, ZkNodeProps message, DocCollection collection, Slice parentSlice,
+ List<DocRouter.Range> subRanges, List<String> subSlices, List<String> subShardNames) {
+ String splitKey = message.getStr("split.key");
+ DocRouter.Range range = parentSlice.getRange();
+ if (range == null) {
+ range = new PlainIdRouter().fullRange();
+ }
+ DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
+
+ String rangesStr = message.getStr(CoreAdminParams.RANGES);
+ if (rangesStr != null) {
+ String[] ranges = rangesStr.split(",");
+ if (ranges.length == 0 || ranges.length == 1) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There must be at least two ranges specified to split a shard");
+ } else {
+ for (int i = 0; i < ranges.length; i++) {
+ String r = ranges[i];
+ try {
+ subRanges.add(DocRouter.DEFAULT.fromString(r));
+ } catch (Exception e) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Exception in parsing hexadecimal hash range: " + r, e);
+ }
+ if (!subRanges.get(i).isSubsetOf(range)) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+ "Specified hash range: " + r + " is not a subset of parent shard's range: " + range.toString());
+ }
+ }
+ List<DocRouter.Range> temp = new ArrayList<>(subRanges); // copy to preserve original order
+ Collections.sort(temp);
+ if (!range.equals(new DocRouter.Range(temp.get(0).min, temp.get(temp.size() - 1).max))) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+ "Specified hash ranges: " + rangesStr + " do not cover the entire range of parent shard: " + range);
+ }
+ for (int i = 1; i < temp.size(); i++) {
+ if (temp.get(i - 1).max + 1 != temp.get(i).min) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Specified hash ranges: " + rangesStr
+ + " either overlap with each other or " + "do not cover the entire range of parent shard: " + range);
+ }
+ }
+ }
+ } else if (splitKey != null) {
+ if (router instanceof CompositeIdRouter) {
+ CompositeIdRouter compositeIdRouter = (CompositeIdRouter) router;
+ List<DocRouter.Range> tmpSubRanges = compositeIdRouter.partitionRangeByKey(splitKey, range);
+ if (tmpSubRanges.size() == 1) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The split.key: " + splitKey
+ + " has a hash range that is exactly equal to hash range of shard: " + parentSlice.getName());
+ }
+ for (DocRouter.Range subRange : tmpSubRanges) {
+ if (subRange.min == subRange.max) {
+ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The split.key: " + splitKey + " must be a compositeId");
+ }
+ }
+ subRanges.addAll(tmpSubRanges);
+ log.info("Partitioning parent shard " + parentSlice.getName() + " range: " + parentSlice.getRange() + " yields: " + subRanges);
+ rangesStr = "";
+ for (int i = 0; i < subRanges.size(); i++) {
+ DocRouter.Range subRange = subRanges.get(i);
+ rangesStr += subRange.toString();
+ if (i < subRanges.size() - 1) rangesStr += ',';
+ }
+ }
+ } else {
+ // todo: fixed to two partitions?
+ subRanges.addAll(router.partitionRange(2, range));
+ }
+
+ for (int i = 0; i < subRanges.size(); i++) {
+ String subSlice = parentSlice.getName() + "_" + i;
+ subSlices.add(subSlice);
+ String subShardName = Assign.buildSolrCoreName(cloudManager.getDistribStateManager(), collection, subSlice, Replica.Type.NRT);
+ subShardNames.add(subShardName);
+ }
+ return rangesStr;
+ }
+}
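
To make the "ranges" validation in fillRanges concrete: explicitly supplied sub-ranges, once
sorted, must tile the parent shard's hash range exactly, with no gap or overlap between adjacent
sub-ranges. A minimal standalone sketch of that check follows (simplified Range class, not Solr's
DocRouter.Range, which also parses hex strings):

    import java.util.Arrays;

    public class RangeTilingSketch {
      static class Range {
        final int min, max;
        Range(int min, int max) { this.min = min; this.max = max; }
      }

      static boolean tilesExactly(Range parent, Range[] subRanges) {
        Range[] sorted = subRanges.clone();
        Arrays.sort(sorted, (a, b) -> Integer.compare(a.min, b.min));
        if (sorted[0].min != parent.min || sorted[sorted.length - 1].max != parent.max) {
          return false; // does not cover the entire parent range
        }
        for (int i = 1; i < sorted.length; i++) {
          if (sorted[i - 1].max + 1 != sorted[i].min) {
            return false; // gap or overlap between adjacent sub-ranges
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // the full 32-bit hash range, split into two contiguous halves
        Range parent = new Range(Integer.MIN_VALUE, Integer.MAX_VALUE);
        Range[] halves = { new Range(0, Integer.MAX_VALUE), new Range(Integer.MIN_VALUE, -1) };
        System.out.println(tilesExactly(parent, halves)); // true
      }
    }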
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/UtilizeNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/UtilizeNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/UtilizeNodeCmd.java
new file mode 100644
index 0000000..60da61a
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/UtilizeNodeCmd.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
+import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
+import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
+import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
+import org.apache.solr.client.solrj.request.V2Request;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.common.util.Utils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
+import static org.apache.solr.common.params.AutoScalingParams.NODE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOVEREPLICA;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+
+public class UtilizeNodeCmd implements OverseerCollectionMessageHandler.Cmd {
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private final OverseerCollectionMessageHandler ocmh;
+
+ public UtilizeNodeCmd(OverseerCollectionMessageHandler ocmh) {
+ this.ocmh = ocmh;
+ }
+
+ @Override
+ public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+ ocmh.checkRequired(message, NODE);
+ String nodeName = message.getStr(NODE);
+ String async = message.getStr(ASYNC);
+ AutoScalingConfig autoScalingConfig = ocmh.overseer.getSolrCloudManager().getDistribStateManager().getAutoScalingConfig();
+
+ //gather the MOVEREPLICA requests that would move replicas onto this node
+ List<ZkNodeProps> requests = new ArrayList<>();
+ //first, check for existing policy suggestions (e.g. for rule violations), if any
+ List<Suggester.SuggestionInfo> suggestions = PolicyHelper.getSuggestions(autoScalingConfig, ocmh.overseer.getSolrCloudManager());
+ for (Suggester.SuggestionInfo suggestionInfo : suggestions) {
+ log.info("op: " + suggestionInfo.getOperation());
+ String coll = null;
+ List<String> pieces = StrUtils.splitSmart(suggestionInfo.getOperation().getPath(), '/');
+ if (pieces.size() > 1) {
+ coll = pieces.get(2);
+ } else {
+ continue;
+ }
+ log.info("coll: " + coll);
+ if (suggestionInfo.getOperation() instanceof V2Request) {
+ String targetNode = (String) Utils.getObjectByPath(suggestionInfo.getOperation(), true, "command/move-replica/targetNode");
+ if (Objects.equals(targetNode, nodeName)) {
+ String replica = (String) Utils.getObjectByPath(suggestionInfo.getOperation(), true, "command/move-replica/replica");
+ requests.add(new ZkNodeProps(COLLECTION_PROP, coll,
+ CollectionParams.TARGET_NODE, targetNode,
+ ASYNC, async,
+ REPLICA_PROP, replica));
+ }
+ }
+ }
+ executeAll(requests);
+ PolicyHelper.SessionWrapper sessionWrapper = PolicyHelper.getSession(ocmh.overseer.getSolrCloudManager());
+ Policy.Session session = sessionWrapper.get();
+ for (; ; ) {
+ Suggester suggester = session.getSuggester(MOVEREPLICA)
+ .hint(Suggester.Hint.TARGET_NODE, nodeName);
+ session = suggester.getSession();
+ SolrRequest request = suggester.getSuggestion();
+ if (request == null) break;
+ requests.add(new ZkNodeProps(COLLECTION_PROP, request.getParams().get(COLLECTION_PROP),
+ CollectionParams.TARGET_NODE, request.getParams().get(CollectionParams.TARGET_NODE),
+ REPLICA_PROP, request.getParams().get(REPLICA_PROP),
+ ASYNC, request.getParams().get(ASYNC)));
+ }
+ sessionWrapper.returnSession(session);
+ try {
+ executeAll(requests);
+ } finally {
+ sessionWrapper.release();
+ }
+ }
+
+ private void executeAll(List<ZkNodeProps> requests) throws Exception {
+ if (requests.isEmpty()) return;
+ for (ZkNodeProps props : requests) {
+ NamedList result = new NamedList();
+ ocmh.commandMap.get(MOVEREPLICA)
+ .call(ocmh.overseer.getSolrCloudManager().getClusterStateProvider().getClusterState(),
+ props,
+ result);
+ }
+ requests.clear();
+ }
+
+}
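
The for(;;) loop above drains the policy suggester: it keeps requesting a move suggestion with the
same target-node hint until the engine returns null, accumulating one MOVEREPLICA request per
suggestion. A minimal standalone sketch of that pattern (the Supplier stands in for the suggester;
Solr's real Suggester API is not used here):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.function.Supplier;

    public class SuggesterDrainSketch {
      public static void main(String[] args) {
        // stand-in for session.getSuggester(MOVEREPLICA).hint(TARGET_NODE, node):
        // yields two suggestions, then null to signal there is nothing left to propose
        List<String> pending = new ArrayList<>(Arrays.asList("replicaA -> node1", "replicaB -> node1"));
        Supplier<String> suggester = () -> pending.isEmpty() ? null : pending.remove(0);

        List<String> requests = new ArrayList<>();
        for (;;) {                       // mirrors the loop in UtilizeNodeCmd#call
          String suggestion = suggester.get();
          if (suggestion == null) break; // no more suggestions: stop collecting
          requests.add(suggestion);      // queue a MOVEREPLICA-style request
        }
        requests.forEach(System.out::println); // then executeAll(requests) would run them serially
      }
    }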
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/package-info.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/package-info.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/package-info.java
new file mode 100644
index 0000000..651d4fe
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Internal implementation classes for the SolrCloud Collections API.
+ */
+package org.apache.solr.cloud.api.collections;
+
+
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java
index 55d6a7e..e5303de 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java
@@ -26,7 +26,7 @@ import java.util.Map;
import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
index dbcdd3d..f2c9a2f 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
@@ -17,7 +17,6 @@
package org.apache.solr.cloud.overseer;
import java.lang.invoke.MethodHandles;
-
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
@@ -31,9 +30,9 @@ import org.apache.commons.lang.StringUtils;
import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
-import org.apache.solr.cloud.Assign;
import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler;
+import org.apache.solr.cloud.api.collections.Assign;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
@@ -45,7 +44,6 @@ import org.apache.solr.common.util.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_PROP_PREFIX;
import static org.apache.solr.cloud.overseer.CollectionMutator.checkCollectionKeyExistence;
import static org.apache.solr.cloud.overseer.CollectionMutator.checkKeyExistence;
import static org.apache.solr.common.params.CommonParams.NAME;
@@ -113,7 +111,7 @@ public class ReplicaMutator {
String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
String replicaName = message.getStr(ZkStateReader.REPLICA_PROP);
String property = message.getStr(ZkStateReader.PROPERTY_PROP).toLowerCase(Locale.ROOT);
- if (StringUtils.startsWith(property, COLL_PROP_PREFIX) == false) {
+ if (StringUtils.startsWith(property, OverseerCollectionMessageHandler.COLL_PROP_PREFIX) == false) {
property = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + property;
}
property = property.toLowerCase(Locale.ROOT);
@@ -177,7 +175,7 @@ public class ReplicaMutator {
String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
String replicaName = message.getStr(ZkStateReader.REPLICA_PROP);
String property = message.getStr(ZkStateReader.PROPERTY_PROP).toLowerCase(Locale.ROOT);
- if (StringUtils.startsWith(property, COLL_PROP_PREFIX) == false) {
+ if (StringUtils.startsWith(property, OverseerCollectionMessageHandler.COLL_PROP_PREFIX) == false) {
property = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + property;
}
@@ -284,7 +282,7 @@ public class ReplicaMutator {
replicaProps.put(ZkStateReader.REPLICA_TYPE, oldReplica.getType().toString());
// Move custom props over.
for (Map.Entry<String, Object> ent : oldReplica.getProperties().entrySet()) {
- if (ent.getKey().startsWith(COLL_PROP_PREFIX)) {
+ if (ent.getKey().startsWith(OverseerCollectionMessageHandler.COLL_PROP_PREFIX)) {
replicaProps.put(ent.getKey(), ent.getValue());
}
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
index 6718a80..87bf481 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
@@ -16,20 +16,18 @@
*/
package org.apache.solr.cloud.overseer;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_PROP_PREFIX;
-import static org.apache.solr.cloud.overseer.CollectionMutator.checkCollectionKeyExistence;
-import static org.apache.solr.common.util.Utils.makeMap;
-
import java.lang.invoke.MethodHandles;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
+import com.google.common.collect.ImmutableSet;
import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
-import org.apache.solr.cloud.Assign;
import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.api.collections.Assign;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
@@ -41,12 +39,13 @@ import org.apache.solr.common.cloud.ZkStateReader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.collect.ImmutableSet;
+import static org.apache.solr.cloud.overseer.CollectionMutator.checkCollectionKeyExistence;
+import static org.apache.solr.common.util.Utils.makeMap;
public class SliceMutator {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
- public static final String PREFERRED_LEADER_PROP = COLL_PROP_PREFIX + "preferredleader";
+ public static final String PREFERRED_LEADER_PROP = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + "preferredleader";
public static final Set<String> SLICE_UNIQUE_BOOLEAN_PROPERTIES = ImmutableSet.of(PREFERRED_LEADER_PROP);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
index 74d4764..56f979d 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
@@ -42,7 +42,7 @@ import org.apache.solr.client.solrj.request.CoreAdminRequest.RequestSyncShard;
import org.apache.solr.client.solrj.response.RequestStatusState;
import org.apache.solr.client.solrj.util.SolrIdentifierValidator;
import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
import org.apache.solr.cloud.OverseerSolrResponse;
import org.apache.solr.cloud.OverseerTaskQueue;
import org.apache.solr.cloud.OverseerTaskQueue.QueueEvent;
@@ -100,17 +100,17 @@ import static org.apache.solr.client.solrj.response.RequestStatusState.NOT_FOUND
import static org.apache.solr.client.solrj.response.RequestStatusState.RUNNING;
import static org.apache.solr.client.solrj.response.RequestStatusState.SUBMITTED;
import static org.apache.solr.cloud.Overseer.QUEUE_OPERATION;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_PROP_PREFIX;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.ONLY_ACTIVE_NODES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.ONLY_IF_DOWN;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.REQUESTID;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARDS_PROP;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARD_UNIQUE;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.COLL_CONF;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.COLL_PROP_PREFIX;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.NUM_SLICES;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ONLY_ACTIVE_NODES;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ONLY_IF_DOWN;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.REQUESTID;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SHARDS_PROP;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SHARD_UNIQUE;
import static org.apache.solr.common.SolrException.ErrorCode.BAD_REQUEST;
import static org.apache.solr.common.cloud.DocCollection.DOC_ROUTER;
import static org.apache.solr.common.cloud.DocCollection.RULE;
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessor.java
index bc242ba..6f71acc 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessor.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessor.java
@@ -42,7 +42,7 @@ import java.util.function.Supplier;
import java.util.stream.Collectors;
import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.RoutedAliasCreateCollectionCmd;
+import org.apache.solr.cloud.api.collections.RoutedAliasCreateCollectionCmd;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.Aliases;
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java b/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
deleted file mode 100644
index a90783a..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Random;
-import java.util.TreeMap;
-
-import org.apache.lucene.util.TestUtil;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest.ClusterProp;
-import org.apache.solr.client.solrj.response.RequestStatusState;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.ImplicitDocRouter;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class implements the logic required to test Solr cloud backup/restore capability.
- */
-public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCase {
- private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
- protected static final int NUM_SHARDS = 2;//granted we sometimes shard split to get more
-
- int replFactor;
- int numTlogReplicas;
- int numPullReplicas;
-
- private static long docsSeed; // see indexDocs()
-
- @BeforeClass
- public static void createCluster() throws Exception {
- docsSeed = random().nextLong();
- }
-
- /**
- * @return The name of the collection to use.
- */
- public abstract String getCollectionName();
-
- /**
- * @return The name of the backup repository to use.
- */
- public abstract String getBackupRepoName();
-
- /**
- * @return The absolute path for the backup location.
- * Could return null.
- */
- public abstract String getBackupLocation();
-
- @Test
- public void test() throws Exception {
- boolean isImplicit = random().nextBoolean();
- boolean doSplitShardOperation = !isImplicit && random().nextBoolean();
- replFactor = TestUtil.nextInt(random(), 1, 2);
- numTlogReplicas = TestUtil.nextInt(random(), 0, 1);
- numPullReplicas = TestUtil.nextInt(random(), 0, 1);
-
- CollectionAdminRequest.Create create = isImplicit ?
- // NOTE: use shard list with same # of shards as NUM_SHARDS; we assume this later
- CollectionAdminRequest.createCollectionWithImplicitRouter(getCollectionName(), "conf1", "shard1,shard2", replFactor, numTlogReplicas, numPullReplicas) :
- CollectionAdminRequest.createCollection(getCollectionName(), "conf1", NUM_SHARDS, replFactor, numTlogReplicas, numPullReplicas);
-
- if (NUM_SHARDS * (replFactor + numTlogReplicas + numPullReplicas) > cluster.getJettySolrRunners().size() || random().nextBoolean()) {
- // cast to double so Math.ceil rounds up instead of being defeated by integer division;
- // set maxShardsPerNode just to assert it survives the restoration
- create.setMaxShardsPerNode((int) Math.ceil((double) (NUM_SHARDS * (replFactor + numTlogReplicas + numPullReplicas)) / cluster.getJettySolrRunners().size()));
- if (doSplitShardOperation) {
- create.setMaxShardsPerNode(create.getMaxShardsPerNode() * 2);
- }
- }
- if (random().nextBoolean()) {
- create.setAutoAddReplicas(true);//just to assert it survives the restoration
- }
- Properties coreProps = new Properties();
- coreProps.put("customKey", "customValue");//just to assert it survives the restoration
- create.setProperties(coreProps);
- if (isImplicit) { // implicit router
- create.setRouterField("shard_s");
- } else { // composite id router
- if (random().nextBoolean()) {
- create.setRouterField("shard_s");
- }
- }
-
- CloudSolrClient solrClient = cluster.getSolrClient();
- create.process(solrClient);
-
- indexDocs(getCollectionName());
-
- if (doSplitShardOperation) {
- // shard split the first shard
- int prevActiveSliceCount = getActiveSliceCount(getCollectionName());
- CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(getCollectionName());
- splitShard.setShardName("shard1");
- splitShard.process(solrClient);
- // wait until we see one more active slice...
- for (int i = 0; getActiveSliceCount(getCollectionName()) != prevActiveSliceCount + 1; i++) {
- assertTrue("Timed out waiting for the split shard to become active", i < 30);
- Thread.sleep(500);
- }
- // Issue a hard commit; shard split only does a soft commit, which isn't enough for the backup/snapshooter to see the split data on disk.
- solrClient.commit(getCollectionName());
- }
-
- testBackupAndRestore(getCollectionName());
- testConfigBackupOnly("conf1", getCollectionName());
- testInvalidPath(getCollectionName());
- }
-
- /**
- * This test validates the backup of collection configuration using
- * {@linkplain CollectionAdminParams#NO_INDEX_BACKUP_STRATEGY}.
- *
- * @param configName The config name for the collection to be backed up.
- * @param collectionName The name of the collection to be backed up.
- * @throws Exception in case of errors.
- */
- protected void testConfigBackupOnly(String configName, String collectionName) throws Exception {
- // This is deliberately a no-op since we want to run this test for only one of the backup repository
- // implementations (mainly to avoid redundant test execution). Currently the HDFS backup repository
- // test implements this.
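- // (A subclass implementing this would typically issue a backup request that passes
- // CollectionAdminParams.NO_INDEX_BACKUP_STRATEGY as the index backup strategy, then
- // verify the backed-up config; see the HDFS backup repository test for the real thing.)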
- }
-
- // This test verifies the system behavior when the backup location cluster property is configured with an invalid
- // value for the specified repository (and the default backup location is not configured in solr.xml).
- private void testInvalidPath(String collectionName) throws Exception {
- // Execute this test only if the default backup location is NOT configured in solr.xml
- if (getBackupLocation() == null) {
- return;
- }
-
- String backupName = "invalidbackuprequest";
- CloudSolrClient solrClient = cluster.getSolrClient();
-
- ClusterProp req = CollectionAdminRequest.setClusterProperty(CoreAdminParams.BACKUP_LOCATION, "/location/does/not/exist");
- assertEquals(0, req.process(solrClient).getStatus());
-
- // Do not specify the backup location.
- CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName)
- .setRepositoryName(getBackupRepoName());
- try {
- backup.process(solrClient);
- fail("This request should have failed since the cluster property value for backup location property is invalid.");
- } catch (SolrException ex) {
- assertEquals(ErrorCode.SERVER_ERROR.code, ex.code());
- }
-
- String restoreCollectionName = collectionName + "_invalidrequest";
- CollectionAdminRequest.Restore restore = CollectionAdminRequest.restoreCollection(restoreCollectionName, backupName)
- .setRepositoryName(getBackupRepoName());
- try {
- restore.process(solrClient);
- fail("This request should have failed since the cluster property value for backup location property is invalid.");
- } catch (SolrException ex) {
- assertEquals(ErrorCode.SERVER_ERROR.code, ex.code());
- }
- }
-
- private int getActiveSliceCount(String collectionName) {
- return cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(collectionName).getActiveSlices().size();
- }
-
- private void indexDocs(String collectionName) throws Exception {
- Random random = new Random(docsSeed);// use a constant seed for the whole test run so that we can easily re-index.
- int numDocs = random.nextInt(100);
- if (numDocs == 0) {
- log.info("Indexing ZERO test docs");
- return;
- }
- List<SolrInputDocument> docs = new ArrayList<>(numDocs);
- for (int i=0; i<numDocs; i++) {
- SolrInputDocument doc = new SolrInputDocument();
- doc.addField("id", i);
- doc.addField("shard_s", "shard" + (1 + random.nextInt(NUM_SHARDS))); // for implicit router
- docs.add(doc);
- }
- CloudSolrClient client = cluster.getSolrClient();
- client.add(collectionName, docs);// batch
- client.commit(collectionName);
- }
-
- private void testBackupAndRestore(String collectionName) throws Exception {
- String backupLocation = getBackupLocation();
- String backupName = "mytestbackup";
-
- CloudSolrClient client = cluster.getSolrClient();
- DocCollection backupCollection = client.getZkStateReader().getClusterState().getCollection(collectionName);
-
- Map<String, Integer> origShardToDocCount = getShardToDocCountMap(client, backupCollection);
- assertFalse("expected some docs in the collection before backup", origShardToDocCount.isEmpty());
-
- log.info("Triggering Backup command");
-
- {
- CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName)
- .setLocation(backupLocation).setRepositoryName(getBackupRepoName());
- if (random().nextBoolean()) {
- assertEquals(0, backup.process(client).getStatus());
- } else {
- assertEquals(RequestStatusState.COMPLETED, backup.processAndWait(client, 30));//async
- }
- }
-
- log.info("Triggering Restore command");
-
- String restoreCollectionName = collectionName + "_restored";
- boolean sameConfig = random().nextBoolean();
-
- {
- CollectionAdminRequest.Restore restore = CollectionAdminRequest.restoreCollection(restoreCollectionName, backupName)
- .setLocation(backupLocation).setRepositoryName(getBackupRepoName());
-
- // Explicitly specify the replicationFactor/pullReplicas/nrtReplicas/tlogReplicas.
- // The values are the same as the originals here; maybe test with values different from the originals for better coverage.
- if (random().nextBoolean()) {
- restore.setReplicationFactor(replFactor);
- }
- if (backupCollection.getReplicas().size() > cluster.getJettySolrRunners().size()) {
- // may need to increase maxShardsPerNode (e.g. if it was shard split, then now we need more);
- // cast to double so Math.ceil isn't defeated by integer division
- restore.setMaxShardsPerNode((int) Math.ceil((double) backupCollection.getReplicas().size() / cluster.getJettySolrRunners().size()));
- }
-
- if (rarely()) { // Try with createNodeSet configuration
- int nodeSetSize = cluster.getJettySolrRunners().size() / 2;
- List<String> nodeStrs = new ArrayList<>(nodeSetSize);
- Iterator<JettySolrRunner> iter = cluster.getJettySolrRunners().iterator();
- for (int i = 0; i < nodeSetSize ; i++) {
- nodeStrs.add(iter.next().getNodeName());
- }
- restore.setCreateNodeSet(String.join(",", nodeStrs));
- restore.setCreateNodeSetShuffle(usually());
- // we need to double the maxShardsPerNode value since we reduced the number of available nodes by half.
- if (restore.getMaxShardsPerNode() != null) {
- restore.setMaxShardsPerNode(restore.getMaxShardsPerNode() * 2);
- } else {
- restore.setMaxShardsPerNode(origShardToDocCount.size() * 2);
- }
- }
-
- Properties props = new Properties();
- props.setProperty("customKey", "customVal");
- restore.setProperties(props);
-
- if (!sameConfig) {
- restore.setConfigName("customConfigName");
- }
- if (random().nextBoolean()) {
- assertEquals(0, restore.process(client).getStatus());
- } else {
- assertEquals(RequestStatusState.COMPLETED, restore.processAndWait(client, 30));//async
- }
- AbstractDistribZkTestBase.waitForRecoveriesToFinish(
- restoreCollectionName, cluster.getSolrClient().getZkStateReader(), log.isDebugEnabled(), true, 30);
- }
-
- // Check that the number of results is the same
- DocCollection restoreCollection = client.getZkStateReader().getClusterState().getCollection(restoreCollectionName);
- assertEquals(origShardToDocCount, getShardToDocCountMap(client, restoreCollection));
- // Re-index the same docs (they should be identical given the same random seed) and check that we get the same
- // result. This helps verify that we reconstituted the hash ranges / doc router.
- if (!(restoreCollection.getRouter() instanceof ImplicitDocRouter) && random().nextBoolean()) {
- indexDocs(restoreCollectionName);
- assertEquals(origShardToDocCount, getShardToDocCountMap(client, restoreCollection));
- }
-
- assertEquals(backupCollection.getReplicationFactor(), restoreCollection.getReplicationFactor());
- assertEquals(backupCollection.getAutoAddReplicas(), restoreCollection.getAutoAddReplicas());
- assertEquals(backupCollection.getActiveSlices().iterator().next().getReplicas().size(),
- restoreCollection.getActiveSlices().iterator().next().getReplicas().size());
- assertEquals(sameConfig ? "conf1" : "customConfigName",
- cluster.getSolrClient().getZkStateReader().readConfigName(restoreCollectionName));
-
- Map<String, Integer> numReplicasByNodeName = new HashMap<>();
- restoreCollection.getReplicas().forEach(x -> {
- numReplicasByNodeName.put(x.getNodeName(), numReplicasByNodeName.getOrDefault(x.getNodeName(), 0) + 1);
- });
- numReplicasByNodeName.forEach((k, v) -> {
- assertTrue("Node " + k + " has " + v + " replicas. Expected num replicas : " + restoreCollection.getMaxShardsPerNode() ,
- v <= restoreCollection.getMaxShardsPerNode());
- });
-
- assertEquals("Different count of nrtReplicas. Backup collection state=" + backupCollection + "\nRestore " +
- "collection state=" + restoreCollection, replFactor, restoreCollection.getNumNrtReplicas().intValue());
- assertEquals("Different count of pullReplicas. Backup collection state=" + backupCollection + "\nRestore" +
- " collection state=" + restoreCollection, numPullReplicas, restoreCollection.getNumPullReplicas().intValue());
- assertEquals("Different count of TlogReplica. Backup collection state=" + backupCollection + "\nRestore" +
- " collection state=" + restoreCollection, numTlogReplicas, restoreCollection.getNumTlogReplicas().intValue());
-
- assertEquals("Restore collection should use stateFormat=2", 2, restoreCollection.getStateFormat());
-
- // assert added core properties:
- // DWS: verified via manual inspection.
- // TODO: find the applicable core.properties on the file system, but how?
- }
-
- private Map<String, Integer> getShardToDocCountMap(CloudSolrClient client, DocCollection docCollection) throws SolrServerException, IOException {
- Map<String,Integer> shardToDocCount = new TreeMap<>();
- for (Slice slice : docCollection.getActiveSlices()) {
- String shardName = slice.getName();
- try (HttpSolrClient leaderClient = new HttpSolrClient.Builder(slice.getLeader().getCoreUrl()).withHttpClient(client.getHttpClient()).build()) {
- long docsInShard = leaderClient.query(new SolrQuery("*:*").setParam("distrib", "false"))
- .getResults().getNumFound();
- shardToDocCount.put(shardName, (int) docsInShard);
- }
- }
- return shardToDocCount;
- }
-}
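
For context on the abstract hooks in the deleted class above: a concrete subclass only
has to supply the collection name, the backup repository name, and the backup location.
A minimal sketch, assuming a filesystem-backed repository and the "cloud-minimal"
configset; the class name and repository name here are hypothetical, not part of this
commit:

    public class MyLocalFSBackupRestoreTest extends AbstractCloudBackupRestoreTestCase {
      private static String backupLocation;

      @BeforeClass
      public static void setupClass() throws Exception {
        // two nodes are enough for NUM_SHARDS given the small replication factors used
        configureCluster(2)
            .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
            .configure();
        backupLocation = createTempDir().toFile().getAbsolutePath();
      }

      @Override
      public String getCollectionName() { return "backuprestore"; }

      @Override
      public String getBackupRepoName() { return "local"; } // assumed to be registered in solr.xml

      @Override
      public String getBackupLocation() { return backupLocation; }
    }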
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/AssignTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/AssignTest.java b/solr/core/src/test/org/apache/solr/cloud/AssignTest.java
deleted file mode 100644
index cf26de4..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/AssignTest.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-
-import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.client.solrj.impl.ZkDistribStateManager;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.zookeeper.KeeperException;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyBoolean;
-import static org.mockito.ArgumentMatchers.anyInt;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-public class AssignTest extends SolrTestCaseJ4 {
-
- @Override
- @Before
- public void setUp() throws Exception {
- super.setUp();
- }
-
- @Override
- @After
- public void tearDown() throws Exception {
- super.tearDown();
- }
-
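- // Core node names should be handed out sequentially, with a separate counter per collection.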
- @Test
- public void testAssignNode() throws Exception {
- assumeWorkingMockito();
-
- SolrZkClient zkClient = mock(SolrZkClient.class);
- Map<String, byte[]> zkClientData = new HashMap<>();
- when(zkClient.setData(anyString(), any(), anyInt(), anyBoolean())).then(invocation -> {
- zkClientData.put(invocation.getArgument(0), invocation.getArgument(1));
- return null;
- });
- when(zkClient.getData(anyString(), any(), any(), anyBoolean())).then(invocation ->
- zkClientData.get(invocation.getArgument(0)));
- // TODO: fix this to be independent of ZK
- ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
- String nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
- assertEquals("core_node1", nodeName);
- nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection2", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
- assertEquals("core_node1", nodeName);
- nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
- assertEquals("core_node2", nodeName);
- }
-
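- // Hammer Assign.incAndGetId from many threads across several collections and verify
- // that no id is ever handed out twice for the same collection.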
- @Test
- public void testIdIsUnique() throws Exception {
- String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
- ZkTestServer server = new ZkTestServer(zkDir);
- Object fixedValue = new Object();
- String[] collections = new String[]{"c1","c2","c3","c4","c5","c6","c7","c8","c9"};
- Map<String, ConcurrentHashMap<Integer, Object>> collectionUniqueIds = new HashMap<>();
- for (String c : collections) {
- collectionUniqueIds.put(c, new ConcurrentHashMap<>());
- }
-
- ExecutorService executor = ExecutorUtil.newMDCAwareCachedThreadPool("threadpool");
- try {
- server.run();
-
- try (SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), 10000)) {
- assertTrue(zkClient.isConnected());
- zkClient.makePath("/", true);
- for (String c : collections) {
- zkClient.makePath("/collections/"+c, true);
- }
- // TODO: fix this to be independent of ZK
- ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
- List<Future<?>> futures = new ArrayList<>();
- for (int i = 0; i < 1000; i++) {
- futures.add(executor.submit(() -> {
- String collection = collections[random().nextInt(collections.length)];
- int id = Assign.incAndGetId(stateManager, collection, 0);
- Object val = collectionUniqueIds.get(collection).put(id, fixedValue);
- if (val != null) {
- fail("ZkController do not generate unique id for " + collection);
- }
- }));
- }
- for (Future<?> future : futures) {
- future.get();
- }
- }
- assertEquals(1000, (long) collectionUniqueIds.values().stream()
- .map(ConcurrentHashMap::size)
- .reduce((m1, m2) -> m1 + m2).get());
- } finally {
- server.shutdown();
- ExecutorUtil.shutdownAndAwaitTermination(executor);
- }
- }
-
-
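- // Verify the replica-type-aware core naming pattern: NRT replicas get an "_n<id>"
- // suffix and PULL replicas a "_p<id>" suffix.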
- @Test
- public void testBuildCoreName() throws IOException, InterruptedException, KeeperException {
- String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
- ZkTestServer server = new ZkTestServer(zkDir);
- server.run();
- try (SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), 10000)) {
- zkClient.makePath("/", true);
- // TODO: fix this to be independent of ZK
- ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
- Map<String, Slice> slices = new HashMap<>();
- slices.put("shard1", new Slice("shard1", new HashMap<>(), null));
- slices.put("shard2", new Slice("shard2", new HashMap<>(), null));
-
- DocCollection docCollection = new DocCollection("collection1", slices, null, DocRouter.DEFAULT);
- assertEquals("Core name pattern changed", "collection1_shard1_replica_n1", Assign.buildSolrCoreName(stateManager, docCollection, "shard1", Replica.Type.NRT));
- assertEquals("Core name pattern changed", "collection1_shard2_replica_p2", Assign.buildSolrCoreName(stateManager, docCollection, "shard2", Replica.Type.PULL));
- } finally {
- server.shutdown();
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
index 66b7866..2190c80 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
@@ -37,6 +37,7 @@ import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.client.solrj.response.UpdateResponse;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrInputDocument;
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
index 1a01386..22862b4 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
@@ -24,6 +24,11 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.cloud.AbstractDistribZkTestBase;
+import org.apache.solr.cloud.ElectionContext;
+import org.apache.solr.cloud.LeaderElector;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.api.collections.ShardSplitTest;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/CollectionReloadTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionReloadTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionReloadTest.java
deleted file mode 100644
index e886bb6..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionReloadTest.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.util.RetryUtil;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Verifies cluster state remains consistent after collection reload.
- */
-@SuppressSSL(bugUrl = "https://issues.apache.org/jira/browse/SOLR-5776")
-public class CollectionReloadTest extends SolrCloudTestCase {
-
- private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
- @BeforeClass
- public static void setupCluster() throws Exception {
- configureCluster(1)
- .addConfig("conf", configset("cloud-minimal"))
- .configure();
- }
-
- @Test
- public void testReloadedLeaderStateAfterZkSessionLoss() throws Exception {
-
- log.info("testReloadedLeaderStateAfterZkSessionLoss initialized OK ... running test logic");
-
- final String testCollectionName = "c8n_1x1";
- CollectionAdminRequest.createCollection(testCollectionName, "conf", 1, 1)
- .process(cluster.getSolrClient());
-
- Replica leader = cluster.getSolrClient().getZkStateReader()
- .getLeaderRetry(testCollectionName, "shard1", DEFAULT_TIMEOUT);
-
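- // Capture the core's start time so the retry loop below can detect when the reload completes.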
- long coreStartTime = getCoreStatus(leader).getCoreStartTime().getTime();
- CollectionAdminRequest.reloadCollection(testCollectionName).process(cluster.getSolrClient());
-
- RetryUtil.retryUntil("Timed out waiting for core to reload", 30, 1000, TimeUnit.MILLISECONDS, () -> {
- long restartTime = 0;
- try {
- restartTime = getCoreStatus(leader).getCoreStartTime().getTime();
- } catch (Exception e) {
- log.warn("Exception getting core start time: {}", e.getMessage());
- return false;
- }
- return restartTime > coreStartTime;
- });
-
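- // Remember the current cluster state version so we can tell when re-registration bumps it.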
- final int initialStateVersion = getCollectionState(testCollectionName).getZNodeVersion();
-
- cluster.expireZkSession(cluster.getReplicaJetty(leader));
-
- waitForState("Timed out waiting for core to re-register as ACTIVE after session expiry", testCollectionName, (n, c) -> {
- log.info("Collection state: {}", c.toString());
- Replica expiredReplica = c.getReplica(leader.getName());
- return expiredReplica != null && expiredReplica.getState() == Replica.State.ACTIVE && c.getZNodeVersion() > initialStateVersion;
- });
-
- log.info("testReloadedLeaderStateAfterZkSessionLoss succeeded ... shutting down now!");
- }
-}