You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by ma...@apache.org on 2020/11/10 01:01:33 UTC
[lucene-solr] branch reference_impl_dev updated: @1126 Get BASE_URL
out of the state.json.
This is an automated email from the ASF dual-hosted git repository.
markrmiller pushed a commit to branch reference_impl_dev
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git
The following commit(s) were added to refs/heads/reference_impl_dev by this push:
new 3bc448b @1126 Get BASE_URL out of the state.json.
3bc448b is described below
commit 3bc448b626956e4b8057aae4a8c182bc389872d9
Author: markrmiller@gmail.com <ma...@gmail.com>
AuthorDate: Mon Nov 9 18:59:59 2020 -0600
@1126 Get BASE_URL out of the state.json.
---
.../solr/prometheus/scraper/SolrCloudScraper.java | 2 +-
.../src/java/org/apache/solr/cloud/CloudUtil.java | 13 +-
.../apache/solr/cloud/ExclusiveSliceProperty.java | 370 ---------------------
.../src/java/org/apache/solr/cloud/Overseer.java | 57 ++--
.../apache/solr/cloud/OverseerElectionContext.java | 23 +-
.../solr/cloud/OverseerTaskExecutorTask.java | 2 +-
.../org/apache/solr/cloud/RecoveryStrategy.java | 14 +-
.../solr/cloud/ShardLeaderElectionContext.java | 7 +-
.../java/org/apache/solr/cloud/SyncStrategy.java | 13 +-
.../java/org/apache/solr/cloud/ZkController.java | 17 +-
.../solr/cloud/api/collections/AddReplicaCmd.java | 8 +-
.../cloud/api/collections/CreateCollectionCmd.java | 18 +-
.../cloud/api/collections/DeleteReplicaCmd.java | 2 +-
.../api/collections/MaintainRoutedAliasCmd.java | 2 +-
.../OverseerCollectionMessageHandler.java | 24 +-
.../solr/cloud/api/collections/RestoreCmd.java | 2 +-
.../solr/cloud/api/collections/SplitShardCmd.java | 5 +-
.../solr/cloud/overseer/ClusterStateMutator.java | 4 +-
.../solr/cloud/overseer/CollectionMutator.java | 2 +-
.../apache/solr/cloud/overseer/NodeMutator.java | 6 +-
.../apache/solr/cloud/overseer/ReplicaMutator.java | 29 +-
.../apache/solr/cloud/overseer/SliceMutator.java | 44 ++-
.../apache/solr/cloud/overseer/ZkStateWriter.java | 31 +-
.../java/org/apache/solr/core/BlobRepository.java | 3 +-
.../java/org/apache/solr/core/CoreContainer.java | 3 +-
.../org/apache/solr/core/backup/BackupManager.java | 2 +-
.../org/apache/solr/handler/admin/ColStatus.java | 6 +-
.../solr/handler/admin/CollectionsHandler.java | 4 +-
.../solr/handler/admin/RebalanceLeaders.java | 2 +-
.../solr/handler/admin/RequestSyncShardOp.java | 3 +-
.../solr/handler/component/CloudReplicaSource.java | 2 +-
.../solr/handler/component/HttpShardHandler.java | 2 +-
.../handler/component/HttpShardHandlerFactory.java | 2 +-
.../java/org/apache/solr/servlet/HttpSolrCall.java | 5 +-
.../org/apache/solr/update/SolrCmdDistributor.java | 24 +-
.../processor/DistributedZkUpdateProcessor.java | 46 +--
.../processor/RoutedAliasUpdateProcessor.java | 6 +-
.../src/java/org/apache/solr/util/ExportTool.java | 13 +-
.../apache/solr/cloud/ClusterStateMockUtil.java | 7 +-
.../org/apache/solr/cloud/ClusterStateTest.java | 12 +-
.../apache/solr/cloud/ClusterStateUpdateTest.java | 4 +-
.../apache/solr/cloud/CollectionsAPISolrJTest.java | 2 -
.../org/apache/solr/cloud/DeleteReplicaTest.java | 6 +-
.../solr/cloud/DistributedVersionInfoTest.java | 3 +-
.../org/apache/solr/cloud/HttpPartitionTest.java | 3 +-
.../org/apache/solr/cloud/LeaderElectionTest.java | 12 +-
.../solr/cloud/LeaderVoteWaitTimeoutTest.java | 3 +-
.../org/apache/solr/cloud/NodeMutatorTest.java | 6 +-
.../test/org/apache/solr/cloud/OverseerTest.java | 21 +-
.../apache/solr/cloud/ReplicationFactorTest.java | 3 +-
.../org/apache/solr/cloud/SSLMigrationTest.java | 8 +-
.../apache/solr/cloud/SolrCloudExampleTest.java | 2 +-
.../apache/solr/cloud/TestCloudConsistency.java | 3 +-
.../org/apache/solr/cloud/TestHashPartitioner.java | 2 +-
.../cloud/TestLeaderElectionWithEmptyReplica.java | 2 -
.../org/apache/solr/cloud/TestPullReplica.java | 4 +-
.../solr/cloud/TestPullReplicaErrorHandling.java | 4 +-
.../solr/cloud/TestRandomRequestDistribution.java | 5 +-
.../CollectionsAPIDistClusterPerZkTest.java | 4 +-
.../solr/cloud/api/collections/ShardSplitTest.java | 3 +-
.../solr/core/ConfigureRecoveryStrategyTest.java | 4 +-
.../test/org/apache/solr/core/CoreSorterTest.java | 4 +-
.../core/snapshots/TestSolrCloudSnapshots.java | 10 +-
.../solr/core/snapshots/TestSolrCoreSnapshots.java | 6 +-
.../org/apache/solr/handler/TestBlobHandler.java | 2 +-
.../org/apache/solr/handler/TestConfigReload.java | 2 +-
.../solr/handler/TestHdfsBackupRestoreCore.java | 5 +-
.../org/apache/solr/handler/TestReqParamsAPI.java | 2 +-
.../solr/handler/TestSolrConfigHandlerCloud.java | 4 +-
.../handler/TestSolrConfigHandlerConcurrent.java | 2 +-
.../test/org/apache/solr/util/TestExportTool.java | 12 +-
.../client/solrj/impl/BaseCloudSolrClient.java | 5 +-
.../solrj/impl/BaseHttpClusterStateProvider.java | 13 +-
.../solrj/impl/ZkClientClusterStateProvider.java | 10 +-
.../routing/AffinityReplicaListTransformer.java | 2 +-
.../routing/NodePreferenceRulesComparator.java | 1 +
.../RequestReplicaListTransformerGenerator.java | 1 +
.../org/apache/solr/common/cloud/ClusterState.java | 14 +-
.../apache/solr/common/cloud/ClusterStateUtil.java | 62 +---
.../java/org/apache/solr/common/cloud/Replica.java | 46 ++-
.../java/org/apache/solr/common/cloud/Slice.java | 14 +-
.../apache/solr/common/cloud/ZkCoreNodeProps.java | 18 -
.../apache/solr/common/cloud/ZkStateReader.java | 5 +-
.../java/org/apache/solr/common/util/Utils.java | 6 +-
.../solrj/impl/BaseSolrClientWireMockTest.java | 3 +-
.../solrj/impl/CloudHttp2SolrClientTest.java | 10 +-
.../solrj/impl/CloudSolrClientCacheTest.java | 38 +--
.../client/solrj/impl/CloudSolrClientTest.java | 10 +-
.../solrj/io/stream/CloudAuthStreamTest.java | 2 +-
.../routing/NodePreferenceRulesComparatorTest.java | 22 +-
.../solrj/routing/ReplicaListTransformerTest.java | 20 +-
...RequestReplicaListTransformerGeneratorTest.java | 15 +-
.../ShufflingReplicaListTransformerTest.java | 2 +-
.../solr/cloud/AbstractFullDistribZkTestBase.java | 17 +-
.../apache/solr/cloud/MiniSolrCloudCluster.java | 2 +-
95 files changed, 447 insertions(+), 881 deletions(-)
diff --git a/solr/contrib/prometheus-exporter/src/java/org/apache/solr/prometheus/scraper/SolrCloudScraper.java b/solr/contrib/prometheus-exporter/src/java/org/apache/solr/prometheus/scraper/SolrCloudScraper.java
index eb50f3d..c25d515 100644
--- a/solr/contrib/prometheus-exporter/src/java/org/apache/solr/prometheus/scraper/SolrCloudScraper.java
+++ b/solr/contrib/prometheus-exporter/src/java/org/apache/solr/prometheus/scraper/SolrCloudScraper.java
@@ -134,7 +134,7 @@ public class SolrCloudScraper extends SolrScraper {
.stream()
.map(DocCollection::getReplicas)
.flatMap(List::stream)
- .map(Replica::getBaseUrl)
+ .map(replica -> replica.getBaseUrl())
.collect(Collectors.toSet());
}
diff --git a/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java b/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
index 0f5cc48..48f1ee6 100644
--- a/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
+++ b/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
@@ -76,7 +76,7 @@ public class CloudUtil {
for (Replica replica : slice.getReplicas()) {
String replicaName = replica.getName();
- String baseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
+ String baseUrl = replica.getBaseUrl();
log.debug("compare against coreName={} baseUrl={}", replicaName, baseUrl);
if (coreName != null && coreName.equals(replicaName)
@@ -106,17 +106,6 @@ public class CloudUtil {
}
}
- public static boolean replicaExists(ClusterState clusterState, String collection, String shard, String coreName) {
- DocCollection docCollection = clusterState.getCollectionOrNull(collection);
- if (docCollection != null) {
- Slice slice = docCollection.getSlice(shard);
- if (slice != null) {
- return slice.getReplica(coreName) != null;
- }
- }
- return false;
- }
-
/**
* Returns a displayable unified path to the given resource. For non-solrCloud that will be the
* same as getConfigDir, but for Cloud it will be getConfigSetZkPath ending in a /
diff --git a/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java b/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java
deleted file mode 100644
index 448f455..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.ListIterator;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
-import org.apache.solr.cloud.overseer.ClusterStateMutator;
-import org.apache.solr.cloud.overseer.CollectionMutator;
-import org.apache.solr.cloud.overseer.SliceMutator;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ONLY_ACTIVE_NODES;
-import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SHARD_UNIQUE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.BALANCESHARDUNIQUE;
-
-// Class to encapsulate processing replica properties that have at most one replica hosting a property per slice.
-class ExclusiveSliceProperty {
- private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
- private ClusterState clusterState;
- private final boolean onlyActiveNodes;
- private final String property;
- private final DocCollection collection;
- private final String collectionName;
-
- // Key structure. For each node, list all replicas on it regardless of whether they have the property or not.
- private final Map<String, List<SliceReplica>> nodesHostingReplicas = new HashMap<>();
- // Key structure. For each node, a list of the replicas _currently_ hosting the property.
- private final Map<String, List<SliceReplica>> nodesHostingProp = new HashMap<>();
- Set<String> shardsNeedingHosts = new HashSet<>();
- Map<String, Slice> changedSlices = new HashMap<>(); // Work on copies rather than the underlying cluster state.
-
- private int origMaxPropPerNode = 0;
- private int origModulo = 0;
- private int tmpMaxPropPerNode = 0;
- private int tmpModulo = 0;
- Random rand = new Random();
-
- private int assigned = 0;
-
- ExclusiveSliceProperty(ClusterState clusterState, ZkNodeProps message) {
- this.clusterState = clusterState;
- String tmp = message.getStr(ZkStateReader.PROPERTY_PROP);
- if (StringUtils.startsWith(tmp, OverseerCollectionMessageHandler.COLL_PROP_PREFIX) == false) {
- tmp = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + tmp;
- }
- this.property = tmp.toLowerCase(Locale.ROOT);
- collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
-
- if (StringUtils.isBlank(collectionName) || StringUtils.isBlank(property)) {
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
- "Overseer '" + message.getStr(Overseer.QUEUE_OPERATION) + "' requires both the '" + ZkStateReader.COLLECTION_PROP + "' and '" +
- ZkStateReader.PROPERTY_PROP + "' parameters. No action taken ");
- }
-
- Boolean shardUnique = Boolean.parseBoolean(message.getStr(SHARD_UNIQUE));
- if (shardUnique == false &&
- SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(this.property) == false) {
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Balancing properties amongst replicas in a slice requires that"
- + " the property be a pre-defined property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true' " +
- " Property: " + this.property + " shardUnique: " + Boolean.toString(shardUnique));
- }
-
- collection = clusterState.getCollection(collectionName);
- if (collection == null) {
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
- "Could not find collection ' " + collectionName + "' for overseer operation '" +
- message.getStr(Overseer.QUEUE_OPERATION) + "'. No action taken.");
- }
- onlyActiveNodes = Boolean.parseBoolean(message.getStr(ONLY_ACTIVE_NODES, "true"));
- }
-
-
- DocCollection getDocCollection() {
- return collection;
- }
-
- private boolean isActive(Replica replica) {
- return replica.getState() == Replica.State.ACTIVE;
- }
-
- // Collect a list of all the nodes that _can_ host the indicated property. Along the way, also collect any of
- // the replicas on that node that _already_ host the property as well as any slices that do _not_ have the
- // property hosted.
- //
- // Return true if anything node needs it's property reassigned. False if the property is already balanced for
- // the collection.
-
- private boolean collectCurrentPropStats() {
- int maxAssigned = 0;
- // Get a list of potential replicas that can host the property _and_ their counts
- // Move any obvious entries to a list of replicas to change the property on
- Set<String> allHosts = new HashSet<>();
- for (Slice slice : collection.getSlices()) {
- boolean sliceHasProp = false;
- for (Replica replica : slice.getReplicas()) {
- if (onlyActiveNodes && isActive(replica) == false) {
- if (StringUtils.isNotBlank(replica.getStr(property))) {
- removeProp(slice, replica.getName()); // Note, we won't be committing this to ZK until later.
- }
- continue;
- }
- allHosts.add(replica.getNodeName());
- String nodeName = replica.getNodeName();
- if (StringUtils.isNotBlank(replica.getStr(property))) {
- if (sliceHasProp) {
- throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
- "'" + BALANCESHARDUNIQUE + "' should only be called for properties that have at most one member " +
- "in any slice with the property set. No action taken.");
- }
- if (nodesHostingProp.containsKey(nodeName) == false) {
- nodesHostingProp.put(nodeName, new ArrayList<>());
- }
- nodesHostingProp.get(nodeName).add(new SliceReplica(slice, replica));
- ++assigned;
- maxAssigned = Math.max(maxAssigned, nodesHostingProp.get(nodeName).size());
- sliceHasProp = true;
- }
- if (nodesHostingReplicas.containsKey(nodeName) == false) {
- nodesHostingReplicas.put(nodeName, new ArrayList<>());
- }
- nodesHostingReplicas.get(nodeName).add(new SliceReplica(slice, replica));
- }
- }
-
- // If the total number of already-hosted properties assigned to nodes
- // that have potential to host leaders is equal to the slice count _AND_ none of the current nodes has more than
- // the max number of properties, there's nothing to do.
- origMaxPropPerNode = collection.getSlices().size() / allHosts.size();
-
- // Some nodes can have one more of the proeprty if the numbers aren't exactly even.
- origModulo = collection.getSlices().size() % allHosts.size();
- if (origModulo > 0) {
- origMaxPropPerNode++; // have to have some nodes with 1 more property.
- }
-
- // We can say for sure that we need to rebalance if we don't have as many assigned properties as slices.
- if (assigned != collection.getSlices().size()) {
- return true;
- }
-
- // Make sure there are no more slices at the limit than the "leftovers"
- // Let's say there's 7 slices and 3 nodes. We need to distribute the property as 3 on node1, 2 on node2 and 2 on node3
- // (3, 2, 2) We need to be careful to not distribute them as 3, 3, 1. that's what this check is all about.
- int counter = origModulo;
- for (List<SliceReplica> list : nodesHostingProp.values()) {
- if (list.size() == origMaxPropPerNode) --counter;
- }
- if (counter == 0) return false; // nodes with 1 extra leader are exactly the needed number
-
- return true;
- }
-
- private void removeSliceAlreadyHostedFromPossibles(String sliceName) {
- for (Map.Entry<String, List<SliceReplica>> entReplica : nodesHostingReplicas.entrySet()) {
-
- ListIterator<SliceReplica> iter = entReplica.getValue().listIterator();
- while (iter.hasNext()) {
- SliceReplica sr = iter.next();
- if (sr.slice.getName().equals(sliceName))
- iter.remove();
- }
- }
- }
-
- private void balanceUnassignedReplicas() {
- tmpMaxPropPerNode = origMaxPropPerNode; // A bit clumsy, but don't want to duplicate code.
- tmpModulo = origModulo;
-
- // Get the nodeName and shardName for the node that has the least room for this
-
- while (shardsNeedingHosts.size() > 0) {
- String nodeName = "";
- int minSize = Integer.MAX_VALUE;
- SliceReplica srToChange = null;
- for (String slice : shardsNeedingHosts) {
- for (Map.Entry<String, List<SliceReplica>> ent : nodesHostingReplicas.entrySet()) {
- // A little tricky. If we don't set this to something below, then it means all possible places to
- // put this property are full up, so just put it somewhere.
- if (srToChange == null && ent.getValue().size() > 0) {
- srToChange = ent.getValue().get(0);
- }
- ListIterator<SliceReplica> iter = ent.getValue().listIterator();
- while (iter.hasNext()) {
- SliceReplica sr = iter.next();
- if (StringUtils.equals(slice, sr.slice.getName()) == false) {
- continue;
- }
- if (nodesHostingProp.containsKey(ent.getKey()) == false) {
- nodesHostingProp.put(ent.getKey(), new ArrayList<SliceReplica>());
- }
- if (minSize > nodesHostingReplicas.get(ent.getKey()).size() && nodesHostingProp.get(ent.getKey()).size() < tmpMaxPropPerNode) {
- minSize = nodesHostingReplicas.get(ent.getKey()).size();
- srToChange = sr;
- nodeName = ent.getKey();
- }
- }
- }
- }
- // Now, you have a slice and node to put it on
- shardsNeedingHosts.remove(srToChange.slice.getName());
- if (nodesHostingProp.containsKey(nodeName) == false) {
- nodesHostingProp.put(nodeName, new ArrayList<SliceReplica>());
- }
- nodesHostingProp.get(nodeName).add(srToChange);
- adjustLimits(nodesHostingProp.get(nodeName));
- removeSliceAlreadyHostedFromPossibles(srToChange.slice.getName());
- addProp(srToChange.slice, srToChange.replica.getName());
- // When you set the property, you must insure that it is _removed_ from any other replicas.
- for (Replica rep : srToChange.slice.getReplicas()) {
- if (rep.getName().equals(srToChange.replica.getName())) {
- continue;
- }
- if (rep.getProperty(property) != null) {
- removeProp(srToChange.slice, srToChange.replica.getName());
- }
- }
- }
- }
-
- // Adjust the min/max counts per allowed per node. Special handling here for dealing with the fact
- // that no node should have more than 1 more replica with this property than any other.
- private void adjustLimits(List<SliceReplica> changeList) {
- if (changeList.size() == tmpMaxPropPerNode) {
- if (tmpModulo < 0) return;
-
- --tmpModulo;
- if (tmpModulo == 0) {
- --tmpMaxPropPerNode;
- --tmpModulo; // Prevent dropping tmpMaxPropPerNode again.
- }
- }
- }
-
- // Go through the list of presently-hosted properties and remove any that have too many replicas that host the property
- private void removeOverallocatedReplicas() {
- tmpMaxPropPerNode = origMaxPropPerNode; // A bit clumsy, but don't want to duplicate code.
- tmpModulo = origModulo;
-
- for (Map.Entry<String, List<SliceReplica>> ent : nodesHostingProp.entrySet()) {
- while (ent.getValue().size() > tmpMaxPropPerNode) { // remove delta nodes
- ent.getValue().remove(rand.nextInt(ent.getValue().size()));
- }
- adjustLimits(ent.getValue());
- }
- }
-
- private void removeProp(Slice origSlice, String replicaName) {
- if (log.isDebugEnabled()) {
- log.debug("Removing property {} from slice {}, replica {}", property, origSlice.getName(), replicaName);
- }
- getReplicaFromChanged(origSlice, replicaName).getProperties().remove(property);
- }
-
- private void addProp(Slice origSlice, String replicaName) {
- if (log.isDebugEnabled()) {
- log.debug("Adding property {} to slice {}, replica {}", property, origSlice.getName(), replicaName);
- }
- getReplicaFromChanged(origSlice, replicaName).getProperties().put(property, "true");
- }
-
- // Just a place to encapsulate the fact that we need to have new slices (copy) to update before we
- // put this all in the cluster state.
- private Replica getReplicaFromChanged(Slice origSlice, String replicaName) {
- Slice newSlice = changedSlices.get(origSlice.getName());
- Replica replica;
- if (newSlice != null) {
- replica = newSlice.getReplica(replicaName);
- } else {
- newSlice = new Slice(origSlice.getName(), origSlice.getReplicasCopy(), origSlice.shallowCopy(), origSlice.collection);
- changedSlices.put(origSlice.getName(), newSlice);
- replica = newSlice.getReplica(replicaName);
- }
- if (replica == null) {
- throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Should have been able to find replica '" +
- replicaName + "' in slice '" + origSlice.getName() + "'. No action taken");
- }
- return replica;
-
- }
- // Main entry point for carrying out the action. Returns "true" if we have actually moved properties around.
-
- boolean balanceProperty() {
- if (collectCurrentPropStats() == false) {
- return false;
- }
-
- // we have two lists based on nodeName
- // 1> all the nodes that _could_ host a property for the slice
- // 2> all the nodes that _currently_ host a property for the slice.
-
- // So, remove a replica from the nodes that have too many
- removeOverallocatedReplicas();
-
- // prune replicas belonging to a slice that have the property currently assigned from the list of replicas
- // that could host the property.
- for (Map.Entry<String, List<SliceReplica>> entProp : nodesHostingProp.entrySet()) {
- for (SliceReplica srHosting : entProp.getValue()) {
- removeSliceAlreadyHostedFromPossibles(srHosting.slice.getName());
- }
- }
-
- // Assemble the list of slices that do not have any replica hosting the property:
- for (Map.Entry<String, List<SliceReplica>> ent : nodesHostingReplicas.entrySet()) {
- ListIterator<SliceReplica> iter = ent.getValue().listIterator();
- while (iter.hasNext()) {
- SliceReplica sr = iter.next();
- shardsNeedingHosts.add(sr.slice.getName());
- }
- }
-
- // At this point, nodesHostingProp should contain _only_ lists of replicas that belong to slices that do _not_
- // have any replica hosting the property. So let's assign them.
-
- balanceUnassignedReplicas();
- for (Slice newSlice : changedSlices.values()) {
- DocCollection docCollection = CollectionMutator.updateSlice(collectionName, clusterState.getCollection(collectionName), newSlice);
- clusterState = ClusterStateMutator.newState(clusterState, collectionName, docCollection);
- }
- return true;
- }
-
- private static class SliceReplica {
- Slice slice;
- Replica replica;
-
- SliceReplica(Slice slice, Replica replica) {
- this.slice = slice;
- this.replica = replica;
- }
- public String toString() {
- StringBuilder sb = new StringBuilder(System.lineSeparator()).append(System.lineSeparator());
- sb.append(" :").append(System.lineSeparator()).append("slice: ").append(slice.toString()).append(System.lineSeparator()).append(" replica: ").append(replica.toString()).append(System.lineSeparator());
- return sb.toString();
- }
- }
-}
diff --git a/solr/core/src/java/org/apache/solr/cloud/Overseer.java b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
index d921019..3535dfb 100644
--- a/solr/core/src/java/org/apache/solr/cloud/Overseer.java
+++ b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
@@ -27,6 +27,9 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.BiConsumer;
@@ -55,9 +58,11 @@ import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CollectionAdminParams;
+import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.common.util.IOUtils;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.ObjectReleaseTracker;
+import org.apache.solr.common.util.OrderedExecutor;
import org.apache.solr.common.util.Pair;
import org.apache.solr.core.CloudConfig;
import org.apache.solr.core.CoreContainer;
@@ -158,15 +163,15 @@ public class Overseer implements SolrCloseable {
private volatile boolean initedHttpClient = false;
private volatile QueueWatcher queueWatcher;
private volatile WorkQueueWatcher.CollectionWorkQueueWatcher collectionQueueWatcher;
+ private volatile ExecutorService taskExecutor;
public boolean isDone() {
return closeAndDone;
}
-
- // public ExecutorService getTaskExecutor() {
-// return taskExecutor;
-// }
+ public ExecutorService getTaskExecutor() {
+ return taskExecutor;
+ }
private static class StringBiConsumer implements BiConsumer<String, Object> {
boolean firstPair = true;
@@ -280,8 +285,8 @@ public class Overseer implements SolrCloseable {
//
// stateManagmentExecutor = ParWork.getParExecutorService("stateManagmentExecutor",
// 1, 1, 3000, new SynchronousQueue());
-// taskExecutor = ParWork.getParExecutorService("overseerTaskExecutor",
-// 4, 16, 3000, new SynchronousQueue());
+ taskExecutor = ParWork.getParExecutorService("overseerTaskExecutor",
+ 4, 16, 10000, new SynchronousQueue());
// try {
// if (context != null) context.close();
@@ -475,11 +480,7 @@ public class Overseer implements SolrCloseable {
}
public void close() {
- close(false);
- }
-
- public void close(boolean fromCSUpdateThread) {
- log.info("Overseer (id={}) closing closeAndDone={} frp,CSUpdateThread={}", id, closeAndDone, fromCSUpdateThread);
+ log.info("Overseer (id={}) closing closeAndDone={} frp}", id, closeAndDone);
if (closeAndDone) {
if (overseerOnlyClient != null) {
@@ -509,13 +510,9 @@ public class Overseer implements SolrCloseable {
}
void doClose() {
- doClose(false);
- }
-
- void doClose(boolean fromCSUpdateThread) {
closed = true;
- log.info("doClose() - start fromCSUpdateThread={} closeAndDone={}", fromCSUpdateThread, closeAndDone);
+ log.info("doClose() - start closeAndDone={}", closeAndDone);
this.zkStateWriter = null;
if (queueWatcher != null) {
@@ -532,10 +529,6 @@ public class Overseer implements SolrCloseable {
// stateManagmentExecutor.shutdown();
// }
//
-// if (taskExecutor != null) {
-// log.info("shutdown taskExecutor");
-// taskExecutor.shutdown();
-// }
// if (stateManagmentExecutor != null) {
// stateManagmentExecutor.shutdownNow();
@@ -544,11 +537,15 @@ public class Overseer implements SolrCloseable {
// ExecutorUtil.shutdownAndAwaitTermination(stateManagmentExecutor );
-// if (taskExecutor != null) {
-// taskExecutor.shutdownNow();
-// }
+ if (taskExecutor != null) {
+ try {
+ taskExecutor.shutdownNow();
+ } catch (NullPointerException okay) {
+ // okay
+ }
+ }
- // ExecutorUtil.shutdownAndAwaitTermination(taskExecutor );
+ ExecutorUtil.shutdownAndAwaitTermination(taskExecutor );
if (log.isDebugEnabled()) {
log.debug("doClose() - end");
@@ -769,15 +766,15 @@ public class Overseer implements SolrCloseable {
}}, true);
} catch (KeeperException.SessionExpiredException e) {
log.warn("ZooKeeper session expired");
- overseer.doClose(false);
+ overseer.doClose();
return;
} catch (InterruptedException | AlreadyClosedException e) {
log.info("Already closed");
- overseer.doClose(false);
+ overseer.doClose();
return;
} catch (Exception e) {
log.error("Unexpected error in Overseer state update loop", e);
- overseer.doClose(false);
+ overseer.doClose();
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
}
}
@@ -821,15 +818,15 @@ public class Overseer implements SolrCloseable {
return children;
} catch (KeeperException.SessionExpiredException e) {
log.warn("ZooKeeper session expired");
- overseer.doClose(false);
+ overseer.doClose();
return null;
} catch (InterruptedException | AlreadyClosedException e) {
log.info("Already closed");
- overseer.doClose(false);
+ overseer.doClose();
return null;
} catch (Exception e) {
log.error("Unexpected error in Overseer state update loop", e);
- overseer.doClose(false);
+ overseer.doClose();
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
}
}
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerElectionContext.java b/solr/core/src/java/org/apache/solr/cloud/OverseerElectionContext.java
index 71fa2d3..d96c5fe 100644
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerElectionContext.java
+++ b/solr/core/src/java/org/apache/solr/cloud/OverseerElectionContext.java
@@ -31,6 +31,7 @@ import org.apache.solr.common.cloud.ConnectionManager;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.util.Pair;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
@@ -45,14 +46,15 @@ final class OverseerElectionContext extends ShardLeaderElectionContextBase {
private volatile boolean isClosed = false;
public OverseerElectionContext(final String zkNodeName, SolrZkClient zkClient, Overseer overseer) {
- super(zkNodeName, Overseer.OVERSEER_ELECT, Overseer.OVERSEER_ELECT + "/leader", new Replica(ID, getIDMap(zkNodeName), null, null), zkClient);
+ super(zkNodeName, Overseer.OVERSEER_ELECT, Overseer.OVERSEER_ELECT + "/leader", new Replica(overseer.getZkController().getNodeName(), getIDMap(zkNodeName, overseer), null, null, overseer.getZkStateReader()), zkClient);
this.overseer = overseer;
this.zkClient = zkClient;
}
- private static Map<String,Object> getIDMap(String zkNodeName) {
- Map<String,Object> idMap = new HashMap<>(1);
+ private static Map<String,Object> getIDMap(String zkNodeName, Overseer overseer) {
+ Map<String,Object> idMap = new HashMap<>(2);
idMap.put(ID, zkNodeName);
+ idMap.put(ZkStateReader.NODE_NAME_PROP, overseer.getZkController().getNodeName());
return idMap;
}
@@ -104,11 +106,7 @@ final class OverseerElectionContext extends ShardLeaderElectionContextBase {
}
@Override
- public void cancelElection() throws KeeperException, InterruptedException {
- cancelElection(false);
- }
-
- public void cancelElection(boolean fromCSUpdateThread) throws InterruptedException, KeeperException {
+ public void cancelElection() throws InterruptedException, KeeperException {
try (ParWork closer = new ParWork(this, true)) {
if (zkClient.isConnected()) {
closer.collect("cancelElection", () -> {
@@ -122,7 +120,7 @@ final class OverseerElectionContext extends ShardLeaderElectionContextBase {
}
closer.collect("overseer", () -> {
try {
- overseer.doClose(fromCSUpdateThread);
+ overseer.doClose();
} catch (Exception e) {
ParWork.propagateInterrupt(e);
log.error("Exception closing Overseer", e);
@@ -133,14 +131,9 @@ final class OverseerElectionContext extends ShardLeaderElectionContextBase {
@Override
public void close() {
- close(false);
- }
-
-
- public void close(boolean fromCSUpdateThread) {
this.isClosed = true;
try {
- cancelElection(fromCSUpdateThread);
+ cancelElection();
} catch (Exception e) {
ParWork.propagateInterrupt(e);
log.error("Exception canceling election", e);
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerTaskExecutorTask.java b/solr/core/src/java/org/apache/solr/cloud/OverseerTaskExecutorTask.java
index 90d5079..8e95c17 100644
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerTaskExecutorTask.java
+++ b/solr/core/src/java/org/apache/solr/cloud/OverseerTaskExecutorTask.java
@@ -116,7 +116,7 @@ public class OverseerTaskExecutorTask implements Runnable {
// }
// break;
case DOWNNODE:
- return new NodeMutator().downNode(clusterState, message);
+ return new NodeMutator().downNode(zkController.zkStateReader, clusterState, message);
default:
throw new RuntimeException("unknown operation:" + operation + " contents:" + message.getProperties());
diff --git a/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java b/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
index 595344b..844d4f1 100644
--- a/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
+++ b/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
@@ -248,14 +248,14 @@ public class RecoveryStrategy implements Runnable, Closeable {
*
* @lucene.experimental
*/
- protected String getReplicateLeaderUrl(Replica leaderprops) {
+ protected String getReplicateLeaderUrl(Replica leaderprops, ZkStateReader zkStateReader) {
return leaderprops.getCoreUrl();
}
final private void replicate(String nodeName, SolrCore core, Replica leaderprops)
throws SolrServerException, IOException {
- final String leaderUrl = getReplicateLeaderUrl(leaderprops);
+ final String leaderUrl = getReplicateLeaderUrl(leaderprops, zkStateReader);
log.info("Attempting to replicate from [{}].", leaderprops);
@@ -404,12 +404,12 @@ public class RecoveryStrategy implements Runnable, Closeable {
CloudDescriptor cloudDesc = this.coreDescriptor.getCloudDescriptor();
Replica leaderprops = zkStateReader.getLeaderRetry(
cloudDesc.getCollectionName(), cloudDesc.getShardId());
- final String leaderBaseUrl = leaderprops.getStr(ZkStateReader.BASE_URL_PROP);
- final String leaderCoreName = leaderprops.getStr(ZkStateReader.CORE_NAME_PROP);
+ final String leaderBaseUrl = leaderprops.getBaseUrl();
+ final String leaderCoreName = leaderprops.getName();
- String leaderUrl = ZkCoreNodeProps.getCoreUrl(leaderBaseUrl, leaderCoreName);
+ String leaderUrl = leaderprops.getCoreUrl();
- String ourUrl = ZkCoreNodeProps.getCoreUrl(baseUrl, coreName);
+ String ourUrl = Replica.getCoreUrl(baseUrl, coreName);
boolean isLeader = leaderUrl.equals(ourUrl); // TODO: We can probably delete most of this code if we say this
// strategy can only be used for pull replicas
@@ -630,7 +630,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
zkController.stopReplicationFromLeader(coreName);
}
- final String ourUrl = ZkCoreNodeProps.getCoreUrl(baseUrl, coreName);
+ final String ourUrl = Replica.getCoreUrl(baseUrl, coreName);
Future<RecoveryInfo> replayFuture = null;
while (!successfulRecovery && !isClosed()) { // don't use interruption or
// it will close channels
diff --git a/solr/core/src/java/org/apache/solr/cloud/ShardLeaderElectionContext.java b/solr/core/src/java/org/apache/solr/cloud/ShardLeaderElectionContext.java
index 8914197..f90c1495 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ShardLeaderElectionContext.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ShardLeaderElectionContext.java
@@ -262,10 +262,9 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
assert shardId != null;
core.getCoreDescriptor().getCloudDescriptor().setLeader(true);
- publishActive(core);
+
ZkNodeProps zkNodes = ZkNodeProps
- .fromKeyVals(Overseer.QUEUE_OPERATION, OverseerAction.LEADER.toLower(), ZkStateReader.SHARD_ID_PROP, shardId, ZkStateReader.COLLECTION_PROP, collection, ZkStateReader.BASE_URL_PROP,
- leaderProps.get(ZkStateReader.BASE_URL_PROP), ZkStateReader.NODE_NAME_PROP, leaderProps.get(ZkStateReader.NODE_NAME_PROP), ZkStateReader.CORE_NAME_PROP,
+ .fromKeyVals(Overseer.QUEUE_OPERATION, OverseerAction.LEADER.toLower(), ZkStateReader.SHARD_ID_PROP, shardId, ZkStateReader.COLLECTION_PROP, collection, ZkStateReader.NODE_NAME_PROP, leaderProps.get(ZkStateReader.NODE_NAME_PROP), ZkStateReader.CORE_NAME_PROP,
leaderProps.getName(), ZkStateReader.STATE_PROP, Replica.State.ACTIVE.toString());
assert zkController != null;
assert zkController.getOverseer() != null;
@@ -273,7 +272,7 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
log.info("Publish leader state");
zkController.getOverseer().offerStateUpdate(Utils.toJSON(zkNodes));
- log.info("I am the new leader: " + ZkCoreNodeProps.getCoreUrl(leaderProps) + " " + shardId);
+ log.info("I am the new leader: " + leaderProps.getCoreUrl() + " " + shardId);
} catch (AlreadyClosedException | InterruptedException e) {
ParWork.propagateInterrupt("Already closed or interrupted, bailing..", e);
diff --git a/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java b/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java
index 4e88454..5ce4e2b 100644
--- a/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java
+++ b/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java
@@ -77,7 +77,7 @@ public class SyncStrategy implements Closeable {
}
if (log.isInfoEnabled()) {
- log.info("Sync replicas to {}", ZkCoreNodeProps.getCoreUrl(leaderProps));
+ log.info("Sync replicas to {}", leaderProps.getCoreUrl());
}
if (core.getUpdateHandler().getUpdateLog() == null) {
@@ -167,7 +167,7 @@ public class SyncStrategy implements Closeable {
cd.getName());
if (nodes == null) {
if (log.isInfoEnabled()) {
- log.info("{} has no replicas", ZkCoreNodeProps.getCoreUrl(leaderProps));
+ log.info("{} has no replicas", leaderProps.getCoreUrl());
}
return;
}
@@ -176,10 +176,11 @@ public class SyncStrategy implements Closeable {
for (Replica node : nodes) {
try {
if (log.isInfoEnabled()) {
- log.info("{}: try and ask {} to sync", ZkCoreNodeProps.getCoreUrl(leaderProps), node.getCoreUrl());
+ log.info("{}: try and ask {} to sync", leaderProps.getCoreUrl(), node.getCoreUrl());
}
- requestSync(node.getBaseUrl(), node.getCoreUrl(), leaderProps.getCoreUrl(), node.getName(), nUpdates);
+ requestSync(node.getBaseUrl(), node.getCoreUrl(),
+ leaderProps.getCoreUrl(), node.getName(), nUpdates);
} catch (Exception e) {
ParWork.propagateInterrupt(e);
@@ -198,11 +199,11 @@ public class SyncStrategy implements Closeable {
if (!success) {
if (log.isInfoEnabled()) {
- log.info("{}: Sync failed - replica ({}) should try to recover.", ZkCoreNodeProps.getCoreUrl(leaderProps), srsp.getShardAddress());
+ log.info("{}: Sync failed - replica ({}) should try to recover.", leaderProps.getCoreUrl(), srsp.getShardAddress());
}
} else {
if (log.isInfoEnabled()) {
- log.info("{}: sync completed with {}", ZkCoreNodeProps.getCoreUrl(leaderProps), srsp.getShardAddress());
+ log.info("{}: sync completed with {}", leaderProps.getCoreUrl(), srsp.getShardAddress());
}
}
}
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkController.java b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
index 081a736..2bf1349 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ZkController.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
@@ -107,12 +107,10 @@ import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTIONS_ZKNODE;
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.ELECTION_NODE_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.REJOIN_AT_HEAD_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
@@ -1410,7 +1408,7 @@ public class ZkController implements Closeable, Runnable {
log.info("Wait to see leader for {}, {}", collection, shardId);
Replica leader = zkStateReader.getLeaderRetry(collection, shardId, 10000);
- String ourUrl = ZkCoreNodeProps.getCoreUrl(baseUrl, coreName);
+ String ourUrl = replica.getCoreUrl();
boolean isLeader = leader.getName() .equals(coreName);
log.info("We are {} and leader is {} isLeader={}", ourUrl, leader.getCoreUrl(), isLeader);
@@ -1589,7 +1587,7 @@ public class ZkController implements Closeable, Runnable {
byte[] data = zkClient.getData(ZkStateReader.getShardLeadersPath(collection, slice), null, null);
ZkCoreNodeProps leaderProps = new ZkCoreNodeProps(ZkNodeProps.load(data));
// nocommit - right key for leader name?
- return new Replica(leaderProps.getNodeProps().getStr("name"), leaderProps.getNodeProps().getProperties(), collection, slice);
+ return new Replica(leaderProps.getNodeProps().getStr("name"), leaderProps.getNodeProps().getProperties(), collection, slice, zkStateReader);
} catch (Exception e) {
SolrZkClient.checkInterrupted(e);
@@ -1619,10 +1617,9 @@ public class ZkController implements Closeable, Runnable {
Map<String, Object> props = new HashMap<>();
// we only put a subset of props into the leader node
- props.put(ZkStateReader.BASE_URL_PROP, getBaseUrl());
props.put(ZkStateReader.NODE_NAME_PROP, getNodeName());
- Replica replica = new Replica(cd.getName(), props, null, null);
+ Replica replica = new Replica(cd.getName(), props, collection, shardId, zkStateReader);
LeaderElector leaderElector;
synchronized (leaderElectors) {
leaderElector = leaderElectors.get(replica.getName());
@@ -1727,9 +1724,8 @@ public class ZkController implements Closeable, Runnable {
Map<String,Object> props = new HashMap<>();
props.put(Overseer.QUEUE_OPERATION, "state");
props.put(ZkStateReader.STATE_PROP, state.toString());
- props.put(ZkStateReader.BASE_URL_PROP, getBaseUrl());
- props.put(ZkStateReader.CORE_NAME_PROP, cd.getName());
props.put(ZkStateReader.ROLES_PROP, cd.getCloudDescriptor().getRoles());
+ props.put(CORE_NAME_PROP, cd.getName());
props.put(ZkStateReader.NODE_NAME_PROP, getNodeName());
props.put(ZkStateReader.SHARD_ID_PROP, cd.getCloudDescriptor().getShardId());
props.put(ZkStateReader.COLLECTION_PROP, collection);
@@ -2077,7 +2073,6 @@ public class ZkController implements Closeable, Runnable {
String shardId = params.get(SHARD_ID_PROP);
String coreName = params.get(CORE_NAME_PROP);
String electionNode = params.get(ELECTION_NODE_PROP);
- String baseUrl = params.get(BASE_URL_PROP);
try {
MDCLoggingContext.setCoreDescriptor(cc, cc.getCoreDescriptor(coreName));
@@ -2090,11 +2085,9 @@ public class ZkController implements Closeable, Runnable {
if (prevContext != null) prevContext.close();
Map<String, Object> props = new HashMap<>();
- props.put(ZkStateReader.BASE_URL_PROP, baseUrl);
- props.put(ZkStateReader.CORE_NAME_PROP, coreName);
props.put(ZkStateReader.NODE_NAME_PROP, getNodeName());
- Replica replica = new Replica(coreName, props, null, null);
+ Replica replica = new Replica(coreName, props, collectionName, shardId, zkStateReader);
LeaderElector elect = ((ShardLeaderElectionContext) prevContext).getLeaderElector();
ShardLeaderElectionContext context = new ShardLeaderElectionContext(elect, shardId, collectionName,
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
index 2a1da0d..33b987a 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
@@ -173,12 +173,9 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
collection = clusterState.getCollection(collectionName);
CreateReplica cr = assignReplicaDetails(collection, message, replicaPosition);
- message = message.plus(ZkStateReader.BASE_URL_PROP, ocmh.zkStateReader.getBaseUrlForNodeName(replicaPosition.node));
-
message = message.plus(NODE_NAME_PROP, replicaPosition.node);
message = message.plus(ZkStateReader.REPLICA_TYPE, cr.replicaType.name());
-
clusterState = new SliceMutator(ocmh.cloudManager).addReplica(clusterState, message);
createReplicas.add(cr);
@@ -311,10 +308,7 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
ZkStateReader zkStateReader = ocmh.zkStateReader;
String collectionName = collection.getName();
ZkNodeProps props = new ZkNodeProps(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower(), ZkStateReader.COLLECTION_PROP, collectionName, ZkStateReader.SHARD_ID_PROP, createReplica.sliceName,
- ZkStateReader.CORE_NAME_PROP, createReplica.coreName, ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(), ZkStateReader.BASE_URL_PROP,
- zkStateReader.getBaseUrlForNodeName(createReplica.node), ZkStateReader.NODE_NAME_PROP, createReplica.node, ZkStateReader.REPLICA_TYPE, createReplica.replicaType.name());
-
- String coreUrl = ZkCoreNodeProps.getCoreUrl(props.getStr(ZkStateReader.BASE_URL_PROP), createReplica.coreName);
+ ZkStateReader.CORE_NAME_PROP, createReplica.coreName, ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(), ZkStateReader.NODE_NAME_PROP, createReplica.node, ZkStateReader.REPLICA_TYPE, createReplica.replicaType.name());
String configName = zkStateReader.readConfigName(collectionName);
String routeKey = message.getStr(ShardParams._ROUTE_);
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
index 7f133bd..f664dd0 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
@@ -95,14 +95,12 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private final OverseerCollectionMessageHandler ocmh;
private final TimeSource timeSource;
- private final DistribStateManager stateManager;
private final ZkStateReader zkStateReader;
private final SolrCloudManager cloudManager;
public CreateCollectionCmd(OverseerCollectionMessageHandler ocmh, CoreContainer cc, SolrCloudManager cloudManager) {
log.info("create CreateCollectionCmd");
this.ocmh = ocmh;
- this.stateManager = ocmh.cloudManager.getDistribStateManager();
this.timeSource = ocmh.cloudManager.getTimeSource();
this.zkStateReader = ocmh.zkStateReader;
this.cloudManager = cloudManager;
@@ -177,11 +175,11 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
// throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection '"+collectionName+"' already exists!");
// }
- createCollectionZkNode(stateManager, collectionName, collectionParams, configName);
+ createCollectionZkNode(cloudManager.getDistribStateManager(), collectionName, collectionParams, configName);
- OverseerCollectionMessageHandler.createConfNode(stateManager, configName, collectionName);
+ OverseerCollectionMessageHandler.createConfNode(cloudManager.getDistribStateManager(), configName, collectionName);
- DocCollection docCollection = buildDocCollection(stateManager, message, true);
+ DocCollection docCollection = buildDocCollection(cloudManager, message, true);
clusterState = clusterState.copyWith(collectionName, docCollection);
try {
replicaPositions = buildReplicaPositions(cloudManager, message, shardNames);
@@ -228,7 +226,7 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
ZkNodeProps props = new ZkNodeProps();
//props.getProperties().putAll(message.getProperties());
ZkNodeProps addReplicaProps = new ZkNodeProps(Overseer.QUEUE_OPERATION, ADDREPLICA.toString(), ZkStateReader.COLLECTION_PROP, collectionName, ZkStateReader.SHARD_ID_PROP,
- replicaPosition.shard, ZkStateReader.CORE_NAME_PROP, coreName, ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(), ZkStateReader.BASE_URL_PROP, baseUrl, ZkStateReader.NODE_NAME_PROP,
+ replicaPosition.shard, ZkStateReader.CORE_NAME_PROP, coreName, ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(), ZkStateReader.NODE_NAME_PROP,
nodeName, "node", nodeName, ZkStateReader.REPLICA_TYPE, replicaPosition.type.name(), ZkStateReader.NUM_SHARDS_PROP, message.getStr(ZkStateReader.NUM_SHARDS_PROP), "shards",
message.getStr("shards"), CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState)); props.getProperties().putAll(addReplicaProps.getProperties());
if (log.isDebugEnabled()) log.debug("Sending state update to populate clusterstate with new replica {}", props);
@@ -433,7 +431,7 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
}
}
- public static DocCollection buildDocCollection(DistribStateManager stateManager, ZkNodeProps message, boolean withDocRouter) {
+ public static DocCollection buildDocCollection(SolrCloudManager cloudManager, ZkNodeProps message, boolean withDocRouter) {
log.info("buildDocCollection {}", message);
String cName = message.getStr(NAME);
DocRouter router = null;
@@ -447,7 +445,7 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
Map<String,Slice> slices;
if (messageShardsObj instanceof Map) { // we are being explicitly told the slice data (e.g. coll restore)
- slices = Slice.loadAllFromMap(message.getStr(ZkStateReader.COLLECTION_PROP), (Map<String,Object>) messageShardsObj);
+ slices = Slice.loadAllFromMap((Replica.NodeNameToBaseUrl) cloudManager.getClusterStateProvider(), message.getStr(ZkStateReader.COLLECTION_PROP), (Map<String,Object>) messageShardsObj);
} else {
List<String> shardNames = new ArrayList<>();
if (withDocRouter) {
@@ -476,7 +474,7 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
sliceProps.put(Slice.RANGE, ranges == null ? null : ranges.get(i));
}
- slices.put(sliceName, new Slice(sliceName, null, sliceProps, message.getStr(ZkStateReader.COLLECTION_PROP)));
+ slices.put(sliceName, new Slice(sliceName, null, sliceProps, message.getStr(ZkStateReader.COLLECTION_PROP), (Replica.NodeNameToBaseUrl) cloudManager.getClusterStateProvider()));
}
}
@@ -499,7 +497,7 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
collectionProps.put("autoCreated", "true");
}
}
-
+ DistribStateManager stateManager = cloudManager.getDistribStateManager();
// TODO need to make this makePath calls efficient and not use zkSolrClient#makePath
for (String shardName : slices.keySet()) {
try {
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java
index e2b342a..91634d7 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java
@@ -348,7 +348,7 @@ public class DeleteReplicaCmd implements Cmd {
ZkNodeProps rep = new ZkNodeProps();
rep.getProperties().put("replica", replicaName);
rep.getProperties().put("collection", replica.getCollection());
- rep.getProperties().put(ZkStateReader.BASE_URL_PROP, replica.getBaseUrl());
+ rep.getProperties().put(ZkStateReader.NODE_NAME_PROP, replica.getNodeName());
log.info("Before slice remove replica {} {}", rep, clusterState);
clusterState = new SliceMutator(ocmh.cloudManager).removeReplica(clusterState, rep);
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/MaintainRoutedAliasCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/MaintainRoutedAliasCmd.java
index 260d66f..6dee0f2 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/MaintainRoutedAliasCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/MaintainRoutedAliasCmd.java
@@ -125,7 +125,7 @@ public class MaintainRoutedAliasCmd extends AliasCmd {
switch (action.actionType) {
case ENSURE_REMOVED:
if (exists) {
- ocmh.tpe.submit(
+ ocmh.overseer.getTaskExecutor().submit(
() -> {
try {
deleteTargetCollection(clusterState, results, aliasName, aliasesManager, action);
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
index 633e54a..980c47a 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
@@ -104,10 +104,10 @@ import static org.apache.solr.client.solrj.response.RequestStatusState.FAILED;
import static org.apache.solr.client.solrj.response.RequestStatusState.NOT_FOUND;
import static org.apache.solr.client.solrj.response.RequestStatusState.RUNNING;
import static org.apache.solr.client.solrj.response.RequestStatusState.SUBMITTED;
-import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.ELECTION_NODE_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_VALUE_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.REJOIN_AT_HEAD_PROP;
@@ -175,9 +175,9 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler,
final private LockTree lockTree = new LockTree();
- ExecutorService tpe = new ExecutorUtil.MDCAwareThreadPoolExecutor(5, 10, 0L, TimeUnit.MILLISECONDS,
- new SynchronousQueue<>(),
- new SolrNamedThreadFactory("OverseerCollectionMessageHandlerThreadFactory"));
+// ExecutorService tpe = new ExecutorUtil.MDCAwareThreadPoolExecutor(5, 10, 0L, TimeUnit.MILLISECONDS,
+// new SynchronousQueue<>(),
+// new SolrNamedThreadFactory("OverseerCollectionMessageHandlerThreadFactory"));
public static final Random RANDOM;
static {
@@ -374,19 +374,18 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler,
@SuppressWarnings("unchecked")
private AddReplicaCmd.Response processRebalanceLeaders(ClusterState clusterState, ZkNodeProps message, @SuppressWarnings({"rawtypes"})NamedList results)
throws Exception {
- checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, CORE_NAME_PROP, ELECTION_NODE_PROP,
- BASE_URL_PROP, REJOIN_AT_HEAD_PROP);
+ checkRequired(message, COLLECTION_PROP, NODE_NAME_PROP, SHARD_ID_PROP, CORE_NAME_PROP, ELECTION_NODE_PROP, REJOIN_AT_HEAD_PROP);
ModifiableSolrParams params = new ModifiableSolrParams();
params.set(COLLECTION_PROP, message.getStr(COLLECTION_PROP));
+ params.set(NODE_NAME_PROP, message.getStr(NODE_NAME_PROP));
params.set(SHARD_ID_PROP, message.getStr(SHARD_ID_PROP));
params.set(REJOIN_AT_HEAD_PROP, message.getStr(REJOIN_AT_HEAD_PROP));
params.set(CoreAdminParams.ACTION, CoreAdminAction.REJOINLEADERELECTION.toString());
params.set(CORE_NAME_PROP, message.getStr(CORE_NAME_PROP));
params.set(ELECTION_NODE_PROP, message.getStr(ELECTION_NODE_PROP));
- params.set(BASE_URL_PROP, message.getStr(BASE_URL_PROP));
- String baseUrl = message.getStr(BASE_URL_PROP);
+ String baseUrl = zkStateReader.getBaseUrlForNodeName(message.getStr(message.getStr(NODE_NAME_PROP)));
ShardRequest sreq = new ShardRequest();
sreq.nodeName = message.getStr(ZkStateReader.CORE_NAME_PROP);
// yes, they must use same admin handler path everywhere...
@@ -479,8 +478,7 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler,
Overseer.QUEUE_OPERATION, OverseerAction.DELETECORE.toLower(),
ZkStateReader.CORE_NAME_PROP, core,
ZkStateReader.NODE_NAME_PROP, replica.getStr(ZkStateReader.NODE_NAME_PROP),
- ZkStateReader.COLLECTION_PROP, collectionName,
- ZkStateReader.BASE_URL_PROP, replica.getStr(ZkStateReader.BASE_URL_PROP));
+ ZkStateReader.COLLECTION_PROP, collectionName);
overseer.offerStateUpdate(Utils.toJSON(m));
}
@@ -988,12 +986,6 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler,
@Override
public void close() throws IOException {
this.isClosed = true;
-
- if (tpe != null) {
- if (!tpe.isShutdown()) {
- ExecutorUtil.shutdownAndAwaitTermination(tpe);
- }
- }
latches.forEach(countDownLatch -> countDownLatch.countDown());
latches.clear();
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
index 9916c35..89cd1b1 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
@@ -202,7 +202,7 @@ public class RestoreCmd implements OverseerCollectionMessageHandler.Cmd {
Map<String, Slice> newSlices = new LinkedHashMap<>(backupSlices.size());
for (Slice backupSlice : backupSlices) {
newSlices.put(backupSlice.getName(),
- new Slice(backupSlice.getName(), Collections.emptyMap(), backupSlice.getProperties(),restoreCollectionName));
+ new Slice(backupSlice.getName(), Collections.emptyMap(), backupSlice.getProperties(),restoreCollectionName, ocmh.zkStateReader));
}
propMap.put(OverseerCollectionMessageHandler.SHARDS_PROP, newSlices);
}
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
index 24dac32..a2b9b22 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
@@ -517,7 +517,8 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
propMap.put(COLLECTION_PROP, collectionName);
propMap.put(SHARD_ID_PROP, sliceName);
propMap.put(REPLICA_TYPE, replicaPosition.type.name());
- propMap.put("node", subShardNodeName);
+ propMap.put(ZkStateReader.NODE_NAME_PROP, subShardNodeName);
+ //propMap.put("replica", solrCoreName);
propMap.put(CoreAdminParams.NAME, solrCoreName);
// copy over property params:
for (String key : message.keySet()) {
@@ -664,7 +665,7 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
collection = clusterState.getCollection(collectionName);
for (Map<String,Object> replica : replicas) {
clusterState = checkAndCompleteShardSplit(clusterState, collection, replica.get("name").toString(), replica.get("shard").toString(),
- new Replica(replica.get("name").toString(), replica, replica.get("collection").toString(), replica.get("shard").toString()));
+ new Replica(replica.get("name").toString(), replica, replica.get("collection").toString(), replica.get("shard").toString(), ocmh.zkStateReader));
}
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java
index e129941..b85230a 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java
@@ -72,7 +72,7 @@ public class ClusterStateMutator {
Map<String, Slice> slices;
if (messageShardsObj instanceof Map) { // we are being explicitly told the slice data (e.g. coll restore)
- slices = Slice.loadAllFromMap(cName, (Map<String, Object>)messageShardsObj);
+ slices = Slice.loadAllFromMap((Replica.NodeNameToBaseUrl) dataProvider.getClusterStateProvider(), cName, (Map<String, Object>)messageShardsObj);
} else {
List<String> shardNames = new ArrayList<>();
@@ -93,7 +93,7 @@ public class ClusterStateMutator {
Map<String, Object> sliceProps = new LinkedHashMap<>(1);
sliceProps.put(Slice.RANGE, ranges == null ? null : ranges.get(i));
- slices.put(sliceName, new Slice(sliceName, null, sliceProps,cName));
+ slices.put(sliceName, new Slice(sliceName, null, sliceProps,cName, (Replica.NodeNameToBaseUrl) dataProvider.getClusterStateProvider()));
}
}
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/CollectionMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/CollectionMutator.java
index 152a1eb..77d0638 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/CollectionMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/CollectionMutator.java
@@ -84,7 +84,7 @@ public class CollectionMutator {
if (shardParentNode != null) {
sliceProps.put("shard_parent_node", shardParentNode);
}
- collection = updateSlice(collectionName, collection, new Slice(shardId, replicas, sliceProps, collectionName));
+ collection = updateSlice(collectionName, collection, new Slice(shardId, replicas, sliceProps, collectionName, (Replica.NodeNameToBaseUrl) cloudManager.getClusterStateProvider()));
// TODO - fix, no makePath (ensure every path part exists), async, single node
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/NodeMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/NodeMutator.java
index a2be959..d095257 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/NodeMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/NodeMutator.java
@@ -37,7 +37,7 @@ public class NodeMutator {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
- public ClusterState downNode(ClusterState clusterState, ZkNodeProps message) {
+ public ClusterState downNode(Replica.NodeNameToBaseUrl nodeNameToBaseUrl, ClusterState clusterState, ZkNodeProps message) {
String nodeName = message.getStr(ZkStateReader.NODE_NAME_PROP);
@@ -65,13 +65,13 @@ public class NodeMutator {
log.debug("Update replica state for {} to {}", replica, Replica.State.DOWN);
Map<String, Object> props = replica.shallowCopy();
props.put(ZkStateReader.STATE_PROP, Replica.State.DOWN.toString());
- Replica newReplica = new Replica(replica.getName(), props, collection, slice.getName());
+ Replica newReplica = new Replica(replica.getName(), props, collection, slice.getName(), nodeNameToBaseUrl);
newReplicas.put(replica.getName(), newReplica);
needToUpdateCollection = true;
}
}
- Slice newSlice = new Slice(slice.getName(), newReplicas, slice.shallowCopy(),collection);
+ Slice newSlice = new Slice(slice.getName(), newReplicas, slice.shallowCopy(),collection, nodeNameToBaseUrl);
slicesCopy.put(slice.getName(), newSlice);
}
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
index 73d5c2a..22e2a22 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
@@ -77,7 +77,20 @@ public class ReplicaMutator {
Map<String, Object> replicaProps = new LinkedHashMap<>(replica.getProperties());
replicaProps.put(key, value);
- return new Replica(replica.getName(), replicaProps, replica.getCollection(), replica.getSlice());
+ return new Replica(replica.getName(), replicaProps, replica.getCollection(), replica.getSlice(), replica.getBaseUrl());
+ }
+
+ protected Replica setProperty(Replica replica, String key, String value, String key2, String value2) {
+ assert key != null;
+ assert value != null;
+
+ if (StringUtils.equalsIgnoreCase(replica.getStr(key), value))
+ return replica; // already the value we're going to set
+
+ Map<String, Object> replicaProps = new LinkedHashMap<>(replica.getProperties());
+ replicaProps.put(key, value);
+ replicaProps.put(key2, value2);
+ return new Replica(replica.getName(), replicaProps, replica.getCollection(), replica.getSlice(), (Replica.NodeNameToBaseUrl) cloudManager.getClusterStateProvider());
}
protected Replica unsetProperty(Replica replica, String key) {
@@ -86,11 +99,11 @@ public class ReplicaMutator {
if (!replica.containsKey(key)) return replica;
Map<String, Object> replicaProps = new LinkedHashMap<>(replica.getProperties());
replicaProps.remove(key);
- return new Replica(replica.getName(), replicaProps, replica.getCollection(), replica.getSlice());
+ return new Replica(replica.getName(), replicaProps, replica.getCollection(), replica.getSlice(), (Replica.NodeNameToBaseUrl) cloudManager.getClusterStateProvider());
}
protected Replica setLeader(Replica replica) {
- return setProperty(replica, ZkStateReader.LEADER_PROP, "true");
+ return setProperty(replica, ZkStateReader.LEADER_PROP, "true", ZkStateReader.STATE_PROP, Replica.State.ACTIVE.toString());
}
protected Replica unsetLeader(Replica replica) {
@@ -165,7 +178,7 @@ public class ReplicaMutator {
}
}
}
- Slice newSlice = new Slice(sliceName, replicas, collection.getSlice(sliceName).shallowCopy(),collectionName);
+ Slice newSlice = new Slice(sliceName, replicas, collection.getSlice(sliceName).shallowCopy(),collectionName, (Replica.NodeNameToBaseUrl) cloudManager.getClusterStateProvider());
DocCollection newCollection = CollectionMutator.updateSlice(collectionName, collection,
newSlice);
return clusterState.copyWith(collectionName, newCollection);
@@ -203,7 +216,7 @@ public class ReplicaMutator {
if (curProp == null) return null; // not there anyway, nothing to do.
Slice slice = collection.getSlice(sliceName);
- DocCollection newCollection = SliceMutator.updateReplica(collection,
+ DocCollection newCollection = SliceMutator.updateReplica((Replica.NodeNameToBaseUrl) cloudManager.getClusterStateProvider(), collection,
slice.getName(), replicaName, unsetProperty(replica, property));
return clusterState.copyWith(collectionName, newCollection);
}
@@ -245,7 +258,7 @@ public class ReplicaMutator {
String coreName = message.getStr(ZkStateReader.CORE_NAME_PROP);
DocCollection collection = clusterState.getCollectionOrNull(collectionName);
- if (!CloudUtil.replicaExists(clusterState, collectionName, sliceName, coreName)) {
+ if (collection.getReplica(coreName) == null) {
log.info("Failed to update state because the replica does not exist, {}", message);
return clusterState;
}
@@ -324,7 +337,7 @@ public class ReplicaMutator {
String shardParent = (String) replicaProps.remove(ZkStateReader.SHARD_PARENT_PROP);
- Replica replica = new Replica(coreName, replicaProps, collectionName, sliceName);
+ Replica replica = new Replica(coreName, replicaProps, collectionName, sliceName, (Replica.NodeNameToBaseUrl) cloudManager.getClusterStateProvider());
log.debug("Will update state for replica: {}", replica);
@@ -339,7 +352,7 @@ public class ReplicaMutator {
replicas = slice.getReplicasCopy();
replicas.put(replica.getName(), replica);
- slice = new Slice(sliceName, replicas, sliceProps, collectionName);
+ slice = new Slice(sliceName, replicas, sliceProps, collectionName, (Replica.NodeNameToBaseUrl) cloudManager.getClusterStateProvider());
DocCollection newCollection = CollectionMutator.updateSlice(collectionName, collection, slice);
log.debug("Collection is now: {}", newCollection);
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
index 4d60a81..9d092db 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
@@ -74,26 +74,27 @@ public class SliceMutator {
}
Replica replica = new Replica(coreName,
Utils.makeNonNullMap(
- ZkStateReader.BASE_URL_PROP, message.getStr(ZkStateReader.BASE_URL_PROP),
ZkStateReader.STATE_PROP, Replica.State.DOWN,
ZkStateReader.NODE_NAME_PROP, message.getStr(ZkStateReader.NODE_NAME_PROP),
ZkStateReader.NUM_SHARDS_PROP, message.getStr(ZkStateReader.NUM_SHARDS_PROP),
"shards", message.getStr("shards"),
- ZkStateReader.REPLICA_TYPE, message.get(ZkStateReader.REPLICA_TYPE)), coll, slice);
+ ZkStateReader.REPLICA_TYPE, message.get(ZkStateReader.REPLICA_TYPE)), coll, slice, (Replica.NodeNameToBaseUrl) cloudManager.getClusterStateProvider());
if (log.isDebugEnabled()) {
log.debug("addReplica(ClusterState, ZkNodeProps) - end");
}
- return clusterState.copyWith(coll, updateReplica(collection, slice, replica.getName(), replica));
+ return clusterState.copyWith(coll, updateReplica((Replica.NodeNameToBaseUrl) cloudManager.getClusterStateProvider(), collection, slice, replica.getName(), replica));
}
public ClusterState removeReplica(ClusterState clusterState, ZkNodeProps message) {
log.info("removeReplica(ClusterState clusterState={}, ZkNodeProps message={}) - start", clusterState, message);
- final String coreName = message.getStr(ZkStateReader.REPLICA_PROP);
+ String coreName = message.getStr(ZkStateReader.REPLICA_PROP);
+ if (coreName == null) {
+ coreName = message.getStr(ZkStateReader.CORE_NAME_PROP);
+ }
final String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
- final String baseUrl = message.getStr(ZkStateReader.BASE_URL_PROP);
DocCollection coll = clusterState.getCollectionOrNull(collection);
if (coll == null) {
@@ -104,10 +105,10 @@ public class SliceMutator {
for (Slice slice : coll.getSlices()) {
Replica replica = slice.getReplica(coreName);
- if (replica != null && (baseUrl == null || baseUrl.equals(replica.getBaseUrl()))) {
+ if (replica != null) {
Map<String, Replica> newReplicas = slice.getReplicasCopy();
newReplicas.remove(coreName);
- slice = new Slice(slice.getName(), newReplicas, slice.getProperties(), collection);
+ slice = new Slice(slice.getName(), newReplicas, slice.getProperties(), collection, (Replica.NodeNameToBaseUrl) cloudManager.getClusterStateProvider());
}
newSlices.put(slice.getName(), slice);
}
@@ -125,14 +126,8 @@ public class SliceMutator {
}
StringBuilder sb = new StringBuilder();
- String baseUrl = message.getStr(ZkStateReader.BASE_URL_PROP);
- String coreName = message.getStr(ZkStateReader.CORE_NAME_PROP);
- sb.append(baseUrl);
- if (baseUrl != null && !baseUrl.endsWith("/")) sb.append("/");
- sb.append(coreName == null ? "" : coreName);
- if (!(sb.substring(sb.length() - 1).equals("/"))) sb.append("/");
- String leaderUrl = sb.length() > 0 ? sb.toString() : null;
+ String coreName = message.getStr(ZkStateReader.CORE_NAME_PROP);
String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
@@ -150,7 +145,6 @@ public class SliceMutator {
return clusterState;
}
- Replica oldLeader = slice.getLeader();
final Map<String, Replica> newReplicas = new LinkedHashMap<>();
for (Replica replica : slice.getReplicas()) {
// TODO: this should only be calculated once and cached somewhere?
@@ -169,12 +163,12 @@ public class SliceMutator {
Map<String, Object> newSliceProps = slice.shallowCopy();
newSliceProps.put(Slice.REPLICAS, newReplicas);
- slice = new Slice(slice.getName(), newReplicas, slice.getProperties(), collectionName);
+ slice = new Slice(slice.getName(), newReplicas, slice.getProperties(), collectionName, (Replica.NodeNameToBaseUrl) cloudManager.getClusterStateProvider());
- if (log.isDebugEnabled()) {
- log.debug("setShardLeader(ClusterState, ZkNodeProps) - end");
- }
- return clusterState.copyWith(collectionName, CollectionMutator.updateSlice(collectionName, coll, slice));
+ clusterState = clusterState.copyWith(collectionName, CollectionMutator.updateSlice(collectionName, coll, slice));
+ log.info("setShardLeader {} {}", sliceName, clusterState);
+
+ return clusterState;
}
public ClusterState updateShardState(ClusterState clusterState, ZkNodeProps message) {
@@ -207,7 +201,7 @@ public class SliceMutator {
props.put(ZkStateReader.STATE_PROP, message.getStr(key));
// we need to use epoch time so that it's comparable across Overseer restarts
props.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(cloudManager.getTimeSource().getEpochTimeNs()));
- Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props, collectionName);
+ Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props, collectionName, (Replica.NodeNameToBaseUrl) cloudManager.getClusterStateProvider());
slicesCopy.put(slice.getName(), newSlice);
}
@@ -258,7 +252,7 @@ public class SliceMutator {
Map<String, Object> props = slice.shallowCopy();
props.put("routingRules", routingRules);
- Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props, collectionName);
+ Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props, collectionName, (Replica.NodeNameToBaseUrl) cloudManager.getClusterStateProvider());
return clusterState.copyWith( collectionName,
CollectionMutator.updateSlice(collectionName, collection, newSlice));
}
@@ -287,7 +281,7 @@ public class SliceMutator {
routingRules.remove(routeKeyStr); // no rules left
Map<String, Object> props = slice.shallowCopy();
props.put("routingRules", routingRules);
- Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props, collectionName);
+ Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props, collectionName, (Replica.NodeNameToBaseUrl) cloudManager.getClusterStateProvider());
if (log.isDebugEnabled()) {
log.debug("removeRoutingRule(ClusterState, ZkNodeProps) - end");
@@ -301,7 +295,7 @@ public class SliceMutator {
return null;
}
- public static DocCollection updateReplica(DocCollection collection, final String shard, String coreNodeName, final Replica replica) {
+ public static DocCollection updateReplica(Replica.NodeNameToBaseUrl nodeNameToBaseUrl, DocCollection collection, final String shard, String coreNodeName, final Replica replica) {
Slice slice;
if (log.isDebugEnabled()) {
log.debug("updateReplica(DocCollection collection={}, Slice slice={}, String coreNodeName={}, Replica replica={}) - start", collection, shard, coreNodeName, replica);
@@ -317,7 +311,7 @@ public class SliceMutator {
} else {
replicasCopy.put(replica.getName(), replica);
}
- Slice newSlice = new Slice(slice.getName(), replicasCopy, slice.getProperties(), collection.getName());
+ Slice newSlice = new Slice(slice.getName(), replicasCopy, slice.getProperties(), collection.getName(), nodeNameToBaseUrl);
if (log.isDebugEnabled()) {
log.debug("Old Slice: {}", slice);
log.debug("New Slice: {}", newSlice);
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ZkStateWriter.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ZkStateWriter.java
index 221ff1f..7573453 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/ZkStateWriter.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/ZkStateWriter.java
@@ -53,9 +53,6 @@ public class ZkStateWriter {
protected volatile Stats stats;
-
-
-
AtomicReference<Exception> lastFailedException = new AtomicReference<>();
private final Map<String,Integer> trackVersions = new ConcurrentHashMap<>();
@@ -161,6 +158,27 @@ public class ZkStateWriter {
});
cs.forEachCollection(collection -> {
+ Object removed = collection.getProperties().remove("replicationFactor");
+ if (removed != null) {
+ changed.set(true); // nocommit - only if really changed
+ }
+ removed = collection.getProperties().remove("pullReplicas");
+ if (removed != null) {
+ changed.set(true); // nocommit - only if really changed
+ }
+ removed = collection.getProperties().remove("maxShardsPerNode");
+ if (removed != null) {
+ changed.set(true); // nocommit - only if really changed
+ }
+ removed = collection.getProperties().remove("nrtReplicas");
+ if (removed != null) {
+ changed.set(true); // nocommit - only if really changed
+ }
+ removed = collection.getProperties().remove("tlogReplicas");
+ if (removed != null) {
+ changed.set(true); // nocommit - only if really changed
+ }
+
for (Slice slice : collection) {
Replica leader = slice.getLeader();
if (leader != null && leader.getState() != Replica.State.ACTIVE) {
@@ -168,6 +186,7 @@ public class ZkStateWriter {
leader.getProperties().remove("leader");
changed.set(true);
}
+
for (Replica replica : slice) {
String isLeader = replica.getProperty("leader");
log.info("isleader={} slice={} state={} sliceLeader={}", isLeader, slice.getName(), slice.getState(), slice.getLeader());
@@ -177,7 +196,11 @@ public class ZkStateWriter {
changed.set(true); // nocommit - only if really changed
}
- Object removed = replica.getProperties().remove("numShards");
+ removed = replica.getProperties().remove("numShards");
+ if (removed != null) {
+ changed.set(true); // nocommit - only if really changed
+ }
+ removed = replica.getProperties().remove("base_url");
if (removed != null) {
changed.set(true); // nocommit - only if really changed
}
diff --git a/solr/core/src/java/org/apache/solr/core/BlobRepository.java b/solr/core/src/java/org/apache/solr/core/BlobRepository.java
index 2e5d765..a69ea7a 100644
--- a/solr/core/src/java/org/apache/solr/core/BlobRepository.java
+++ b/solr/core/src/java/org/apache/solr/core/BlobRepository.java
@@ -54,7 +54,6 @@ import org.slf4j.LoggerFactory;
import static org.apache.solr.common.SolrException.ErrorCode.SERVER_ERROR;
import static org.apache.solr.common.SolrException.ErrorCode.SERVICE_UNAVAILABLE;
-import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
/**
* The purpose of this class is to store the Jars loaded in memory and to keep only one copy of the Jar in a single node.
@@ -206,7 +205,7 @@ public class BlobRepository {
*/
ByteBuffer fetchBlob(String key) {
Replica replica = getSystemCollReplica();
- String url = replica.getStr(BASE_URL_PROP) + "/" + CollectionAdminParams.SYSTEM_COLL + "/blob/" + key + "?wt=filestream";
+ String url = replica.getBaseUrl() + "/" + CollectionAdminParams.SYSTEM_COLL + "/blob/" + key + "?wt=filestream";
return fetchFromUrl(key, url);
}
diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
index b857a7d..379a2eb 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
@@ -1847,8 +1847,7 @@ public class CoreContainer implements Closeable {
Overseer.QUEUE_OPERATION, OverseerAction.DELETECORE.toLower(),
ZkStateReader.CORE_NAME_PROP, core,
ZkStateReader.NODE_NAME_PROP, nodeName,
- ZkStateReader.COLLECTION_PROP, collectionName,
- ZkStateReader.BASE_URL_PROP, baseUrl);
+ ZkStateReader.COLLECTION_PROP, collectionName);
getZkController().getOverseer().offerStateUpdate(Utils.toJSON(m));
}
diff --git a/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java b/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java
index 693f9b8..116f2bc 100644
--- a/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java
+++ b/solr/core/src/java/org/apache/solr/core/backup/BackupManager.java
@@ -139,7 +139,7 @@ public class BackupManager {
try (IndexInput is = repository.openInput(zkStateDir, COLLECTION_PROPS_FILE, IOContext.DEFAULT)) {
byte[] arr = new byte[(int) is.length()]; // probably ok since the json file should be small.
is.readBytes(arr, 0, (int) is.length());
- ClusterState c_state = ClusterState.createFromJson(-1, arr, Collections.emptySet());
+ ClusterState c_state = ClusterState.createFromJson(zkStateReader, -1, arr, Collections.emptySet());
return c_state.getCollection(collectionName);
}
}
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ColStatus.java b/solr/core/src/java/org/apache/solr/handler/admin/ColStatus.java
index 30c00c5..447c11c 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/ColStatus.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/ColStatus.java
@@ -64,11 +64,13 @@ public class ColStatus {
public static final String RAW_SIZE_DETAILS_PROP = SegmentsInfoRequestHandler.RAW_SIZE_DETAILS_PARAM;
public static final String RAW_SIZE_SAMPLING_PERCENT_PROP = SegmentsInfoRequestHandler.RAW_SIZE_SAMPLING_PERCENT_PARAM;
public static final String SEGMENTS_PROP = "segments";
+ private final ZkStateReader zkStateReader;
- public ColStatus(SolrClientCache solrClientCache, ClusterState clusterState, ZkNodeProps props) {
+ public ColStatus(SolrClientCache solrClientCache, ZkStateReader zkStateReader, ClusterState clusterState, ZkNodeProps props) {
this.props = props;
this.solrClientCache = solrClientCache;
this.clusterState = clusterState;
+ this.zkStateReader = zkStateReader;
}
@SuppressWarnings({"unchecked"})
@@ -168,7 +170,7 @@ public class ColStatus {
if (!leader.isActive(clusterState.getLiveNodes())) {
continue;
}
- String url = ZkCoreNodeProps.getCoreUrl(leader);
+ String url = leader.getCoreUrl();
SolrClient client = solrClientCache.getHttpSolrClient(url);
try {
ModifiableSolrParams params = new ModifiableSolrParams();
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
index e97fa99..f344d4d 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
@@ -532,7 +532,7 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
if (props.containsKey(CoreAdminParams.NAME) && !props.containsKey(COLLECTION_PROP)) {
props.put(COLLECTION_PROP, props.get(CoreAdminParams.NAME));
}
- new ColStatus(h.coreContainer.getSolrClientCache(),
+ new ColStatus(h.coreContainer.getSolrClientCache(), h.coreContainer.getZkController().getZkStateReader(),
h.coreContainer.getZkController().getZkStateReader().getClusterState(), new ZkNodeProps(props))
.getColStatus(rsp.getValues());
return null;
@@ -590,7 +590,7 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
Replica leaderProps = docCollection.getLeader(shard);
ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
- try (HttpSolrClient client = new Builder(nodeProps.getBaseUrl())
+ try (HttpSolrClient client = new Builder(leaderProps.getBaseUrl())
.withConnectionTimeout(15000)
.withSocketTimeout(60000)
.markInternalRequest()
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java b/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
index a235fc4..9e0e90b0 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
@@ -404,7 +404,7 @@ class RebalanceLeaders {
propMap.put(SHARD_ID_PROP, slice.getName());
propMap.put(QUEUE_OPERATION, REBALANCELEADERS.toLower());
propMap.put(CORE_NAME_PROP, core);
- propMap.put(ZkStateReader.BASE_URL_PROP, replica.getProperties().get(ZkStateReader.BASE_URL_PROP));
+ propMap.put(ZkStateReader.NODE_NAME_PROP, replica.getNodeName());
propMap.put(REJOIN_AT_HEAD_PROP, Boolean.toString(rejoinAtHead)); // Get ourselves to be first in line.
propMap.put(ELECTION_NODE_PROP, electionNode);
String asyncId = REBALANCELEADERS.toLower() + "_" + core + "_" + Math.abs(System.nanoTime());
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/RequestSyncShardOp.java b/solr/core/src/java/org/apache/solr/handler/admin/RequestSyncShardOp.java
index 7b9c562..8b23eca 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/RequestSyncShardOp.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/RequestSyncShardOp.java
@@ -60,11 +60,10 @@ class RequestSyncShardOp implements CoreAdminHandler.CoreAdminOp {
syncStrategy = new SyncStrategy(core.getCoreContainer());
Map<String, Object> props = new HashMap<>();
- props.put(ZkStateReader.BASE_URL_PROP, zkController.getBaseUrl());
props.put(ZkStateReader.CORE_NAME_PROP, cname);
props.put(ZkStateReader.NODE_NAME_PROP, zkController.getNodeName());
- Replica replica = new Replica(cname, props, null, null);
+ Replica replica = new Replica(cname, props, null, null, zkController.zkStateReader);
boolean success = syncStrategy.sync(zkController, core, replica, true).isSuccess();
// solrcloud_debug
diff --git a/solr/core/src/java/org/apache/solr/handler/component/CloudReplicaSource.java b/solr/core/src/java/org/apache/solr/handler/component/CloudReplicaSource.java
index f9d759d..3509aee 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/CloudReplicaSource.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/CloudReplicaSource.java
@@ -136,7 +136,7 @@ class CloudReplicaSource implements ReplicaSource {
.filter(replica -> !builder.onlyNrt || (replica.getType() == Replica.Type.NRT || (replica.getType() == Replica.Type.TLOG && isShardLeader.test(replica))))
.collect(Collectors.toList());
builder.replicaListTransformer.transform(list);
- List<String> collect = list.stream().map(Replica::getCoreUrl).collect(Collectors.toList());
+ List<String> collect = list.stream().map(replica -> replica.getCoreUrl()).collect(Collectors.toList());
builder.hostChecker.checkWhitelist(clusterState, shardsParam, collect);
return collect;
}
diff --git a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
index d89d02b..4a01c29 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
@@ -320,7 +320,7 @@ public class HttpShardHandler extends ShardHandler {
if (canShortCircuit(rb.slices, onlyNrt, params, cloudDescriptor)) {
rb.isDistrib = false;
- rb.shortCircuitedURL = ZkCoreNodeProps.getCoreUrl(zkController.getBaseUrl(), coreDescriptor.getName());
+ rb.shortCircuitedURL = Replica.getCoreUrl(zkController.getBaseUrl(), coreDescriptor.getName());
return;
// We shouldn't need to do anything to handle "shard.rows" since it was previously meant to be an optimization?
}
diff --git a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
index 740edd5..fbd9c20 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
@@ -100,7 +100,7 @@ public class HttpShardHandlerFactory extends ShardHandlerFactory implements org.
protected final Random r = new Random();
- private RequestReplicaListTransformerGenerator requestReplicaListTransformerGenerator = new RequestReplicaListTransformerGenerator();
+ private volatile RequestReplicaListTransformerGenerator requestReplicaListTransformerGenerator;
// URL scheme to be used in distributed search.
static final String INIT_URL_SCHEME = "urlScheme";
diff --git a/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java b/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java
index 269e319..76f89c1 100644
--- a/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java
+++ b/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java
@@ -111,7 +111,6 @@ import org.eclipse.jetty.http.HttpVersion;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
@@ -1132,13 +1131,13 @@ public class HttpSolrCall {
// if it's by core name, make sure they match
continue;
}
- if (replica.getStr(BASE_URL_PROP).equals(cores.getZkController().getBaseUrl())) {
+ if (replica.getBaseUrl().equals(cores.getZkController().getBaseUrl())) {
// don't count a local core
continue;
}
if (origCorename != null) {
- coreUrl = replica.getStr(BASE_URL_PROP) + "/" + origCorename;
+ coreUrl = replica.getBaseUrl() + "/" + origCorename;
} else {
coreUrl = replica.getCoreUrl();
if (coreUrl.endsWith("/")) {
diff --git a/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java b/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
index e21ceed..571dd61 100644
--- a/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
+++ b/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
@@ -54,6 +54,7 @@ public class SolrCmdDistributor implements Closeable {
private static final int MAX_RETRIES_ON_FORWARD = 1;
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private final ConnectionManager.IsClosed isClosed;
+ private final ZkStateReader zkStateReader;
private volatile boolean finished = false; // see finish()
@@ -64,14 +65,16 @@ public class SolrCmdDistributor implements Closeable {
private final Http2SolrClient solrClient;
private volatile boolean closed;
- public SolrCmdDistributor(UpdateShardHandler updateShardHandler) {
+ public SolrCmdDistributor(ZkStateReader zkStateReader, UpdateShardHandler updateShardHandler) {
assert ObjectReleaseTracker.track(this);
+ this.zkStateReader = zkStateReader;
this.solrClient = new Http2SolrClient.Builder().withHttpClient(updateShardHandler.getTheSharedHttpClient()).idleTimeout((int) TimeUnit.MINUTES.toMillis(5)).build();
isClosed = null;
}
- public SolrCmdDistributor(UpdateShardHandler updateShardHandler, ConnectionManager.IsClosed isClosed) {
+ public SolrCmdDistributor(ZkStateReader zkStateReader, UpdateShardHandler updateShardHandler, ConnectionManager.IsClosed isClosed) {
assert ObjectReleaseTracker.track(this);
+ this.zkStateReader = zkStateReader;
this.solrClient = new Http2SolrClient.Builder().withHttpClient(updateShardHandler.getTheSharedHttpClient()).idleTimeout((int) TimeUnit.MINUTES.toMillis(5)).build();
this.isClosed = isClosed;
}
@@ -396,21 +399,23 @@ public class SolrCmdDistributor implements Closeable {
}
public static class StdNode extends Node {
+ private final ZkStateReader zkStateReader;
protected Replica nodeProps;
protected String collection;
protected String shardId;
private final boolean retry;
private final int maxRetries;
- public StdNode(Replica nodeProps) {
- this(nodeProps, null, null, 0);
+ public StdNode(ZkStateReader zkStateReader, Replica nodeProps) {
+ this(zkStateReader, nodeProps, null, null, 0);
}
- public StdNode(Replica nodeProps, String collection, String shardId) {
- this(nodeProps, collection, shardId, 0);
+ public StdNode(ZkStateReader zkStateReader, Replica nodeProps, String collection, String shardId) {
+ this(zkStateReader, nodeProps, collection, shardId, 0);
}
- public StdNode(Replica nodeProps, String collection, String shardId, int maxRetries) {
+ public StdNode(ZkStateReader zkStateReader, Replica nodeProps, String collection, String shardId, int maxRetries) {
+ this.zkStateReader = zkStateReader;
this.nodeProps = nodeProps;
this.collection = collection;
this.shardId = shardId;
@@ -506,9 +511,8 @@ public class SolrCmdDistributor implements Closeable {
private ZkStateReader zkStateReader;
- public ForwardNode(Replica nodeProps, ZkStateReader zkStateReader, String collection, String shardId) {
- super(nodeProps, collection, shardId);
- this.zkStateReader = zkStateReader;
+ public ForwardNode(ZkStateReader zkStateReader, Replica nodeProps, String collection, String shardId) {
+ super(zkStateReader, nodeProps, collection, shardId);
this.collection = collection;
this.shardId = shardId;
}
diff --git a/solr/core/src/java/org/apache/solr/update/processor/DistributedZkUpdateProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/DistributedZkUpdateProcessor.java
index abe4b26..3bf4ff1 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/DistributedZkUpdateProcessor.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/DistributedZkUpdateProcessor.java
@@ -115,7 +115,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
desc = req.getCore().getCoreDescriptor();
cloudDesc = req.getCore().getCoreDescriptor().getCloudDescriptor();
zkController = cc.getZkController();
- cmdDistrib = new SolrCmdDistributor(cc.getUpdateShardHandler(), new IsCCClosed(req));
+ cmdDistrib = new SolrCmdDistributor(zkController.getZkStateReader(), cc.getUpdateShardHandler(), new IsCCClosed(req));
try {
cloneRequiredOnLeader = isCloneRequiredOnLeader(next);
collection = cloudDesc.getCollectionName();
@@ -243,7 +243,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
if (log.isDebugEnabled()) log.debug("processCommit - send commit to replicas nodes={}",
useNodes);
- params.set(DISTRIB_FROM, ZkCoreNodeProps
+ params.set(DISTRIB_FROM, Replica
.getCoreUrl(zkController.getBaseUrl(), req.getCore().getName()));
List<SolrCmdDistributor.Node> finalUseNodes = useNodes;
@@ -265,7 +265,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
if (useNodes != null && useNodes.size() > 0) {
if (log.isDebugEnabled()) log.debug("processCommit - send commit to leaders nodes={}",
useNodes);
- params.set(DISTRIB_FROM, ZkCoreNodeProps
+ params.set(DISTRIB_FROM, Replica
.getCoreUrl(zkController.getBaseUrl(),
req.getCore().getName()));
@@ -310,7 +310,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
if (subShardLeaders != null && !subShardLeaders.isEmpty()) {
ModifiableSolrParams params = new ModifiableSolrParams(filterParams(req.getParams()));
params.set(DISTRIB_UPDATE_PARAM, DistribPhase.FROMLEADER.toString());
- params.set(DISTRIB_FROM, ZkCoreNodeProps.getCoreUrl(zkController.getBaseUrl(), req.getCore().getName()));
+ params.set(DISTRIB_FROM, Replica.getCoreUrl(zkController.getBaseUrl(), req.getCore().getName()));
params.set(DISTRIB_FROM_PARENT, cloudDesc.getShardId());
cmdDistrib.distribAdd(cmd, subShardLeaders, params, true);
}
@@ -319,7 +319,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
if (nodesByRoutingRules != null && !nodesByRoutingRules.isEmpty()) {
ModifiableSolrParams params = new ModifiableSolrParams(filterParams(req.getParams()));
params.set(DISTRIB_UPDATE_PARAM, DistribPhase.FROMLEADER.toString());
- params.set(DISTRIB_FROM, ZkCoreNodeProps.getCoreUrl(zkController.getBaseUrl(), req.getCore().getName()));
+ params.set(DISTRIB_FROM, Replica.getCoreUrl(zkController.getBaseUrl(), req.getCore().getName()));
params.set(DISTRIB_FROM_COLLECTION, collection);
params.set(DISTRIB_FROM_SHARD, cloudDesc.getShardId());
@@ -339,7 +339,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
if (nodes != null) {
ModifiableSolrParams params = new ModifiableSolrParams(filterParams(req.getParams()));
params.set(DISTRIB_UPDATE_PARAM, (isLeader || isSubShardLeader ? DistribPhase.FROMLEADER.toString() : DistribPhase.TOLEADER.toString()));
- params.set(DISTRIB_FROM, ZkCoreNodeProps.getCoreUrl(zkController.getBaseUrl(), req.getCore().getName()));
+ params.set(DISTRIB_FROM, Replica.getCoreUrl(zkController.getBaseUrl(), req.getCore().getName()));
if (req.getParams().get(UpdateRequest.MIN_REPFACT) != null) {
// TODO: Kept for rolling upgrades only. Should be removed in Solr 9
@@ -406,7 +406,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
if (subShardLeaders != null && !subShardLeaders.isEmpty()) {
ModifiableSolrParams params = new ModifiableSolrParams(filterParams(req.getParams()));
params.set(DISTRIB_UPDATE_PARAM, DistribPhase.FROMLEADER.toString());
- params.set(DISTRIB_FROM, ZkCoreNodeProps.getCoreUrl(
+ params.set(DISTRIB_FROM, Replica.getCoreUrl(
zkController.getBaseUrl(), req.getCore().getName()));
params.set(DISTRIB_FROM_PARENT, cloudDesc.getShardId());
cmdDistrib.distribDelete(cmd, subShardLeaders, params, true, null, null);
@@ -416,7 +416,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
if (nodesByRoutingRules != null && !nodesByRoutingRules.isEmpty()) {
ModifiableSolrParams params = new ModifiableSolrParams(filterParams(req.getParams()));
params.set(DISTRIB_UPDATE_PARAM, DistribPhase.FROMLEADER.toString());
- params.set(DISTRIB_FROM, ZkCoreNodeProps.getCoreUrl(
+ params.set(DISTRIB_FROM, Replica.getCoreUrl(
zkController.getBaseUrl(), req.getCore().getName()));
params.set(DISTRIB_FROM_COLLECTION, collection);
params.set(DISTRIB_FROM_SHARD, cloudDesc.getShardId());
@@ -429,7 +429,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
params.set(DISTRIB_UPDATE_PARAM,
(isLeader || isSubShardLeader ? DistribPhase.FROMLEADER.toString()
: DistribPhase.TOLEADER.toString()));
- params.set(DISTRIB_FROM, ZkCoreNodeProps.getCoreUrl(
+ params.set(DISTRIB_FROM, Replica.getCoreUrl(
zkController.getBaseUrl(), req.getCore().getName()));
if (req.getParams().get(UpdateRequest.MIN_REPFACT) != null) {
@@ -465,7 +465,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
ModifiableSolrParams outParams = new ModifiableSolrParams(filterParams(req.getParams()));
outParams.set(DISTRIB_UPDATE_PARAM, DistribPhase.TOLEADER.toString());
- outParams.set(DISTRIB_FROM, ZkCoreNodeProps.getCoreUrl(
+ outParams.set(DISTRIB_FROM, Replica.getCoreUrl(
zkController.getBaseUrl(), req.getCore().getName()));
SolrParams params = req.getParams();
@@ -496,7 +496,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
// don't forward to ourself
leaderForAnyShard = true;
} else {
- leaders.add(new SolrCmdDistributor.ForwardNode(leader, zkController.getZkStateReader(), collection, sliceName));
+ leaders.add(new SolrCmdDistributor.ForwardNode(zkController.getZkStateReader(), leader, collection, sliceName));
}
}
@@ -543,7 +543,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
ModifiableSolrParams params = new ModifiableSolrParams(filterParams(req.getParams()));
params.set(CommonParams.VERSION_FIELD, Long.toString(cmd.getVersion()));
params.set(DISTRIB_UPDATE_PARAM, DistribPhase.FROMLEADER.toString());
- params.set(DISTRIB_FROM, ZkCoreNodeProps.getCoreUrl(
+ params.set(DISTRIB_FROM, Replica.getCoreUrl(
zkController.getBaseUrl(), req.getCore().getName()));
boolean subShardLeader = false;
@@ -559,7 +559,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
if (replicaProps != null) {
final List<SolrCmdDistributor.Node> myReplicas = new ArrayList<>(replicaProps.size());
for (Replica replicaProp : replicaProps) {
- myReplicas.add(new SolrCmdDistributor.StdNode(replicaProp, collection, myShardId));
+ myReplicas.add(new SolrCmdDistributor.StdNode(zkController.getZkStateReader(), replicaProp, collection, myShardId));
}
cmdDistrib.distribDelete(cmd, myReplicas, params, false, rollupReplicationTracker, leaderReplicationTracker);
}
@@ -577,7 +577,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
if (nodesByRoutingRules != null && !nodesByRoutingRules.isEmpty()) {
params = new ModifiableSolrParams(filterParams(req.getParams()));
params.set(DISTRIB_UPDATE_PARAM, DistribPhase.FROMLEADER.toString());
- params.set(DISTRIB_FROM, ZkCoreNodeProps.getCoreUrl(
+ params.set(DISTRIB_FROM, Replica.getCoreUrl(
zkController.getBaseUrl(), req.getCore().getName()));
params.set(DISTRIB_FROM_COLLECTION, collection);
params.set(DISTRIB_FROM_SHARD, cloudDesc.getShardId());
@@ -607,7 +607,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
if (replicaProps != null) {
nodes = new ArrayList<>(replicaProps.size());
for (Replica props : replicaProps) {
- nodes.add(new SolrCmdDistributor.StdNode(props, collection, shardId));
+ nodes.add(new SolrCmdDistributor.StdNode(zkController.getZkStateReader(), props, collection, shardId));
}
}
} catch (InterruptedException e) {
@@ -770,7 +770,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
} else if (!clusterState.getLiveNodes().contains(replica.getNodeName()) || replica.getState() == Replica.State.DOWN) {
skippedCoreNodeNames.add(replica.getName());
} else {
- nodes.add(new SolrCmdDistributor.StdNode(replica, collection, shardId, maxRetriesToFollowers));
+ nodes.add(new SolrCmdDistributor.StdNode(zkController.getZkStateReader(), replica, collection, shardId, maxRetriesToFollowers));
}
}
return nodes;
@@ -779,7 +779,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
// I need to forward on to the leader...
forwardToLeader = true;
return Collections.singletonList(
- new SolrCmdDistributor.ForwardNode(leaderReplica, zkController.getZkStateReader(), collection, shardId));
+ new SolrCmdDistributor.ForwardNode(zkController.getZkStateReader(), leaderReplica, collection, shardId));
}
} catch (InterruptedException e) {
@@ -835,7 +835,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
if (replica != null) {
Replica nodeProps = replica;
nodeProps.getProperties().put(ZkStateReader.CORE_NAME_PROP, replica.getName());
- urls.add(new SolrCmdDistributor.StdNode(nodeProps, collection, replicas.getName()));
+ urls.add(new SolrCmdDistributor.StdNode(zkController.getZkStateReader(), nodeProps, collection, replicas.getName()));
}
continue;
}
@@ -848,7 +848,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
Replica nodeProps = entry.getValue();
nodeProps.getProperties().put(ZkStateReader.CORE_NAME_PROP, entry.getValue().getName());
if (clusterState.liveNodesContain(nodeProps.getNodeName())) {
- urls.add(new SolrCmdDistributor.StdNode(nodeProps, collection, replicas.getName()));
+ urls.add(new SolrCmdDistributor.StdNode(zkController.getZkStateReader(), nodeProps, collection, replicas.getName()));
}
}
}
@@ -930,7 +930,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
|| replica.getState() == Replica.State.DOWN) {
skippedCoreNodeNames.add(replica.getName());
} else {
- nodes.add(new SolrCmdDistributor.StdNode(replica, collection, shardId));
+ nodes.add(new SolrCmdDistributor.StdNode(zkController.getZkStateReader(), replica, collection, shardId));
}
}
return nodes;
@@ -953,7 +953,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
// slice leader can be null because node/shard is created zk before leader election
if (sliceLeader != null && clusterState.liveNodesContain(sliceLeader.getNodeName())) {
if (nodes == null) nodes = new ArrayList<>();
- nodes.add(new SolrCmdDistributor.StdNode(sliceLeader, coll.getName(), aslice.getName()));
+ nodes.add(new SolrCmdDistributor.StdNode(zkController.getZkStateReader(), sliceLeader, coll.getName(), aslice.getName()));
}
}
}
@@ -981,7 +981,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
final Slice[] activeSlices = docCollection.getActiveSlicesArr();
Slice any = activeSlices[0];
if (nodes == null) nodes = new ArrayList<>();
- nodes.add(new SolrCmdDistributor.StdNode(any.getLeader()));
+ nodes.add(new SolrCmdDistributor.StdNode(zkController.getZkStateReader(), any.getLeader()));
}
}
return nodes;
@@ -1005,7 +1005,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
}
Replica targetLeader = targetColl.getLeader(activeSlices.iterator().next().getName());
nodes = new ArrayList<>(1);
- nodes.add(new SolrCmdDistributor.StdNode(targetLeader));
+ nodes.add(new SolrCmdDistributor.StdNode(zkController.getZkStateReader(), targetLeader));
break;
}
}
diff --git a/solr/core/src/java/org/apache/solr/update/processor/RoutedAliasUpdateProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/RoutedAliasUpdateProcessor.java
index e8e9978..1b30e6f 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/RoutedAliasUpdateProcessor.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/RoutedAliasUpdateProcessor.java
@@ -146,7 +146,7 @@ public class RoutedAliasUpdateProcessor extends UpdateRequestProcessor {
this.thisCollection = core.getCoreDescriptor().getCloudDescriptor().getCollectionName();
this.req = req;
this.zkController = cc.getZkController();
- this.cmdDistrib = new SolrCmdDistributor(cc.getUpdateShardHandler());
+ this.cmdDistrib = new SolrCmdDistributor(zkController.zkStateReader, cc.getUpdateShardHandler());
@@ -163,7 +163,7 @@ public class RoutedAliasUpdateProcessor extends UpdateRequestProcessor {
outParams.set(DISTRIB_UPDATE_PARAM, DistribPhase.NONE.toString());
// Signal this is a distributed search from this URP (see #wrap())
outParams.set(ALIAS_DISTRIB_UPDATE_PARAM, DistribPhase.TOLEADER.toString());
- outParams.set(DISTRIB_FROM, ZkCoreNodeProps.getCoreUrl(zkController.getBaseUrl(), core.getName()));
+ outParams.set(DISTRIB_FROM, Replica.getCoreUrl(zkController.getBaseUrl(), core.getName()));
outParamsToLeader = outParams;
}
@@ -267,7 +267,7 @@ public class RoutedAliasUpdateProcessor extends UpdateRequestProcessor {
throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE,
"No 'leader' replica available for shard " + slice.getName() + " of collection " + collection);
}
- return new SolrCmdDistributor.ForwardNode(leader, zkController.getZkStateReader(),
+ return new SolrCmdDistributor.ForwardNode(zkController.zkStateReader, leader,
collection, slice.getName());
}
diff --git a/solr/core/src/java/org/apache/solr/util/ExportTool.java b/solr/core/src/java/org/apache/solr/util/ExportTool.java
index a08e1cf..979f466 100644
--- a/solr/core/src/java/org/apache/solr/util/ExportTool.java
+++ b/solr/core/src/java/org/apache/solr/util/ExportTool.java
@@ -67,6 +67,7 @@ import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.CursorMarkParams;
import org.apache.solr.common.params.MapSolrParams;
@@ -184,7 +185,7 @@ public class ExportTool extends SolrCLI.ToolBase {
@Override
protected void runImpl(CommandLine cli) throws Exception {
String url = cli.getOptionValue("url");
- Info info = new MultiThreadedRunner(url);
+ Info info = new MultiThreadedRunner(null, url);
info.query = cli.getOptionValue("query", "*:*");
info.setOutFormat(cli.getOptionValue("out"), cli.getOptionValue("format"));
info.fields = cli.getOptionValue("fields");
@@ -378,6 +379,7 @@ public class ExportTool extends SolrCLI.ToolBase {
}
static class MultiThreadedRunner extends Info {
+ private final ZkStateReader zkStateReader;
ExecutorService producerThreadpool, consumerThreadpool;
ArrayBlockingQueue<SolrDocument> queue = new ArrayBlockingQueue(1000);
SolrDocument EOFDOC = new SolrDocument();
@@ -386,8 +388,9 @@ public class ExportTool extends SolrCLI.ToolBase {
private final long startTime ;
@SuppressForbidden(reason = "Need to print out time")
- public MultiThreadedRunner(String url) {
+ public MultiThreadedRunner(ZkStateReader zkStateReader, String url) {
super(url);
+ this.zkStateReader = zkStateReader;
startTime= System.currentTimeMillis();
}
@@ -458,7 +461,7 @@ public class ExportTool extends SolrCLI.ToolBase {
Slice slice = entry.getValue();
Replica replica = slice.getLeader();
if (replica == null) replica = slice.getReplicas().iterator().next();// get a random replica
- CoreHandler coreHandler = new CoreHandler(replica);
+ CoreHandler coreHandler = new CoreHandler(replica, zkStateReader);
corehandlers.put(replica.getName(), coreHandler);
}
}
@@ -492,10 +495,12 @@ public class ExportTool extends SolrCLI.ToolBase {
class CoreHandler {
final Replica replica;
+ private final ZkStateReader zkStateReader;
long expectedDocs;
AtomicLong receivedDocs = new AtomicLong();
- CoreHandler(Replica replica) {
+ CoreHandler(Replica replica, ZkStateReader zkStateReader) {
+ this.zkStateReader = zkStateReader;
this.replica = replica;
}
diff --git a/solr/core/src/test/org/apache/solr/cloud/ClusterStateMockUtil.java b/solr/core/src/test/org/apache/solr/cloud/ClusterStateMockUtil.java
index 87629d2..d059dd5 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ClusterStateMockUtil.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ClusterStateMockUtil.java
@@ -137,7 +137,7 @@ public class ClusterStateMockUtil {
case "s":
replicas = new HashMap<>();
if(collName == null) collName = "collection" + (collectionStates.size() + 1);
- slice = new Slice(sliceName = "slice" + (slices.size() + 1), replicas, null, collName);
+ slice = new Slice(sliceName = "slice" + (slices.size() + 1), replicas, null, collName, nodeName -> "http://" + nodeName);
slices.put(slice.getName(), slice);
// hack alert: the DocCollection constructor copies over active slices to its active slice map in the constructor
@@ -171,11 +171,11 @@ public class ClusterStateMockUtil {
if (!leaderFound && !m.group(1).equals("p")) {
replicaPropMap.put(Slice.LEADER, "true");
}
- replica = new Replica(replicaName, replicaPropMap, collName, sliceName);
+ replica = new Replica(replicaName, replicaPropMap, collName, sliceName, nodeName -> "http://" + nodeName);
replicas.put(replica.getName(), replica);
// hack alert: re-create slice with existing data and new replicas map so that it updates its internal leader attribute
- slice = new Slice(slice.getName(), replicas, null, collName);
+ slice = new Slice(slice.getName(), replicas, null, collName, nodeName -> "http://" + nodeName);
slices.put(slice.getName(), slice);
// we don't need to update doc collection again because we aren't adding a new slice or changing its state
break;
@@ -232,7 +232,6 @@ public class ClusterStateMockUtil {
Map<String,Object> replicaPropMap = new HashMap<>();
replicaPropMap.put(ZkStateReader.NODE_NAME_PROP, "baseUrl" + node + "_");
- replicaPropMap.put(ZkStateReader.BASE_URL_PROP, "http://baseUrl" + node);
replicaPropMap.put(ZkStateReader.STATE_PROP, state.toString());
replicaPropMap.put(ZkStateReader.CORE_NAME_PROP, sliceName + "_" + replicaName);
replicaPropMap.put(ZkStateReader.REPLICA_TYPE, replicaType.name());
diff --git a/solr/core/src/test/org/apache/solr/cloud/ClusterStateTest.java b/solr/core/src/test/org/apache/solr/cloud/ClusterStateTest.java
index 2177672..9d9db69 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ClusterStateTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ClusterStateTest.java
@@ -48,11 +48,11 @@ public class ClusterStateTest extends SolrTestCaseJ4 {
props.put("prop1", "value");
props.put("prop2", "value2");
- Replica replica = new Replica("node1", props, "collection1", "shard1");
+ Replica replica = new Replica("node1", props, "collection1", "shard1", nodeName -> "http://" + nodeName);
sliceToProps.put("node1", replica);
- Slice slice = new Slice("shard1", sliceToProps, null, "collection1");
+ Slice slice = new Slice("shard1", sliceToProps, null, "collection1", nodeName -> "http://" + nodeName);
slices.put("shard1", slice);
- Slice slice2 = new Slice("shard2", sliceToProps, null, "collection1");
+ Slice slice2 = new Slice("shard2", sliceToProps, null, "collection1", nodeName -> "http://" + nodeName);
slices.put("shard2", slice2);
collectionStates.put("collection1", new DocCollection("collection1", slices, null, DocRouter.DEFAULT));
collectionStates.put("collection2", new DocCollection("collection2", slices, null, DocRouter.DEFAULT));
@@ -60,7 +60,7 @@ public class ClusterStateTest extends SolrTestCaseJ4 {
ClusterState clusterState = new ClusterState(liveNodes, collectionStates);
byte[] bytes = Utils.toJSON(clusterState);
// System.out.println("#################### " + new String(bytes));
- ClusterState loadedClusterState = ClusterState.createFromJson(-1, bytes, liveNodes);
+ ClusterState loadedClusterState = ClusterState.createFromJson(nodeName -> "http://" + nodeName ,-1, bytes, liveNodes);
assertEquals("Provided liveNodes not used properly", 2, loadedClusterState
.getLiveNodes().size());
@@ -68,13 +68,13 @@ public class ClusterStateTest extends SolrTestCaseJ4 {
assertEquals("Properties not copied properly", replica.getStr("prop1"), loadedClusterState.getCollection("collection1").getSlice("shard1").getReplicasMap().get("node1").getStr("prop1"));
assertEquals("Properties not copied properly", replica.getStr("prop2"), loadedClusterState.getCollection("collection1").getSlice("shard1").getReplicasMap().get("node1").getStr("prop2"));
- loadedClusterState = ClusterState.createFromJson(-1, new byte[0], liveNodes);
+ loadedClusterState = ClusterState.createFromJson(nodeName -> "http://" + nodeName, -1, new byte[0], liveNodes);
assertEquals("Provided liveNodes not used properly", 2, loadedClusterState
.getLiveNodes().size());
assertEquals("Should not have collections", 0, loadedClusterState.getCollectionsMap().size());
- loadedClusterState = ClusterState.createFromJson(-1, (byte[])null, liveNodes);
+ loadedClusterState = ClusterState.createFromJson(nodeName -> "http://" + nodeName, -1, (byte[])null, liveNodes);
assertEquals("Provided liveNodes not used properly", 2, loadedClusterState
.getLiveNodes().size());
diff --git a/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java b/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java
index b3711bd..0b5e640 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java
@@ -104,8 +104,8 @@ public class ClusterStateUpdateTest extends SolrCloudTestCase {
assertEquals(host + ":" +cluster.getJettySolrRunner(0).getLocalPort()+"_solr", zkProps.getStr(ZkStateReader.NODE_NAME_PROP));
- assertTrue(zkProps.getStr(ZkStateReader.BASE_URL_PROP).contains("http://" + host + ":"+cluster.getJettySolrRunner(0).getLocalPort()+"/solr")
- || zkProps.getStr(ZkStateReader.BASE_URL_PROP).contains("https://" + host + ":"+cluster.getJettySolrRunner(0).getLocalPort()+"/solr") );
+ assertTrue(zkProps.getCoreUrl().contains("http://" + host + ":"+cluster.getJettySolrRunner(0).getLocalPort()+"/solr")
+ || zkProps.getBaseUrl().contains("https://" + host + ":"+cluster.getJettySolrRunner(0).getLocalPort()+"/solr") );
// assert there are 3 live nodes
Set<String> liveNodes = clusterState2.getLiveNodes();
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
index dc9f6a7..54bda46 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
@@ -290,8 +290,6 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
// Add a shard to the implicit collection
response = CollectionAdminRequest.createShard(collectionName, "shardC").process(cluster.getSolrClient());
- cluster.waitForActiveCollection(collectionName, 3, 9);
-
assertEquals(0, response.getStatus());
assertTrue(response.isSuccess());
diff --git a/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java b/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
index 6dd40e8..bbbcdc8 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
@@ -241,8 +241,7 @@ public class DeleteReplicaTest extends SolrCloudTestCase {
Overseer.QUEUE_OPERATION, OverseerAction.DELETECORE.toLower(),
ZkStateReader.CORE_NAME_PROP, replica.getName(),
ZkStateReader.NODE_NAME_PROP, replica.getNodeName(),
- ZkStateReader.COLLECTION_PROP, collectionName,
- ZkStateReader.BASE_URL_PROP, replica.getBaseUrl());
+ ZkStateReader.COLLECTION_PROP, collectionName);
cluster.getOpenOverseer().getStateUpdateQueue().offer(Utils.toJSON(m));
@@ -303,8 +302,7 @@ public class DeleteReplicaTest extends SolrCloudTestCase {
Overseer.QUEUE_OPERATION, OverseerAction.DELETECORE.toLower(),
ZkStateReader.CORE_NAME_PROP, replica1.getName(),
ZkStateReader.NODE_NAME_PROP, replica1.getNodeName(),
- ZkStateReader.COLLECTION_PROP, collectionName,
- ZkStateReader.BASE_URL_PROP, replica1.getBaseUrl());
+ ZkStateReader.COLLECTION_PROP, collectionName);
cluster.getOpenOverseer().getStateUpdateQueue().offer(Utils.toJSON(m));
boolean replicaDeleted = false;
diff --git a/solr/core/src/test/org/apache/solr/cloud/DistributedVersionInfoTest.java b/solr/core/src/test/org/apache/solr/cloud/DistributedVersionInfoTest.java
index 292b02f..5db1454 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DistributedVersionInfoTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DistributedVersionInfoTest.java
@@ -359,10 +359,9 @@ public class DistributedVersionInfoTest extends SolrCloudTestCase {
}
protected boolean reloadCollection(Replica replica, String testCollectionName) throws Exception {
- ZkCoreNodeProps coreProps = new ZkCoreNodeProps(replica);
String coreName = replica.getName();
boolean reloadedOk = false;
- try (Http2SolrClient client = SolrTestCaseJ4.getHttpSolrClient(coreProps.getBaseUrl())) {
+ try (Http2SolrClient client = SolrTestCaseJ4.getHttpSolrClient(replica.getBaseUrl())) {
CoreAdminResponse statusResp = CoreAdminRequest.getStatus(coreName, client);
long leaderCoreStartTime = statusResp.getStartTime(coreName).getTime();
diff --git a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
index 8ee5569..2ff41f6 100644
--- a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
@@ -577,8 +577,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
}
protected Http2SolrClient getHttpSolrClient(Replica replica, String coll) throws Exception {
- ZkCoreNodeProps zkProps = new ZkCoreNodeProps(replica);
- String url = zkProps.getBaseUrl() + "/" + coll;
+ String url = replica.getBaseUrl() + "/" + coll;
return getHttpSolrClient(url);
}
diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java
index 29acefe..d8aa516 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java
@@ -91,7 +91,7 @@ public class LeaderElectionTest extends SolrTestCaseJ4 {
zkClient.mkdir("/collections/collection2/election");
}
- class TestLeaderElectionContext extends ShardLeaderElectionContextBase {
+ static class TestLeaderElectionContext extends ShardLeaderElectionContextBase {
private long runLeaderDelay = 0;
public TestLeaderElectionContext(LeaderElector leaderElector,
@@ -155,10 +155,10 @@ public class LeaderElectionTest extends SolrTestCaseJ4 {
this.runLeaderDelay = runLeaderDelay;
Map<String,Object> props = new HashMap<>();
- props.put(ZkStateReader.BASE_URL_PROP, Integer.toString(nodeNumber));
+ props.put(ZkStateReader.NODE_NAME_PROP, Integer.toString(nodeNumber));
props.put(ZkStateReader.CORE_NAME_PROP, "");
- replica = new Replica("", props, "", shard);
+ replica = new Replica("", props, "", shard, zkStateReader);
this.es = es;
if (this.es == null) {
@@ -174,9 +174,7 @@ public class LeaderElectionTest extends SolrTestCaseJ4 {
private void setupOnConnect() throws InterruptedException, KeeperException,
IOException {
assertNotNull(es);
- TestLeaderElectionContext context = new TestLeaderElectionContext(
- es.elector, shard, "collection1", nodeName,
- replica, es.zkController, runLeaderDelay);
+ TestLeaderElectionContext context = new TestLeaderElectionContext(es.elector, shard, "collection1", nodeName, replica, es.zkController, runLeaderDelay);
es.elector.setup(context);
// nocommit - we have to get the seq another way, now returns if become leader first try
//seq = es.elector.joinElection(context, false);
@@ -268,7 +266,7 @@ public class LeaderElectionTest extends SolrTestCaseJ4 {
ZkCoreNodeProps leaderProps = new ZkCoreNodeProps(
ZkNodeProps.load(data));
// nocommit
- Replica replica = new Replica("", leaderProps.getNodeProps().getProperties(), collection, slice);
+ Replica replica = new Replica("", leaderProps.getNodeProps().getProperties(), collection, slice, zkStateReader);
return replica.getCoreUrl();
} catch (NoNodeException | SessionExpiredException e) {
diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderVoteWaitTimeoutTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderVoteWaitTimeoutTest.java
index 7eef9f5..f569892 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LeaderVoteWaitTimeoutTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LeaderVoteWaitTimeoutTest.java
@@ -295,8 +295,7 @@ public class LeaderVoteWaitTimeoutTest extends SolrCloudTestCase {
}
protected Http2SolrClient getHttpSolrClient(Replica replica, String coll) throws Exception {
- ZkCoreNodeProps zkProps = new ZkCoreNodeProps(replica);
- String url = zkProps.getBaseUrl() + "/" + coll;
+ String url = replica.getBaseUrl() + "/" + coll;
return SolrTestCaseJ4.getHttpSolrClient(url);
}
diff --git a/solr/core/src/test/org/apache/solr/cloud/NodeMutatorTest.java b/solr/core/src/test/org/apache/solr/cloud/NodeMutatorTest.java
index 103f027..57350c0 100644
--- a/solr/core/src/test/org/apache/solr/cloud/NodeMutatorTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/NodeMutatorTest.java
@@ -31,13 +31,13 @@ import org.junit.Test;
public class NodeMutatorTest extends SolrTestCaseJ4Test {
private static final String NODE3 = "baseUrl3_";
- private static final String NODE3_URL = "http://baseUrl3";
+ private static final String NODE3_URL = "http://baseUrl3_";
private static final String NODE2 = "baseUrl2_";
- private static final String NODE2_URL = "http://baseUrl2";
+ private static final String NODE2_URL = "http://baseUrl2_";
private static final String NODE1 = "baseUrl1_";
- private static final String NODE1_URL = "http://baseUrl1";
+ private static final String NODE1_URL = "http://baseUrl1_";
@Test
public void downNodeReportsAllImpactedCollectionsAndNothingElse() throws IOException {
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
index 6dd6280..f8cd2a7 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
@@ -217,8 +217,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
ZkStateReader.CORE_NAME_PROP, coreName,
ZkStateReader.COLLECTION_PROP, collection,
ZkStateReader.SHARD_ID_PROP, shard,
- ZkStateReader.NUM_SHARDS_PROP, Integer.toString(numShards),
- ZkStateReader.BASE_URL_PROP, "http://" + nodeName + "/solr/");
+ ZkStateReader.NUM_SHARDS_PROP, Integer.toString(numShards));
ZkDistributedQueue q = overseer.getStateUpdateQueue();
q.offer(Utils.toJSON(m));
}
@@ -237,14 +236,13 @@ public class OverseerTest extends SolrTestCaseJ4 {
zkClient.mkdir("/collections/" + collection + "/leader_elect/"
+ shardId + "/election");
} catch (NodeExistsException nee) {}
- ZkNodeProps props = new ZkNodeProps(ZkStateReader.BASE_URL_PROP,
- "http://" + nodeName + "/solr/", ZkStateReader.NODE_NAME_PROP,
+ ZkNodeProps props = new ZkNodeProps(ZkStateReader.NODE_NAME_PROP,
nodeName, ZkStateReader.CORE_NAME_PROP, coreName,
ZkStateReader.SHARD_ID_PROP, shardId,
ZkStateReader.COLLECTION_PROP, collection);
LeaderElector elector = new LeaderElector(overseer.getZkController(), new ZkController.ContextKey("overseer",
"overseer"), new ConcurrentHashMap<>());
- Replica replica = new Replica(coreName, props.getProperties(), collection, shardId);
+ Replica replica = new Replica(coreName, props.getProperties(), collection, shardId, zkStateReader);
ShardLeaderElectionContextBase ctx = new ShardLeaderElectionContextBase(
nodeName + "_" + coreName, shardId, collection, replica,
zkStateReader.getZkClient());
@@ -523,7 +521,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
}
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.DOWNNODE.toLower(),
ZkStateReader.NODE_NAME_PROP, "127.0.0.1");
- ClusterState commands = new NodeMutator().downNode(reader.getClusterState(), m);
+ ClusterState commands = new NodeMutator().downNode(reader, reader.getClusterState(), m);
ZkDistributedQueue q = overseers.get(0).getStateUpdateQueue();
@@ -587,7 +585,6 @@ public class OverseerTest extends SolrTestCaseJ4 {
createCollection(COLLECTION, 1);
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
- ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
ZkStateReader.NODE_NAME_PROP, "node1",
ZkStateReader.COLLECTION_PROP, COLLECTION,
ZkStateReader.SHARD_ID_PROP, "shard1",
@@ -602,7 +599,6 @@ public class OverseerTest extends SolrTestCaseJ4 {
//publish node state (active)
m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
- ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
ZkStateReader.NODE_NAME_PROP, "node1",
ZkStateReader.COLLECTION_PROP, COLLECTION,
ZkStateReader.SHARD_ID_PROP, "shard1",
@@ -1068,8 +1064,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
ZkStateReader.CORE_NAME_PROP, "core" + k,
ZkStateReader.SHARD_ID_PROP, "shard1",
ZkStateReader.COLLECTION_PROP, "perf" + j,
- ZkStateReader.NUM_SHARDS_PROP, "1",
- ZkStateReader.BASE_URL_PROP, "http://" + "node1" + "/solr/");
+ ZkStateReader.NUM_SHARDS_PROP, "1");
ZkDistributedQueue q = overseers.get(0).getStateUpdateQueue();
q.offer(Utils.toJSON(m));
if (j >= MAX_COLLECTIONS - 1) j = 0;
@@ -1157,7 +1152,6 @@ public class OverseerTest extends SolrTestCaseJ4 {
createCollection("c1", 1);
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
- ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
ZkStateReader.SHARD_ID_PROP, "shard1",
ZkStateReader.NODE_NAME_PROP, "node1",
ZkStateReader.COLLECTION_PROP, "c1",
@@ -1171,7 +1165,6 @@ public class OverseerTest extends SolrTestCaseJ4 {
verifyReplicaStatus(reader, "c1", "shard1", "core_node1", Replica.State.DOWN);
m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
- ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
ZkStateReader.SHARD_ID_PROP, "shard1",
ZkStateReader.NODE_NAME_PROP, "node1",
ZkStateReader.COLLECTION_PROP, "c1",
@@ -1183,7 +1176,6 @@ public class OverseerTest extends SolrTestCaseJ4 {
m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
- ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
ZkStateReader.SHARD_ID_PROP, "shard1",
ZkStateReader.NODE_NAME_PROP, "node1",
ZkStateReader.COLLECTION_PROP, "c1",
@@ -1221,7 +1213,6 @@ public class OverseerTest extends SolrTestCaseJ4 {
m = new ZkNodeProps(Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.ADDREPLICA.toLower(),
"collection", testCollectionName,
ZkStateReader.SHARD_ID_PROP, "x",
- ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
ZkStateReader.NODE_NAME_PROP, "node1",
ZkStateReader.CORE_NAME_PROP, "core1",
ZkStateReader.STATE_PROP, Replica.State.DOWN.toString()
@@ -1366,7 +1357,6 @@ public class OverseerTest extends SolrTestCaseJ4 {
for (int ss = 1; ss <= numShards; ++ss) {
final int N = (numReplicas-rr)*numShards + ss;
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
- ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
ZkStateReader.SHARD_ID_PROP, "shard"+ss,
ZkStateReader.NODE_NAME_PROP, "node"+N,
ZkStateReader.COLLECTION_PROP, COLLECTION,
@@ -1390,7 +1380,6 @@ public class OverseerTest extends SolrTestCaseJ4 {
for (int ss = 1; ss <= numShards; ++ss) {
final int N = (numReplicas-rr)*numShards + ss;
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
- ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
ZkStateReader.SHARD_ID_PROP, "shard"+ss,
ZkStateReader.NODE_NAME_PROP, "node"+N,
ZkStateReader.COLLECTION_PROP, COLLECTION,
diff --git a/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java b/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java
index d3a2ef1..a854121 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java
@@ -269,8 +269,7 @@ public class ReplicationFactorTest extends AbstractFullDistribZkTestBase {
@SuppressWarnings("rawtypes")
protected void sendNonDirectUpdateRequestReplica(Replica replica, UpdateRequest up, int expectedRf, String collection) throws Exception {
- ZkCoreNodeProps zkProps = new ZkCoreNodeProps(replica);
- String url = zkProps.getBaseUrl() + "/" + collection;
+ String url = replica.getBaseUrl() + "/" + collection;
try (Http2SolrClient solrServer = getHttpSolrClient(url)) {
NamedList resp = solrServer.request(up);
NamedList hdr = (NamedList) resp.get("responseHeader");
diff --git a/solr/core/src/test/org/apache/solr/cloud/SSLMigrationTest.java b/solr/core/src/test/org/apache/solr/cloud/SSLMigrationTest.java
index 765ce99..eccb595 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SSLMigrationTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SSLMigrationTest.java
@@ -55,6 +55,8 @@ import static org.apache.solr.common.util.Utils.makeMap;
@AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 17-Mar-2018
public class SSLMigrationTest extends AbstractFullDistribZkTestBase {
+ public static final String[] STRINGS = {};
+
@Test
public void test() throws Exception {
//Migrate from HTTP -> HTTPS -> HTTP
@@ -103,7 +105,7 @@ public class SSLMigrationTest extends AbstractFullDistribZkTestBase {
assertEquals("Wrong number of replicas found", 4, replicas.size());
for(Replica replica : replicas) {
assertTrue("Replica didn't have the proper urlScheme in the ClusterState",
- StringUtils.startsWith(replica.getStr(ZkStateReader.BASE_URL_PROP), urlScheme));
+ StringUtils.startsWith(replica.getBaseUrl(), urlScheme));
}
}
@@ -128,10 +130,10 @@ public class SSLMigrationTest extends AbstractFullDistribZkTestBase {
List<String> urls = new ArrayList<String>();
for(Replica replica : getReplicas()) {
- urls.add(replica.getStr(ZkStateReader.BASE_URL_PROP));
+ urls.add(replica.getBaseUrl());
}
//Create new SolrServer to configure new HttpClient w/ SSL config
- try (SolrClient client = getLBHttpSolrClient(urls.toArray(new String[]{}))) {
+ try (SolrClient client = getLBHttpSolrClient(urls.toArray(STRINGS))) {
client.request(request);
}
}
diff --git a/solr/core/src/test/org/apache/solr/cloud/SolrCloudExampleTest.java b/solr/core/src/test/org/apache/solr/cloud/SolrCloudExampleTest.java
index 5f78490..c2d059e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SolrCloudExampleTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SolrCloudExampleTest.java
@@ -279,7 +279,7 @@ public class SolrCloudExampleTest extends SolrCloudBridgeTestCase {
DocCollection coll = cloudClient.getZkStateReader().getClusterState().getCollection(collection);
for (Slice slice : coll.getActiveSlices()) {
for (Replica replica : slice.getReplicas()) {
- String uri = "" + replica.get(ZkStateReader.BASE_URL_PROP) + "/" + replica.get(ZkStateReader.CORE_NAME_PROP) + "/config";
+ String uri = "" + replica.getCoreUrl() + "/config";
Map respMap = getAsMap(cloudClient, uri);
Long maxTime = (Long) (getObjectByPath(respMap, true, asList("config", "updateHandler", "autoSoftCommit", "maxTime")));
ret.put(replica.getName(), maxTime);
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCloudConsistency.java b/solr/core/src/test/org/apache/solr/cloud/TestCloudConsistency.java
index fff9897..253ddf1 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestCloudConsistency.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestCloudConsistency.java
@@ -320,8 +320,7 @@ public class TestCloudConsistency extends SolrCloudTestCase {
}
protected Http2SolrClient getHttpSolrClient(Replica replica, String coll) throws Exception {
- ZkCoreNodeProps zkProps = new ZkCoreNodeProps(replica);
- String url = zkProps.getBaseUrl() + "/" + coll;
+ String url = replica.getBaseUrl() + "/" + coll;
return SolrTestCaseJ4.getHttpSolrClient(url);
}
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestHashPartitioner.java b/solr/core/src/test/org/apache/solr/cloud/TestHashPartitioner.java
index 3bfae93..3b08a26 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestHashPartitioner.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestHashPartitioner.java
@@ -273,7 +273,7 @@ public class TestHashPartitioner extends SolrTestCaseJ4 {
Map<String,Slice> slices = new HashMap<>();
for (int i=0; i<ranges.size(); i++) {
Range range = ranges.get(i);
- Slice slice = new Slice("shard"+(i+1), null, map("range",range), "collections1");
+ Slice slice = new Slice("shard"+(i+1), null, map("range",range), "collections1", nodeName -> "http://" + nodeName);
slices.put(slice.getName(), slice);
}
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestLeaderElectionWithEmptyReplica.java b/solr/core/src/test/org/apache/solr/cloud/TestLeaderElectionWithEmptyReplica.java
index 1939f51..c755a42 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestLeaderElectionWithEmptyReplica.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestLeaderElectionWithEmptyReplica.java
@@ -41,8 +41,6 @@ import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
-
/**
* See SOLR-9504
*/
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java b/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
index db06a2d..339a9e3 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
@@ -316,7 +316,7 @@ public class TestPullReplica extends SolrCloudTestCase {
CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1, 0, 0)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
-// cluster.getSolrClient().getZkStateReader().registerCore(collectionName); //TODO: Is this needed?
+// .registerCore(collectionName); //TODO: Is this needed?
waitForState("Replica not added", collectionName, activeReplicaCount(1, 0, 0));
addDocs(500);
List<Replica.State> statesSeen = new ArrayList<>(3);
@@ -498,7 +498,7 @@ public class TestPullReplica extends SolrCloudTestCase {
CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1, 0, 1)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
-// cluster.getSolrClient().getZkStateReader().registerCore(collectionName); //TODO: Is this needed?
+// .registerCore(collectionName); //TODO: Is this needed?
waitForState("Expected collection to be created with 1 shard and 2 replicas", collectionName, clusterShape(1, 2));
DocCollection docCollection = assertNumberOfReplicas(1, 0, 1, false, true);
assertEquals(1, docCollection.getSlices().size());
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestPullReplicaErrorHandling.java b/solr/core/src/test/org/apache/solr/cloud/TestPullReplicaErrorHandling.java
index a6a5b67..0794b9b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestPullReplicaErrorHandling.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestPullReplicaErrorHandling.java
@@ -267,7 +267,7 @@ public void testCantConnectToPullReplica() throws Exception {
}
protected JettySolrRunner getJettyForReplica(Replica replica) throws Exception {
- String replicaBaseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
+ String replicaBaseUrl = replica.getBaseUrl();
assertNotNull(replicaBaseUrl);
URL baseUrl = new URL(replicaBaseUrl);
@@ -277,7 +277,7 @@ public void testCantConnectToPullReplica() throws Exception {
}
protected SocketProxy getProxyForReplica(Replica replica) throws Exception {
- String replicaBaseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
+ String replicaBaseUrl = replica.getBaseUrl();
assertNotNull(replicaBaseUrl);
URL baseUrl = new URL(replicaBaseUrl);
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java b/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java
index 05921e1..e0da66d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java
@@ -109,7 +109,7 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
DocCollection b1x1 = clusterState.getCollection("b1x1");
Collection<Replica> replicas = b1x1.getSlice("shard1").getReplicas();
assertEquals(1, replicas.size());
- String baseUrl = replicas.iterator().next().getStr(ZkStateReader.BASE_URL_PROP);
+ String baseUrl = replicas.iterator().next().getBaseUrl();
if (!baseUrl.endsWith("/")) baseUrl += "/";
try (Http2SolrClient client = getHttpSolrClient(baseUrl + "a1x2", 2000, 5000)) {
@@ -164,7 +164,6 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
//Simulate a replica being in down state.
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
- ZkStateReader.BASE_URL_PROP, notLeader.getStr(ZkStateReader.BASE_URL_PROP),
ZkStateReader.NODE_NAME_PROP, notLeader.getStr(ZkStateReader.NODE_NAME_PROP),
ZkStateReader.COLLECTION_PROP, "football",
ZkStateReader.SHARD_ID_PROP, "shard1",
@@ -182,7 +181,7 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
//Query against the node which hosts the down replica
- String baseUrl = notLeader.getStr(ZkStateReader.BASE_URL_PROP);
+ String baseUrl = notLeader.getBaseUrl();
if (!baseUrl.endsWith("/")) baseUrl += "/";
String path = baseUrl + "football";
log.info("Firing queries against path={}", path);
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIDistClusterPerZkTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIDistClusterPerZkTest.java
index ea0a11e..02b869f 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIDistClusterPerZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIDistClusterPerZkTest.java
@@ -444,7 +444,7 @@ public class CollectionsAPIDistClusterPerZkTest extends SolrCloudTestCase {
assertEquals("Replica should be created on the right node",
cluster.getSolrClient().getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)),
- newReplica.getStr(ZkStateReader.BASE_URL_PROP));
+ newReplica.getBaseUrl());
Path instancePath = createTempDir();
response = CollectionAdminRequest.addReplicaToShard(collectionName, "s1")
@@ -453,7 +453,7 @@ public class CollectionsAPIDistClusterPerZkTest extends SolrCloudTestCase {
newReplica = grabNewReplica(response, getCollectionState(collectionName));
assertNotNull(newReplica);
- try (Http2SolrClient coreclient = SolrTestCaseJ4.getHttpSolrClient(newReplica.getStr(ZkStateReader.BASE_URL_PROP))) {
+ try (Http2SolrClient coreclient = SolrTestCaseJ4.getHttpSolrClient(newReplica.getBaseUrl())) {
CoreAdminResponse status = CoreAdminRequest.getStatus(newReplica.getName(), coreclient);
NamedList<Object> coreStatus = status.getCoreStatus(newReplica.getName());
String instanceDirStr = (String) coreStatus.get("instanceDir");
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
index 00d15ad..2323e72 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
@@ -16,7 +16,6 @@
*/
package org.apache.solr.cloud.api.collections;
-import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
@@ -185,7 +184,7 @@ public class ShardSplitTest extends SolrCloudBridgeTestCase {
boolean restarted = false;
for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
int port = jetty.getLocalPort();
- if (replica.getStr(BASE_URL_PROP).contains(":" + port)) {
+ if (replica.getBaseUrl().contains(":" + port)) {
stoppedNodeName = jetty.getNodeName();
jetty.stop();
jetty.start();
diff --git a/solr/core/src/test/org/apache/solr/core/ConfigureRecoveryStrategyTest.java b/solr/core/src/test/org/apache/solr/core/ConfigureRecoveryStrategyTest.java
index 8a7c793..4477297 100644
--- a/solr/core/src/test/org/apache/solr/core/ConfigureRecoveryStrategyTest.java
+++ b/solr/core/src/test/org/apache/solr/core/ConfigureRecoveryStrategyTest.java
@@ -95,8 +95,8 @@ public class ConfigureRecoveryStrategyTest extends SolrTestCaseJ4 {
}
@Override
- protected String getReplicateLeaderUrl(Replica leaderprops) {
- return ZkCoreNodeProps.getCoreUrl(
+ protected String getReplicateLeaderUrl(Replica leaderprops, ZkStateReader zkStateReader) {
+ return Replica.getCoreUrl(
leaderprops.getStr(alternativeBaseUrlProp),
leaderprops.getStr(ZkStateReader.CORE_NAME_PROP));
}
diff --git a/solr/core/src/test/org/apache/solr/core/CoreSorterTest.java b/solr/core/src/test/org/apache/solr/core/CoreSorterTest.java
index 014c95a..46f356e 100644
--- a/solr/core/src/test/org/apache/solr/core/CoreSorterTest.java
+++ b/solr/core/src/test/org/apache/solr/core/CoreSorterTest.java
@@ -124,7 +124,7 @@ public class CoreSorterTest extends SolrTestCaseJ4 {
addNewReplica(replicas, collection, slice, downNodes);
}
Map<String, Replica> replicaMap = replicas.stream().collect(Collectors.toMap(Replica::getName, Function.identity()));
- sliceMap.put(slice, new Slice(slice, replicaMap, map(), collection));
+ sliceMap.put(slice, new Slice(slice, replicaMap, map(), collection, nodeName -> "http://" + nodeName));
}
DocCollection col = new DocCollection(collection, sliceMap, map(), DocRouter.DEFAULT);
collToState.put(collection, col);
@@ -194,7 +194,7 @@ public class CoreSorterTest extends SolrTestCaseJ4 {
protected Replica addNewReplica(List<Replica> replicaList, String collection, String slice, List<String> possibleNodes) {
String replica = "r" + replicaList.size();
String node = possibleNodes.get(random().nextInt(possibleNodes.size())); // place on a random node
- Replica r = new Replica(replica, map("core", replica, "node_name", node), collection, slice);
+ Replica r = new Replica(replica, map("core", replica, "node_name", node), collection, slice, nodeName -> "http://" + nodeName);
replicaList.add(r);
return r;
}
diff --git a/solr/core/src/test/org/apache/solr/core/snapshots/TestSolrCloudSnapshots.java b/solr/core/src/test/org/apache/solr/core/snapshots/TestSolrCloudSnapshots.java
index 86e6f06..09a4ee1 100644
--- a/solr/core/src/test/org/apache/solr/core/snapshots/TestSolrCloudSnapshots.java
+++ b/solr/core/src/test/org/apache/solr/core/snapshots/TestSolrCloudSnapshots.java
@@ -16,8 +16,6 @@
*/
package org.apache.solr.core.snapshots;
-import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
-
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Collection;
@@ -148,8 +146,8 @@ public class TestSolrCloudSnapshots extends SolrCloudTestCase {
continue; // We know that the snapshot is not created for this replica.
}
- String replicaBaseUrl = replica.getStr(BASE_URL_PROP);
- String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
+ String replicaBaseUrl = replica.getBaseUrl();
+ String coreName = replica.getName();
assertTrue(snapshotByCoreName.containsKey(coreName));
CoreSnapshotMetaData coreSnapshot = snapshotByCoreName.get(coreName);
@@ -257,8 +255,8 @@ public class TestSolrCloudSnapshots extends SolrCloudTestCase {
continue; // We know that the snapshot was not created for this replica.
}
- String replicaBaseUrl = replica.getStr(BASE_URL_PROP);
- String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
+ String replicaBaseUrl = replica.getBaseUrl();
+ String coreName = replica.getName();
try (SolrClient adminClient = SolrTestCaseJ4.getHttpSolrClient(replicaBaseUrl)) {
Collection<SnapshotMetaData> snapshots = listCoreSnapshots(adminClient, coreName);
diff --git a/solr/core/src/test/org/apache/solr/core/snapshots/TestSolrCoreSnapshots.java b/solr/core/src/test/org/apache/solr/core/snapshots/TestSolrCoreSnapshots.java
index 8c0567c..9918e48 100644
--- a/solr/core/src/test/org/apache/solr/core/snapshots/TestSolrCoreSnapshots.java
+++ b/solr/core/src/test/org/apache/solr/core/snapshots/TestSolrCoreSnapshots.java
@@ -58,8 +58,6 @@ import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
-
@SolrTestCaseJ4.SuppressSSL // Currently unknown why SSL does not work with this test
@Slow
@Ignore // nocommit debug
@@ -98,8 +96,8 @@ public class TestSolrCoreSnapshots extends SolrCloudTestCase {
assertEquals(1, shard.getReplicas().size());
Replica replica = shard.getReplicas().iterator().next();
- String replicaBaseUrl = replica.getStr(BASE_URL_PROP);
- String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
+ String replicaBaseUrl = replica.getBaseUrl();
+ String coreName = replica.getName();
String backupName = TestUtil.randomSimpleString(random(), 1, 5);
String commitName = TestUtil.randomSimpleString(random(), 1, 5);
String duplicateName = commitName.concat("_duplicate");
diff --git a/solr/core/src/test/org/apache/solr/handler/TestBlobHandler.java b/solr/core/src/test/org/apache/solr/handler/TestBlobHandler.java
index ee6e0f1..371be16 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestBlobHandler.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestBlobHandler.java
@@ -68,7 +68,7 @@ public class TestBlobHandler extends AbstractFullDistribZkTestBase {
DocCollection sysColl = cloudClient.getZkStateReader().getClusterState().getCollection(".system");
Replica replica = sysColl.getActiveSlicesMap().values().iterator().next().getLeader();
- String baseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
+ String baseUrl = replica.getBaseUrl();
String url = baseUrl + "/.system/config/requestHandler";
MapWriter map = TestSolrConfigHandlerConcurrent.getAsMap(url, cloudClient);
assertNotNull(map);
diff --git a/solr/core/src/test/org/apache/solr/handler/TestConfigReload.java b/solr/core/src/test/org/apache/solr/handler/TestConfigReload.java
index c09e35d..eb22bde 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestConfigReload.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestConfigReload.java
@@ -102,7 +102,7 @@ public class TestConfigReload extends SolrCloudBridgeTestCase {
List<String> urls = new ArrayList<>();
for (Slice slice : coll.getSlices()) {
for (Replica replica : slice.getReplicas())
- urls.add(""+replica.get(ZkStateReader.BASE_URL_PROP) + "/"+replica.get(ZkStateReader.CORE_NAME_PROP));
+ urls.add(""+replica.getCoreUrl());
}
HashSet<String> succeeded = new HashSet<>();
diff --git a/solr/core/src/test/org/apache/solr/handler/TestHdfsBackupRestoreCore.java b/solr/core/src/test/org/apache/solr/handler/TestHdfsBackupRestoreCore.java
index b91cab8..8087613 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestHdfsBackupRestoreCore.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestHdfsBackupRestoreCore.java
@@ -53,7 +53,6 @@ import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
@SolrTestCaseJ4.SuppressSSL // Currently unknown why SSL does not work with this test
@LuceneTestCase.Nightly
public class TestHdfsBackupRestoreCore extends SolrCloudTestCase {
@@ -171,8 +170,8 @@ public class TestHdfsBackupRestoreCore extends SolrCloudTestCase {
assertEquals(1, shard.getReplicas().size());
Replica replica = shard.getReplicas().iterator().next();
- String replicaBaseUrl = replica.getStr(BASE_URL_PROP);
- String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
+ String replicaBaseUrl = replica.getBaseUrl();
+ String coreName = replica.getName();
String backupName = TestUtil.randomSimpleString(random(), 1, 5);
boolean testViaReplicationHandler = random().nextBoolean();
diff --git a/solr/core/src/test/org/apache/solr/handler/TestReqParamsAPI.java b/solr/core/src/test/org/apache/solr/handler/TestReqParamsAPI.java
index ab39061..48729c7 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestReqParamsAPI.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestReqParamsAPI.java
@@ -86,7 +86,7 @@ public class TestReqParamsAPI extends SolrCloudTestCase {
List<String> urls = new ArrayList<>();
for (Slice slice : coll.getSlices()) {
for (Replica replica : slice.getReplicas())
- urls.add("" + replica.get(ZkStateReader.BASE_URL_PROP) + "/" + replica.get(ZkStateReader.CORE_NAME_PROP));
+ urls.add("" + replica.getCoreUrl());
}
RestTestHarness writeHarness = restTestHarnesses.get(random().nextInt(restTestHarnesses.size()));
diff --git a/solr/core/src/test/org/apache/solr/handler/TestSolrConfigHandlerCloud.java b/solr/core/src/test/org/apache/solr/handler/TestSolrConfigHandlerCloud.java
index 95da04e..f181a89 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestSolrConfigHandlerCloud.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestSolrConfigHandlerCloud.java
@@ -88,7 +88,7 @@ public class TestSolrConfigHandlerCloud extends AbstractFullDistribZkTestBase {
List<String> urls = new ArrayList<>();
for (Slice slice : coll.getSlices()) {
for (Replica replica : slice.getReplicas())
- urls.add(""+replica.get(ZkStateReader.BASE_URL_PROP) + "/"+replica.get(ZkStateReader.CORE_NAME_PROP));
+ urls.add(""+replica.getCoreUrl());
}
return urls.get(random().nextInt(urls.size()));
}
@@ -98,7 +98,7 @@ public class TestSolrConfigHandlerCloud extends AbstractFullDistribZkTestBase {
List<String> urls = new ArrayList<>();
for (Slice slice : coll.getSlices()) {
for (Replica replica : slice.getReplicas())
- urls.add(""+replica.get(ZkStateReader.BASE_URL_PROP) + "/"+replica.get(ZkStateReader.CORE_NAME_PROP));
+ urls.add(""+replica.getCoreUrl());
}
RestTestHarness writeHarness = randomRestTestHarness();
diff --git a/solr/core/src/test/org/apache/solr/handler/TestSolrConfigHandlerConcurrent.java b/solr/core/src/test/org/apache/solr/handler/TestSolrConfigHandlerConcurrent.java
index 557de11..6b6c70e 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestSolrConfigHandlerConcurrent.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestSolrConfigHandlerConcurrent.java
@@ -150,7 +150,7 @@ public class TestSolrConfigHandlerConcurrent extends SolrCloudBridgeTestCase {
List<String> urls = new ArrayList<>();
for (Slice slice : coll.getSlices()) {
for (Replica replica : slice.getReplicas())
- urls.add(""+replica.get(ZkStateReader.BASE_URL_PROP) + "/"+replica.get(ZkStateReader.CORE_NAME_PROP));
+ urls.add(""+replica.getCoreUrl());
}
diff --git a/solr/core/src/test/org/apache/solr/util/TestExportTool.java b/solr/core/src/test/org/apache/solr/util/TestExportTool.java
index 4b2e61e..d9ddc63 100644
--- a/solr/core/src/test/org/apache/solr/util/TestExportTool.java
+++ b/solr/core/src/test/org/apache/solr/util/TestExportTool.java
@@ -82,7 +82,7 @@ public class TestExportTool extends SolrCloudTestCase {
String url = cluster.getRandomJetty(random()).getBaseUrl() + "/" + COLLECTION_NAME;
- ExportTool.Info info = new ExportTool.MultiThreadedRunner(url);
+ ExportTool.Info info = new ExportTool.MultiThreadedRunner(cluster.getSolrClient().getZkStateReader(), url);
String absolutePath = tmpFileLoc + COLLECTION_NAME + random().nextInt(100000) + ".json";
info.setOutFormat(absolutePath, "jsonl");
info.setLimit("200");
@@ -91,7 +91,7 @@ public class TestExportTool extends SolrCloudTestCase {
assertJsonDocsCount(info, 200, record -> "2019-09-30T05:58:03Z".equals(record.get("a_dt")));
- info = new ExportTool.MultiThreadedRunner(url);
+ info = new ExportTool.MultiThreadedRunner(cluster.getSolrClient().getZkStateReader(), url);
absolutePath = tmpFileLoc + COLLECTION_NAME + random().nextInt(100000) + ".json";
info.setOutFormat(absolutePath, "jsonl");
info.setLimit("-1");
@@ -100,7 +100,7 @@ public class TestExportTool extends SolrCloudTestCase {
assertJsonDocsCount(info, 1000,null);
- info = new ExportTool.MultiThreadedRunner(url);
+ info = new ExportTool.MultiThreadedRunner(cluster.getSolrClient().getZkStateReader(), url);
absolutePath = tmpFileLoc + COLLECTION_NAME + random().nextInt(100000) + ".javabin";
info.setOutFormat(absolutePath, "javabin");
info.setLimit("200");
@@ -109,7 +109,7 @@ public class TestExportTool extends SolrCloudTestCase {
assertJavabinDocsCount(info, 200);
- info = new ExportTool.MultiThreadedRunner(url);
+ info = new ExportTool.MultiThreadedRunner(cluster.getSolrClient().getZkStateReader(), url);
absolutePath = tmpFileLoc + COLLECTION_NAME + random().nextInt(100000) + ".javabin";
info.setOutFormat(absolutePath, "javabin");
info.setLimit("-1");
@@ -174,7 +174,7 @@ public class TestExportTool extends SolrCloudTestCase {
ExportTool.MultiThreadedRunner info = null;
String absolutePath = null;
- info = new ExportTool.MultiThreadedRunner(url);
+ info = new ExportTool.MultiThreadedRunner(cluster.getSolrClient().getZkStateReader(), url);
info.output = System.out;
absolutePath = tmpFileLoc + COLLECTION_NAME + random().nextInt(100000) + ".javabin";
info.setOutFormat(absolutePath, "javabin");
@@ -184,7 +184,7 @@ public class TestExportTool extends SolrCloudTestCase {
for (Map.Entry<String, Long> e : docCounts.entrySet()) {
assertEquals(e.getValue().longValue(), info.corehandlers.get(e.getKey()).receivedDocs.get());
}
- info = new ExportTool.MultiThreadedRunner(url);
+ info = new ExportTool.MultiThreadedRunner(cluster.getSolrClient().getZkStateReader(), url);
info.output = System.out;
absolutePath = tmpFileLoc + COLLECTION_NAME + random().nextInt(100000) + ".json";
info.setOutFormat(absolutePath, "jsonl");
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseCloudSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseCloudSolrClient.java
index 6b7be1e..a1f6ff4 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseCloudSolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseCloudSolrClient.java
@@ -691,7 +691,8 @@ public abstract class BaseCloudSolrClient extends SolrClient {
// put the leaderUrl first.
sortedReplicas.add(0, leader);
- urlMap.put(name, sortedReplicas.stream().map(Replica::getCoreUrl).collect(Collectors.toList()));
+ urlMap.put(name,
+ sortedReplicas.stream().map(Replica::getCoreUrl).collect(Collectors.toList()));
}
return urlMap;
}
@@ -1213,7 +1214,7 @@ public abstract class BaseCloudSolrClient extends SolrClient {
Set<String> seenNodes = new HashSet<>();
sortedReplicas.forEach( replica -> {
if (seenNodes.add(replica.getNodeName())) {
- theUrlList.add(ZkCoreNodeProps.getCoreUrl(replica.getBaseUrl(), joinedInputCollections));
+ theUrlList.add(Replica.getCoreUrl(replica.getBaseUrl(), joinedInputCollections));
}
});
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseHttpClusterStateProvider.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseHttpClusterStateProvider.java
index 513fd57..f05075a 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseHttpClusterStateProvider.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseHttpClusterStateProvider.java
@@ -35,6 +35,7 @@ import org.apache.solr.client.solrj.response.CollectionAdminResponse;
import org.apache.solr.common.ParWork;
import org.apache.solr.common.cloud.Aliases;
import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
@@ -45,7 +46,7 @@ import org.slf4j.LoggerFactory;
import static org.apache.solr.client.solrj.impl.BaseHttpSolrClient.RemoteSolrException;
-public abstract class BaseHttpClusterStateProvider implements ClusterStateProvider {
+public abstract class BaseHttpClusterStateProvider implements ClusterStateProvider, Replica.NodeNameToBaseUrl {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private String urlScheme;
@@ -139,7 +140,7 @@ public abstract class BaseHttpClusterStateProvider implements ClusterStateProvid
Set<String> liveNodes = new HashSet((List<String>)(cluster.get("live_nodes")));
this.liveNodes = liveNodes;
liveNodesTimestamp = System.nanoTime();
- ClusterState cs = ClusterState.createFromCollectionMap(znodeVersion, collectionsMap, liveNodes);
+ ClusterState cs = ClusterState.createFromCollectionMap(this, znodeVersion, collectionsMap, liveNodes);
if (clusterProperties != null) {
Map<String, Object> properties = (Map<String, Object>) cluster.get("properties");
if (properties != null) {
@@ -250,6 +251,12 @@ public abstract class BaseHttpClusterStateProvider implements ClusterStateProvid
}
@Override
+ public String getBaseUrlForNodeName(final String nodeName) {
+ return Utils.getBaseUrlForNodeName(nodeName,
+ getClusterProperty(ZkStateReader.URL_SCHEME, "http"));
+ }
+
+ @Override
public Map<String, String> getAliasProperties(String alias) {
getAliases(false);
return Collections.unmodifiableMap(aliasProperties.getOrDefault(alias, Collections.emptyMap()));
@@ -326,6 +333,6 @@ public abstract class BaseHttpClusterStateProvider implements ClusterStateProvid
}
// This exception is not meant to escape this class it should be caught and wrapped.
- private class NotACollectionException extends Exception {
+ private static class NotACollectionException extends Exception {
}
}
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ZkClientClusterStateProvider.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ZkClientClusterStateProvider.java
index 32f9aac..d399f64 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ZkClientClusterStateProvider.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ZkClientClusterStateProvider.java
@@ -20,7 +20,9 @@ package org.apache.solr.client.solrj.impl;
import org.apache.solr.common.AlreadyClosedException;
import org.apache.solr.common.ParWork;
import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.util.Utils;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -35,7 +37,7 @@ import java.util.Map;
import java.util.Set;
-public class ZkClientClusterStateProvider implements ClusterStateProvider {
+public class ZkClientClusterStateProvider implements ClusterStateProvider, Replica.NodeNameToBaseUrl {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
ZkStateReader zkStateReader;
@@ -234,4 +236,10 @@ public class ZkClientClusterStateProvider implements ClusterStateProvider {
public boolean isClosed() {
return isClosed;
}
+
+ @Override
+ public String getBaseUrlForNodeName(final String nodeName) {
+ return Utils.getBaseUrlForNodeName(nodeName,
+ getClusterProperty(ZkStateReader.URL_SCHEME, "http"));
+ }
}
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/routing/AffinityReplicaListTransformer.java b/solr/solrj/src/java/org/apache/solr/client/solrj/routing/AffinityReplicaListTransformer.java
index 864105a..9d46ae1 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/routing/AffinityReplicaListTransformer.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/routing/AffinityReplicaListTransformer.java
@@ -97,7 +97,7 @@ public class AffinityReplicaListTransformer implements ReplicaListTransformer, C
private SortableChoice(Object choice) {
this.choice = choice;
if (choice instanceof Replica) {
- this.sortableCoreLabel = ((Replica)choice).getCoreUrl();
+ this.sortableCoreLabel = ((Replica)choice).getNodeName() + "_" + ((Replica)choice).getName();
} else if (choice instanceof String) {
this.sortableCoreLabel = (String)choice;
} else {
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/routing/NodePreferenceRulesComparator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/routing/NodePreferenceRulesComparator.java
index bb8cecb..576fe3d 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/routing/NodePreferenceRulesComparator.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/routing/NodePreferenceRulesComparator.java
@@ -26,6 +26,7 @@ import java.util.Map;
import org.apache.solr.common.StringUtils;
import org.apache.solr.common.cloud.NodesSysPropsCacher;
import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.ShardParams;
import org.apache.solr.common.params.SolrParams;
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/routing/RequestReplicaListTransformerGenerator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/routing/RequestReplicaListTransformerGenerator.java
index 9ca7d0d..ceb9ebf 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/routing/RequestReplicaListTransformerGenerator.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/routing/RequestReplicaListTransformerGenerator.java
@@ -27,6 +27,7 @@ import java.util.Random;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.NodesSysPropsCacher;
+import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ShardParams;
import org.apache.solr.common.params.SolrParams;
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java
index c5217a4..27cb7b6 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java
@@ -243,19 +243,19 @@ public class ClusterState implements JSONWriter.Writable {
* @param liveNodes list of live nodes
* @return the ClusterState
*/
- public static ClusterState createFromJson(int version, byte[] bytes, Set<String> liveNodes) {
+ public static ClusterState createFromJson(Replica.NodeNameToBaseUrl nodeNameToBaseUrl, int version, byte[] bytes, Set<String> liveNodes) {
if (bytes == null || bytes.length == 0) {
return new ClusterState(version, liveNodes, Collections.<String, DocCollection>emptyMap());
}
Map<String, Object> stateMap = (Map<String, Object>) Utils.fromJSON(bytes);
- return createFromCollectionMap(version, stateMap, liveNodes);
+ return createFromCollectionMap(nodeNameToBaseUrl, version, stateMap, liveNodes);
}
- public static ClusterState createFromCollectionMap(int version, Map<String, Object> stateMap, Set<String> liveNodes) {
+ public static ClusterState createFromCollectionMap(Replica.NodeNameToBaseUrl zkStateReader, int version, Map<String, Object> stateMap, Set<String> liveNodes) {
Map<String,CollectionRef> collections = new LinkedHashMap<>(stateMap.size());
for (Entry<String, Object> entry : stateMap.entrySet()) {
String collectionName = entry.getKey();
- DocCollection coll = collectionFromObjects(collectionName, (Map<String,Object>)entry.getValue(), version);
+ DocCollection coll = collectionFromObjects(zkStateReader, collectionName, (Map<String,Object>)entry.getValue(), version);
collections.put(collectionName, new CollectionRef(coll));
}
@@ -263,17 +263,17 @@ public class ClusterState implements JSONWriter.Writable {
}
// TODO move to static DocCollection.loadFromMap
- private static DocCollection collectionFromObjects(String name, Map<String, Object> objs, int version) {
+ private static DocCollection collectionFromObjects(Replica.NodeNameToBaseUrl zkStateReader, String name, Map<String, Object> objs, int version) {
Map<String,Object> props;
Map<String,Slice> slices;
Map<String, Object> sliceObjs = (Map<String, Object>) objs.get(DocCollection.SHARDS);
if (sliceObjs == null) {
// legacy format from 4.0... there was no separate "shards" level to contain the collection shards.
- slices = Slice.loadAllFromMap(name, objs);
+ slices = Slice.loadAllFromMap(zkStateReader, name, objs);
props = Collections.emptyMap();
} else {
- slices = Slice.loadAllFromMap(name, sliceObjs);
+ slices = Slice.loadAllFromMap(zkStateReader, name, sliceObjs);
props = new HashMap<>(objs);
objs.remove(DocCollection.SHARDS);
}
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterStateUtil.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterStateUtil.java
index b59d4c1..5dccb44 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterStateUtil.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterStateUtil.java
@@ -103,70 +103,10 @@ public class ClusterStateUtil {
return success;
}
-
- /**
- * Wait to see an entry in the ClusterState with a specific coreNodeName and
- * baseUrl.
- *
- * @param zkStateReader
- * to use for ClusterState
- * @param collection
- * to look in
- * @param coreNodeName
- * to wait for
- * @param baseUrl
- * to wait for
- * @param timeoutInMs
- * how long to wait before giving up
- * @return false if timed out
- */
- public static boolean waitToSeeLiveReplica(ZkStateReader zkStateReader,
- String collection, String coreNodeName, String baseUrl,
- int timeoutInMs) {
- long timeout = System.nanoTime()
- + TimeUnit.NANOSECONDS.convert(timeoutInMs, TimeUnit.MILLISECONDS);
-
- while (System.nanoTime() < timeout) {
- log.debug("waiting to see replica just created live collection={} replica={} baseUrl={}",
- collection, coreNodeName, baseUrl);
- ClusterState clusterState = zkStateReader.getClusterState();
- if (clusterState != null) {
- DocCollection docCollection = clusterState.getCollection(collection);
- Collection<Slice> slices = docCollection.getSlices();
- for (Slice slice : slices) {
- // only look at active shards
- if (slice.getState() == Slice.State.ACTIVE) {
- Collection<Replica> replicas = slice.getReplicas();
- for (Replica replica : replicas) {
- // on a live node?
- boolean live = clusterState.liveNodesContain(replica.getNodeName());
- String rcoreNodeName = replica.getName();
- String rbaseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
- if (live && coreNodeName.equals(rcoreNodeName)
- && baseUrl.equals(rbaseUrl)) {
- // found it
- return true;
- }
- }
- }
- }
- try {
- Thread.sleep(TIMEOUT_POLL_MS);
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- throw new SolrException(ErrorCode.SERVER_ERROR, "Interrupted", e);
- }
- }
- }
-
- log.error("Timed out waiting to see replica just created in cluster state. Continuing...");
- return false;
- }
-
+
public static boolean waitForAllReplicasNotLive(ZkStateReader zkStateReader, int timeoutInMs) {
return waitForAllReplicasNotLive(zkStateReader, null, timeoutInMs);
}
-
public static boolean waitForAllReplicasNotLive(ZkStateReader zkStateReader,
String collection, int timeoutInMs) {
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/Replica.java b/solr/solrj/src/java/org/apache/solr/common/cloud/Replica.java
index 544db8c..89f9a6f 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/Replica.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/Replica.java
@@ -24,7 +24,7 @@ import java.util.Set;
import org.apache.solr.common.util.Utils;
public class Replica extends ZkNodeProps {
-
+
/**
* The replica's state. In general, if the node the replica is hosted on is
* not under {@code /live_nodes} in ZK, the replica's state should be
@@ -107,18 +107,24 @@ public class Replica extends ZkNodeProps {
}
}
+ public interface NodeNameToBaseUrl {
+ String getBaseUrlForNodeName(final String nodeName);
+ }
+
private final String name;
private final String nodeName;
private State state;
private final Type type;
public final String slice, collection;
+ private final String baseUrl;
- public Replica(String name, Map<String,Object> propMap, String collection, String slice) {
+ public Replica(String name, Map<String,Object> propMap, String collection, String slice, NodeNameToBaseUrl nodeNameToBaseUrl) {
super(propMap);
this.collection = collection;
this.slice = slice;
this.name = name;
this.nodeName = (String) propMap.get(ZkStateReader.NODE_NAME_PROP);
+ this.baseUrl = nodeNameToBaseUrl.getBaseUrlForNodeName(this.nodeName);
type = Type.get((String) propMap.get(ZkStateReader.REPLICA_TYPE));
// Objects.requireNonNull(this.collection, "'collection' must not be null");
// Objects.requireNonNull(this.slice, "'slice' must not be null");
@@ -137,6 +143,26 @@ public class Replica extends ZkNodeProps {
}
}
+ public Replica(String name, Map<String,Object> propMap, String collection, String slice, String baseUrl) {
+ super(propMap);
+ this.collection = collection;
+ this.slice = slice;
+ this.name = name;
+ this.nodeName = (String) propMap.get(ZkStateReader.NODE_NAME_PROP);
+ this.baseUrl = baseUrl;
+ type = Type.get((String) propMap.get(ZkStateReader.REPLICA_TYPE));
+ if (propMap.get(ZkStateReader.STATE_PROP) != null) {
+ if (propMap.get(ZkStateReader.STATE_PROP) instanceof State) {
+ this.state = (State) propMap.get(ZkStateReader.STATE_PROP);
+ } else {
+ this.state = State.getState((String) propMap.get(ZkStateReader.STATE_PROP));
+ }
+ } else {
+ this.state = State.DOWN; //Default to DOWN
+ propMap.put(ZkStateReader.STATE_PROP, state.toString());
+ }
+ }
+
public String getCollection(){
return collection;
}
@@ -161,10 +187,10 @@ public class Replica extends ZkNodeProps {
}
public String getCoreUrl() {
- return ZkCoreNodeProps.getCoreUrl(getStr(ZkStateReader.BASE_URL_PROP), name);
+ return getCoreUrl(getBaseUrl(), name);
}
- public String getBaseUrl(){
- return getStr(ZkStateReader.BASE_URL_PROP);
+ public String getBaseUrl() {
+ return baseUrl;
}
/** The name of the node this replica resides on */
@@ -202,6 +228,16 @@ public class Replica extends ZkNodeProps {
return propertyValue;
}
+ public static String getCoreUrl(String baseUrl, String coreName) {
+ StringBuilder sb = new StringBuilder();
+ sb.append(baseUrl);
+ if (!baseUrl.endsWith("/")) sb.append("/");
+ sb.append(coreName);
+ if (!(sb.substring(sb.length() - 1).equals("/"))) sb.append("/");
+ return sb.toString();
+ }
+
+
@Override
public String toString() {
return name + ':' + Utils.toJSONString(propMap); // small enough, keep it on one line (i.e. no indent)
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java b/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java
index 1f7e2c3..1d171d8 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java
@@ -38,9 +38,10 @@ import static org.apache.solr.common.util.Utils.toJSONString;
*/
public class Slice extends ZkNodeProps implements Iterable<Replica> {
public final String collection;
+ private final Replica.NodeNameToBaseUrl nodeNameToBaseUrl;
/** Loads multiple slices into a Map from a generic Map that probably came from deserialized JSON. */
- public static Map<String,Slice> loadAllFromMap(String collection, Map<String, Object> genericSlices) {
+ public static Map<String,Slice> loadAllFromMap(Replica.NodeNameToBaseUrl nodeNameToBaseUrl, String collection, Map<String, Object> genericSlices) {
if (genericSlices == null) return Collections.emptyMap();
Map<String, Slice> result = new LinkedHashMap<>(genericSlices.size());
for (Map.Entry<String, Object> entry : genericSlices.entrySet()) {
@@ -49,7 +50,7 @@ public class Slice extends ZkNodeProps implements Iterable<Replica> {
if (val instanceof Slice) {
result.put(name, (Slice) val);
} else if (val instanceof Map) {
- result.put(name, new Slice(name, null, (Map<String, Object>) val, collection));
+ result.put(name, new Slice(name, null, (Map<String, Object>) val, collection, nodeNameToBaseUrl));
}
}
return result;
@@ -130,11 +131,11 @@ public class Slice extends ZkNodeProps implements Iterable<Replica> {
* @param replicas The replicas of the slice. This is used directly and a copy is not made. If null, replicas will be constructed from props.
* @param props The properties of the slice - a shallow copy will always be made.
*/
- public Slice(String name, Map<String,Replica> replicas, Map<String,Object> props, String collection) {
+ public Slice(String name, Map<String,Replica> replicas, Map<String,Object> props, String collection, Replica.NodeNameToBaseUrl nodeNameToBaseUrl) {
super( props==null ? new LinkedHashMap<String,Object>(2) : new LinkedHashMap<>(props));
this.name = name;
this.collection = collection;
-
+ this.nodeNameToBaseUrl = nodeNameToBaseUrl;
Object rangeObj = propMap.get(RANGE);
if (propMap.get(ZkStateReader.STATE_PROP) != null) {
this.state = State.getState((String) propMap.get(ZkStateReader.STATE_PROP));
@@ -199,7 +200,7 @@ public class Slice extends ZkNodeProps implements Iterable<Replica> {
if (val instanceof Replica) {
r = (Replica)val;
} else {
- r = new Replica(name, (Map<String,Object>)val, collection, slice);
+ r = new Replica(name, (Map<String,Object>)val, collection, slice, nodeNameToBaseUrl);
}
result.put(name, r);
}
@@ -208,8 +209,7 @@ public class Slice extends ZkNodeProps implements Iterable<Replica> {
private Replica findLeader() {
for (Replica replica : replicas.values()) {
- if (replica.getStr(LEADER) != null && replica.getState() == Replica.State.ACTIVE) {
- assert replica.getType() == Type.TLOG || replica.getType() == Type.NRT: "Pull replica should not become leader!";
+ if (replica.getStr(LEADER) != null) {
return replica;
}
}
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkCoreNodeProps.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkCoreNodeProps.java
index b84f6a7..3c43a6f 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkCoreNodeProps.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkCoreNodeProps.java
@@ -31,24 +31,6 @@ public class ZkCoreNodeProps {
return nodeProps.getStr(ZkStateReader.STATE_PROP);
}
- public String getBaseUrl() {
- return nodeProps.getStr(ZkStateReader.BASE_URL_PROP);
- }
-
-
- public static String getCoreUrl(ZkNodeProps nodeProps) {
- return getCoreUrl(nodeProps.getStr(ZkStateReader.BASE_URL_PROP), nodeProps.getStr(ZkStateReader.CORE_NAME_PROP));
- }
-
- public static String getCoreUrl(String baseUrl, String coreName) {
- StringBuilder sb = new StringBuilder();
- sb.append(baseUrl);
- if (!baseUrl.endsWith("/")) sb.append("/");
- sb.append(coreName);
- if (!(sb.substring(sb.length() - 1).equals("/"))) sb.append("/");
- return sb.toString();
- }
-
@Override
public String toString() {
return nodeProps.toString();
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
index 4e615d6..31cf7d6 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
@@ -73,7 +73,7 @@ import static java.util.Collections.EMPTY_MAP;
import static java.util.Collections.emptySortedSet;
import static org.apache.solr.common.util.Utils.fromJSON;
-public class ZkStateReader implements SolrCloseable {
+public class ZkStateReader implements SolrCloseable, Replica.NodeNameToBaseUrl {
public static final int STATE_UPDATE_DELAY = Integer.getInteger("solr.OverseerStateUpdateDelay", 2000); // delay between cloud state updates
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -1241,6 +1241,7 @@ public class ZkStateReader implements SolrCloseable {
*
* @lucene.experimental
*/
+ @Override
public String getBaseUrlForNodeName(final String nodeName) {
return Utils.getBaseUrlForNodeName(nodeName, getClusterProperty(URL_SCHEME, "http"));
}
@@ -1486,7 +1487,7 @@ public class ZkStateReader implements SolrCloseable {
Stat stat = new Stat();
byte[] data = zkClient.getData(collectionPath, watcher, stat, true);
if (data == null) return null;
- ClusterState state = ClusterState.createFromJson(stat.getVersion(), data, liveNodes);
+ ClusterState state = ClusterState.createFromJson(this, stat.getVersion(), data, liveNodes);
ClusterState.CollectionRef collectionRef = state.getCollectionStates().get(coll);
return collectionRef == null ? null : collectionRef.get();
} catch (KeeperException.NoNodeException e) {
diff --git a/solr/solrj/src/java/org/apache/solr/common/util/Utils.java b/solr/solrj/src/java/org/apache/solr/common/util/Utils.java
index 084fdbda..44ee0ee 100644
--- a/solr/solrj/src/java/org/apache/solr/common/util/Utils.java
+++ b/solr/solrj/src/java/org/apache/solr/common/util/Utils.java
@@ -751,10 +751,12 @@ public class Utils {
public static String getBaseUrlForNodeName ( final String nodeName, String urlScheme){
final int _offset = nodeName.indexOf("_");
+ final String hostAndPort;
if (_offset < 0) {
- throw new IllegalArgumentException("nodeName does not contain expected '_' separator: " + nodeName);
+ hostAndPort = nodeName;
+ } else {
+ hostAndPort = nodeName.substring(0, _offset);
}
- final String hostAndPort = nodeName.substring(0, _offset);
final String path = URLDecoder.decode(nodeName.substring(1 + _offset), UTF_8);
return urlScheme + "://" + hostAndPort + (path.isEmpty() ? "" : ("/" + path));
}
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/BaseSolrClientWireMockTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/BaseSolrClientWireMockTest.java
index d201de7..322b18e 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/BaseSolrClientWireMockTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/BaseSolrClientWireMockTest.java
@@ -204,12 +204,11 @@ public abstract class BaseSolrClientWireMockTest extends SolrTestCase {
props.put("autoAddReplicas", "false");
props.put("nrtReplicas", "1");
- return new DocCollection(BUILT_IN_MOCK_COLLECTION, Slice.loadAllFromMap(BUILT_IN_MOCK_COLLECTION, slices), props, DocRouter.DEFAULT);
+ return new DocCollection(BUILT_IN_MOCK_COLLECTION, Slice.loadAllFromMap(nodeName -> mockSolr.baseUrl() + "/solr", BUILT_IN_MOCK_COLLECTION, slices), props, DocRouter.DEFAULT);
}
protected static void updateReplicaBaseUrl(JsonNode json, String shard, String replica, String baseUrl) {
ObjectNode replicaNode = (ObjectNode) json.get(shard).get("replicas").get(replica);
- replicaNode.put("base_url", baseUrl + "/solr");
replicaNode.put("node_name", baseUrl);
}
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientTest.java
index 7a13b6f..c59a37f 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientTest.java
@@ -290,7 +290,7 @@ public class CloudHttp2SolrClientTest extends SolrCloudTestCase {
Map<String, Long> requestCountsMap = Maps.newHashMap();
for (Slice slice : col.getSlices()) {
for (Replica replica : slice.getReplicas()) {
- String baseURL = (String) replica.get(ZkStateReader.BASE_URL_PROP);
+ String baseURL = (String) replica.getBaseUrl();
requestCountsMap.put(baseURL, getNumRequests(baseURL, "routing_collection"));
}
}
@@ -301,7 +301,7 @@ public class CloudHttp2SolrClientTest extends SolrCloudTestCase {
Set<String> expectedBaseURLs = Sets.newHashSet();
for (Slice expectedSlice : expectedSlices) {
for (Replica replica : expectedSlice.getReplicas()) {
- String baseURL = (String) replica.get(ZkStateReader.BASE_URL_PROP);
+ String baseURL = replica.getBaseUrl();
expectedBaseURLs.add(baseURL);
}
}
@@ -348,7 +348,7 @@ public class CloudHttp2SolrClientTest extends SolrCloudTestCase {
Map<String, Long> numRequestsToUnexpectedUrls = Maps.newHashMap();
for (Slice slice : col.getSlices()) {
for (Replica replica : slice.getReplicas()) {
- String baseURL = (String) replica.get(ZkStateReader.BASE_URL_PROP);
+ String baseURL = replica.getBaseUrl();
Long prevNumRequests = requestCountsMap.get(baseURL);
Long curNumRequests = getNumRequests(baseURL, "routing_collection");
@@ -688,7 +688,7 @@ public class CloudHttp2SolrClientTest extends SolrCloudTestCase {
SolrQuery q = new SolrQuery().setQuery("*:*");
BaseHttpSolrClient.RemoteSolrException sse = null;
- final String url = r.getStr(ZkStateReader.BASE_URL_PROP) + "/" + collection;
+ final String url = r.getBaseUrl() + "/" + collection;
try (Http2SolrClient solrClient = SolrTestCaseJ4.getHttpSolrClient(url)) {
if (log.isInfoEnabled()) {
@@ -714,7 +714,7 @@ public class CloudHttp2SolrClientTest extends SolrCloudTestCase {
Set<String> allNodesOfColl = new HashSet<>();
for (Slice slice : coll.getSlices()) {
for (Replica replica : slice.getReplicas()) {
- allNodesOfColl.add(replica.getStr(ZkStateReader.BASE_URL_PROP));
+ allNodesOfColl.add(replica.getBaseUrl());
}
}
String theNode = null;
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientCacheTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientCacheTest.java
index 4fb3cd8..a41ce09 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientCacheTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientCacheTest.java
@@ -35,6 +35,7 @@ import org.apache.solr.client.solrj.cloud.DelegatingClusterStateProvider;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.util.NamedList;
import org.junit.BeforeClass;
import org.junit.Ignore;
@@ -84,25 +85,24 @@ public class CloudSolrClientCacheTest extends SolrTestCaseJ4 {
LBHttpSolrClient mockLbclient = getMockLbHttpSolrClient(responses);
AtomicInteger lbhttpRequestCount = new AtomicInteger();
- try (CloudSolrClient cloudClient = new CloudSolrClientBuilder(getStateProvider(livenodes, refs))
- .withLBHttpSolrClient(mockLbclient)
- .build()) {
- livenodes.addAll(ImmutableSet.of("192.168.1.108:7574_solr", "192.168.1.108:8983_solr"));
- ClusterState cs = ClusterState.createFromJson(1, coll1State.getBytes(UTF_8), Collections.emptySet());
- refs.put(collName, new Ref(collName));
- colls.put(collName, cs.getCollectionOrNull(collName));
- responses.put("request", o -> {
- int i = lbhttpRequestCount.incrementAndGet();
- if (i == 1) return new ConnectException("TEST");
- if (i == 2) return new SocketException("TEST");
- if (i == 3) return new NoHttpResponseException("TEST");
- return okResponse;
- });
- UpdateRequest update = new UpdateRequest()
- .add("id", "123", "desc", "Something 0");
-
- cloudClient.request(update, collName);
- assertEquals(2, refs.get(collName).getCount());
+ try (ClusterStateProvider stateProvider = getStateProvider(livenodes, refs)) {
+ try (CloudSolrClient cloudClient = new CloudSolrClientBuilder(stateProvider).withLBHttpSolrClient(mockLbclient).build()) {
+ livenodes.addAll(ImmutableSet.of("192.168.1.108:7574_solr", "192.168.1.108:8983_solr"));
+ ClusterState cs = ClusterState.createFromJson((ZkStateReader) stateProvider, 1, coll1State.getBytes(UTF_8), Collections.emptySet());
+ refs.put(collName, new Ref(collName));
+ colls.put(collName, cs.getCollectionOrNull(collName));
+ responses.put("request", o -> {
+ int i = lbhttpRequestCount.incrementAndGet();
+ if (i == 1) return new ConnectException("TEST");
+ if (i == 2) return new SocketException("TEST");
+ if (i == 3) return new NoHttpResponseException("TEST");
+ return okResponse;
+ });
+ UpdateRequest update = new UpdateRequest().add("id", "123", "desc", "Something 0");
+
+ cloudClient.request(update, collName);
+ assertEquals(2, refs.get(collName).getCount());
+ }
}
mockLbclient.close();
}
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
index 7bebaf7..36b85f8 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
@@ -338,7 +338,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
Map<String, Long> requestCountsMap = Maps.newHashMap();
for (Slice slice : col.getSlices()) {
for (Replica replica : slice.getReplicas()) {
- String baseURL = (String) replica.get(ZkStateReader.BASE_URL_PROP);
+ String baseURL = replica.getBaseUrl();
requestCountsMap.put(baseURL, getNumRequests(baseURL, "routing_collection"));
}
}
@@ -349,7 +349,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
Set<String> expectedBaseURLs = Sets.newHashSet();
for (Slice expectedSlice : expectedSlices) {
for (Replica replica : expectedSlice.getReplicas()) {
- String baseURL = (String) replica.get(ZkStateReader.BASE_URL_PROP);
+ String baseURL = replica.getBaseUrl();
expectedBaseURLs.add(baseURL);
}
}
@@ -396,7 +396,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
Map<String, Long> numRequestsToUnexpectedUrls = Maps.newHashMap();
for (Slice slice : col.getSlices()) {
for (Replica replica : slice.getReplicas()) {
- String baseURL = (String) replica.get(ZkStateReader.BASE_URL_PROP);
+ String baseURL = replica.getBaseUrl();
Long prevNumRequests = requestCountsMap.get(baseURL);
Long curNumRequests = getNumRequests(baseURL, "routing_collection");
@@ -728,7 +728,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
SolrQuery q = new SolrQuery().setQuery("*:*");
BaseHttpSolrClient.RemoteSolrException sse = null;
- final String url = r.getStr(ZkStateReader.BASE_URL_PROP) + "/" + collection;
+ final String url = r.getBaseUrl() + "/" + collection;
try (Http2SolrClient solrClient = SolrTestCaseJ4.getHttpSolrClient(url)) {
if (log.isInfoEnabled()) {
@@ -754,7 +754,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
Set<String> allNodesOfColl = new HashSet<>();
for (Slice slice : coll.getSlices()) {
for (Replica replica : slice.getReplicas()) {
- allNodesOfColl.add(replica.getStr(ZkStateReader.BASE_URL_PROP));
+ allNodesOfColl.add(replica.getBaseUrl());
}
}
String theNode = null;
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/CloudAuthStreamTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/CloudAuthStreamTest.java
index 8e8cc7d..034bc4b 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/CloudAuthStreamTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/CloudAuthStreamTest.java
@@ -829,7 +829,7 @@ public class CloudAuthStreamTest extends SolrCloudTestCase {
final List<String> replicaUrls =
cluster.getSolrClient().getZkStateReader().getClusterState()
.getCollectionOrNull(collection).getReplicas().stream()
- .map(Replica::getCoreUrl).collect(Collectors.toList());
+ .map(replica -> replica.getCoreUrl()).collect(Collectors.toList());
Collections.shuffle(replicaUrls, random());
return replicaUrls.get(0);
}
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/routing/NodePreferenceRulesComparatorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/routing/NodePreferenceRulesComparatorTest.java
index 3998dd0..61dd58d 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/routing/NodePreferenceRulesComparatorTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/routing/NodePreferenceRulesComparatorTest.java
@@ -18,9 +18,11 @@
package org.apache.solr.client.solrj.routing;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.cloud.MockZkStateReader;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.ShardParams;
@@ -33,7 +35,7 @@ public class NodePreferenceRulesComparatorTest extends SolrTestCaseJ4 {
List<Replica> replicas = getBasicReplicaList();
// replicaLocation rule
- List<PreferenceRule> rules = PreferenceRule.from(ShardParams.SHARDS_PREFERENCE_REPLICA_LOCATION + ":http://host2:8983");
+ List<PreferenceRule> rules = PreferenceRule.from(ShardParams.SHARDS_PREFERENCE_REPLICA_LOCATION + ":http://node2");
NodePreferenceRulesComparator comparator = new NodePreferenceRulesComparator(rules, null);
replicas.sort(comparator);
assertEquals("node2", replicas.get(0).getNodeName());
@@ -55,7 +57,7 @@ public class NodePreferenceRulesComparatorTest extends SolrTestCaseJ4 {
// reversed rule
rules = PreferenceRule.from(ShardParams.SHARDS_PREFERENCE_REPLICA_TYPE + ":TLOG," +
ShardParams.SHARDS_PREFERENCE_REPLICA_TYPE + ":NRT");
- comparator = new NodePreferenceRulesComparator(rules, null);
+ comparator = new NodePreferenceRulesComparator( rules, null);
replicas.sort(comparator);
assertEquals("node2", replicas.get(0).getNodeName());
@@ -71,11 +73,10 @@ public class NodePreferenceRulesComparatorTest extends SolrTestCaseJ4 {
new Replica(
"node4",
map(
- ZkStateReader.BASE_URL_PROP, "http://host2_2:8983/solr",
ZkStateReader.NODE_NAME_PROP, "node4",
ZkStateReader.CORE_NAME_PROP, "collection1",
ZkStateReader.REPLICA_TYPE, "TLOG"
- ),"collection1","shard1"
+ ),"collection1","shard1", nodeName -> "http://" + nodeName
)
);
@@ -87,8 +88,8 @@ public class NodePreferenceRulesComparatorTest extends SolrTestCaseJ4 {
replicas.sort(comparator);
assertEquals("node1", replicas.get(0).getNodeName());
- assertEquals("node4", replicas.get(1).getNodeName());
- assertEquals("node2", replicas.get(2).getNodeName());
+ assertEquals("node2", replicas.get(1).getNodeName());
+ assertEquals("node4", replicas.get(2).getNodeName());
assertEquals("node3", replicas.get(3).getNodeName());
}
@@ -121,33 +122,30 @@ public class NodePreferenceRulesComparatorTest extends SolrTestCaseJ4 {
new Replica(
"node1",
map(
- ZkStateReader.BASE_URL_PROP, "http://host1:8983/solr",
ZkStateReader.NODE_NAME_PROP, "node1",
ZkStateReader.CORE_NAME_PROP, "collection1",
ZkStateReader.REPLICA_TYPE, "NRT"
- ),"collection1","shard1"
+ ),"collection1","shard1", nodeName -> "http://" + nodeName
)
);
replicas.add(
new Replica(
"node2",
map(
- ZkStateReader.BASE_URL_PROP, "http://host2:8983/solr",
ZkStateReader.NODE_NAME_PROP, "node2",
ZkStateReader.CORE_NAME_PROP, "collection1",
ZkStateReader.REPLICA_TYPE, "TLOG"
- ),"collection1","shard1"
+ ),"collection1","shard1", nodeName -> "http://" + nodeName
)
);
replicas.add(
new Replica(
"node3",
map(
- ZkStateReader.BASE_URL_PROP, "http://host2_2:8983/solr",
ZkStateReader.NODE_NAME_PROP, "node3",
ZkStateReader.CORE_NAME_PROP, "collection1",
ZkStateReader.REPLICA_TYPE, "PULL"
- ),"collection1","shard1"
+ ),"collection1","shard1", nodeName -> "http://" + nodeName
)
);
return replicas;
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/routing/ReplicaListTransformerTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/routing/ReplicaListTransformerTest.java
index ad33a18..4c9dd19 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/routing/ReplicaListTransformerTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/routing/ReplicaListTransformerTest.java
@@ -31,6 +31,7 @@ import org.apache.solr.SolrTestCase;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.util.Utils;
import org.apache.solr.handler.component.HttpShardHandlerFactory;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.request.SolrQueryRequest;
@@ -60,10 +61,7 @@ public class ReplicaListTransformerTest extends SolrTestCase {
Object choice = it.next();
final String url;
if (choice instanceof String) {
- url = (String)choice;
- }
- else if (choice instanceof Replica) {
- url = ((Replica)choice).getCoreUrl();
+ url = (String) choice;
} else {
url = null;
}
@@ -105,7 +103,7 @@ public class ReplicaListTransformerTest extends SolrTestCase {
@Test
public void testTransform() throws Exception {
- final String regex = ".*" + random().nextInt(10) + ".*";
+ final String regex = ".*_n" + random().nextInt(10) + ".*";
AtomicReference<ReplicaListTransformer> transformer = new AtomicReference<>();
try {
@@ -149,15 +147,19 @@ public class ReplicaListTransformerTest extends SolrTestCase {
final List<String> urls = createRandomUrls();
for (int ii = 0; ii < urls.size(); ++ii) {
- final String name = "replica" + (ii + 1);
+ final String name = "r_n" + (ii + 1);
final String url = urls.get(ii);
final Map<String, Object> propMap = new HashMap<String, Object>();
- propMap.put("base_url", url);
propMap.put("core", "test_core");
- propMap.put("node_name", "test_node");
+ propMap.put("node_name", url);
propMap.put("type", "NRT");
// a skeleton replica, good enough for this test's purposes
- final Replica replica = new Replica(name, propMap, "c1", "s1");
+ final Replica replica = new Replica(name, propMap, "c1", "s1", new Replica.NodeNameToBaseUrl() {
+ @Override
+ public String getBaseUrlForNodeName(String nodeName) {
+ return Utils.getBaseUrlForNodeName(nodeName, "http");
+ }
+ });
inputs.add(replica);
final String coreUrl = replica.getCoreUrl();
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/routing/RequestReplicaListTransformerGeneratorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/routing/RequestReplicaListTransformerGeneratorTest.java
index c0ebad3..91d6b9d 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/routing/RequestReplicaListTransformerGeneratorTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/routing/RequestReplicaListTransformerGeneratorTest.java
@@ -80,11 +80,10 @@ public class RequestReplicaListTransformerGeneratorTest extends SolrTestCaseJ4 {
new Replica(
"node4",
map(
- ZkStateReader.BASE_URL_PROP, "http://host2_2:8983/solr",
ZkStateReader.NODE_NAME_PROP, "node4",
ZkStateReader.CORE_NAME_PROP, "collection1",
ZkStateReader.REPLICA_TYPE, "TLOG"
- ), "c1","s1"
+ ), "c1","s1", nodeName -> "http://" + nodeName
)
);
@@ -93,11 +92,10 @@ public class RequestReplicaListTransformerGeneratorTest extends SolrTestCaseJ4 {
new Replica(
"node5",
map(
- ZkStateReader.BASE_URL_PROP, "http://host2_2:8983/solr",
ZkStateReader.NODE_NAME_PROP, "node5",
ZkStateReader.CORE_NAME_PROP, "collection1",
ZkStateReader.REPLICA_TYPE, "PULL"
- ), "c1","s1"
+ ), "c1","s1", nodeName -> "http://" + nodeName
)
);
@@ -133,33 +131,30 @@ public class RequestReplicaListTransformerGeneratorTest extends SolrTestCaseJ4 {
new Replica(
"node1",
map(
- ZkStateReader.BASE_URL_PROP, "http://host1:8983/solr",
ZkStateReader.NODE_NAME_PROP, "node1",
ZkStateReader.CORE_NAME_PROP, "collection1",
ZkStateReader.REPLICA_TYPE, "NRT"
- ),"c1","s1"
+ ),"c1","s1", nodeName -> "http://" + nodeName
)
);
replicas.add(
new Replica(
"node2",
map(
- ZkStateReader.BASE_URL_PROP, "http://host2:8983/solr",
ZkStateReader.NODE_NAME_PROP, "node2",
ZkStateReader.CORE_NAME_PROP, "collection1",
ZkStateReader.REPLICA_TYPE, "TLOG"
- ),"c1","s1"
+ ),"c1","s1", nodeName -> "http://" + nodeName
)
);
replicas.add(
new Replica(
"node3",
map(
- ZkStateReader.BASE_URL_PROP, "http://host2_2:8983/solr",
ZkStateReader.NODE_NAME_PROP, "node3",
ZkStateReader.CORE_NAME_PROP, "collection1",
ZkStateReader.REPLICA_TYPE, "PULL"
- ),"c1","s1"
+ ),"c1","s1", nodeName -> "http://" + nodeName
)
);
return replicas;
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/routing/ShufflingReplicaListTransformerTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/routing/ShufflingReplicaListTransformerTest.java
index 4350511..0d67d5f 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/routing/ShufflingReplicaListTransformerTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/routing/ShufflingReplicaListTransformerTest.java
@@ -42,7 +42,7 @@ public class ShufflingReplicaListTransformerTest extends SolrTestCase {
propMap.put("type", "NRT");
propMap.put("node_name", "node" + counter);
counter++;
- replicas.add(new Replica(url, propMap, "c1", "s1"));
+ replicas.add(new Replica(url, propMap, "c1", "s1", nodeName -> "http://" + nodeName));
}
implTestTransform(replicas);
}
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
index fbb2deb..a396f73 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
@@ -820,7 +820,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
}
protected SocketProxy getProxyForReplica(Replica replica) throws Exception {
- String replicaBaseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
+ String replicaBaseUrl = replica.getBaseUrl();
assertNotNull(replicaBaseUrl);
List<JettySolrRunner> runners = new ArrayList<>(jettys);
@@ -882,7 +882,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
int port = new URI(((Http2SolrClient) client).getBaseURL())
.getPort();
- if (replica.getStr(ZkStateReader.BASE_URL_PROP).contains(":" + port)) {
+ if (replica.getBaseUrl().contains(":" + port)) {
CloudSolrServerClient csc = new CloudSolrServerClient();
csc.solrClient = client;
csc.port = port;
@@ -908,7 +908,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
Set<Entry<String,Replica>> entries = slice.getReplicasMap().entrySet();
for (Entry<String,Replica> entry : entries) {
Replica replica = entry.getValue();
- if (replica.getStr(ZkStateReader.BASE_URL_PROP).contains(":" + port)) {
+ if (replica.getBaseUrl().contains(":" + port)) {
List<CloudJettyRunner> list = shardToJetty.get(slice.getName());
if (list == null) {
list = new ArrayList<>();
@@ -920,7 +920,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
cjr.info = replica;
cjr.nodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP);
cjr.coreNodeName = entry.getKey();
- cjr.url = replica.getStr(ZkStateReader.BASE_URL_PROP) + "/" + replica.getStr(ZkStateReader.CORE_NAME_PROP);
+ cjr.url = replica.getCoreUrl();
cjr.client = findClientByPort(port, theClients);
list.add(cjr);
if (isLeader) {
@@ -1969,7 +1969,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
return commonCloudSolrClient;
}
- public static String getUrlFromZk(ClusterState clusterState, String collection) {
+ public static String getUrlFromZk(ZkStateReader zkStateReader, ClusterState clusterState, String collection) {
Map<String,Slice> slices = clusterState.getCollection(collection).getSlicesMap();
if (slices == null) {
@@ -1981,9 +1981,9 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
Map<String,Replica> shards = slice.getReplicasMap();
Set<Map.Entry<String,Replica>> shardEntries = shards.entrySet();
for (Map.Entry<String,Replica> shardEntry : shardEntries) {
- final ZkNodeProps node = shardEntry.getValue();
+ final Replica node = shardEntry.getValue();
if (clusterState.liveNodesContain(node.getStr(ZkStateReader.NODE_NAME_PROP))) {
- return ZkCoreNodeProps.getCoreUrl(node.getStr(ZkStateReader.BASE_URL_PROP), collection); //new ZkCoreNodeProps(node).getCoreUrl();
+ return node.getCoreUrl(); //new ZkCoreNodeProps(node).getCoreUrl();
}
}
}
@@ -2079,10 +2079,9 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
}
protected boolean reloadCollection(Replica replica, String testCollectionName) throws Exception {
- ZkCoreNodeProps coreProps = new ZkCoreNodeProps(replica);
String coreName = replica.getName();
boolean reloadedOk = false;
- try (Http2SolrClient client = getHttpSolrClient(coreProps.getBaseUrl())) {
+ try (Http2SolrClient client = getHttpSolrClient(replica.getBaseUrl())) {
CoreAdminResponse statusResp = CoreAdminRequest.getStatus(coreName, client);
long leaderCoreStartTime = statusResp.getStartTime(coreName).getTime();
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java b/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
index 4643c65..bda9d9a 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
@@ -811,7 +811,7 @@ public class MiniSolrCloudCluster {
}
protected SocketProxy getProxyForReplica(Replica replica) throws Exception {
- String replicaBaseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
+ String replicaBaseUrl = replica.getBaseUrl();
for (JettySolrRunner j : jettys) {
if (replicaBaseUrl.replaceAll("/$", "").equals(j.getProxyBaseUrl().replaceAll("/$", ""))) {