You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by dw...@apache.org on 2021/03/10 09:49:39 UTC
[lucene] 09/09: SOLR-12730: Cleanup.
This is an automated email from the ASF dual-hosted git repository.
dweiss pushed a commit to branch jira/solr-12730
in repository https://gitbox.apache.org/repos/asf/lucene.git
commit e12aebac31e0747fcea92ad8581f2b9aa1a4b66b
Author: Andrzej Bialecki <ab...@apache.org>
AuthorDate: Mon Nov 5 19:04:34 2018 +0100
SOLR-12730: Cleanup.
---
.../org/apache/lucene/index/IndexFileDeleter.java | 25 ++--------
.../java/org/apache/lucene/store/FSDirectory.java | 1 -
.../solr/cloud/api/collections/SplitShardCmd.java | 55 ++++++++++++++++------
.../solr/cloud/autoscaling/IndexSizeTrigger.java | 11 +----
.../org/apache/solr/core/DirectoryFactory.java | 2 -
.../solr/core/IndexDeletionPolicyWrapper.java | 15 +-----
.../src/java/org/apache/solr/core/SolrCore.java | 27 -----------
.../apache/solr/handler/admin/MetricsHandler.java | 8 ----
.../org/apache/solr/update/SolrIndexConfig.java | 29 +-----------
.../processor/DistributedUpdateProcessor.java | 2 +-
.../src/java/org/apache/solr/util/TimeOut.java | 2 +-
.../configsets/cloud-minimal/conf/solrconfig.xml | 3 +-
.../cloud/autoscaling/IndexSizeTriggerTest.java | 14 +++---
.../autoscaling/sim/SimClusterStateProvider.java | 5 ++
.../autoscaling/sim/SimSolrCloudTestCase.java | 5 +-
.../solrj/cloud/autoscaling/AutoScalingConfig.java | 2 +-
16 files changed, 69 insertions(+), 137 deletions(-)
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java b/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java
index cd5cf7f..cc9d2e0 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java
@@ -20,8 +20,6 @@ package org.apache.lucene.index;
import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
-import java.io.PrintWriter;
-import java.io.StringWriter;
import java.nio.file.NoSuchFileException;
import java.util.ArrayList;
import java.util.Collection;
@@ -110,7 +108,7 @@ final class IndexFileDeleter implements Closeable {
/** Change to true to see details of reference counts when
* infoStream is enabled */
- public static boolean VERBOSE_REF_COUNTS = true;
+ public static boolean VERBOSE_REF_COUNTS = false;
private final IndexWriter writer;
@@ -566,16 +564,7 @@ final class IndexFileDeleter implements Closeable {
RefCount rc = getRefCount(fileName);
if (infoStream.isEnabled("IFD")) {
if (VERBOSE_REF_COUNTS) {
- // todo nocommit remove the extra logging
- String extra = null;
- if ("_0.fdt".equals(fileName)) {
- StringWriter stringWriter = new StringWriter();
- PrintWriter writer = new PrintWriter(stringWriter);
- new Exception().printStackTrace(writer);
- extra = stringWriter.toString();
- }
-
- infoStream.message("IFD", " IncRef \"" + fileName + "\": pre-incr count is " + rc.count + (extra != null ? " and stack is " + extra : ""));
+ infoStream.message("IFD", " IncRef \"" + fileName + "\": pre-incr count is " + rc.count);
}
}
rc.IncRef();
@@ -613,15 +602,7 @@ final class IndexFileDeleter implements Closeable {
RefCount rc = getRefCount(fileName);
if (infoStream.isEnabled("IFD")) {
if (VERBOSE_REF_COUNTS) {
- String extra = null;
- // todo nocommit remove the extra logging
- if ("_0.fdt".equals(fileName)) {
- StringWriter stringWriter = new StringWriter();
- PrintWriter writer = new PrintWriter(stringWriter);
- new Exception().printStackTrace(writer);
- extra = stringWriter.toString();
- }
- infoStream.message("IFD", " DecRef \"" + fileName + "\": pre-decr count is " + rc.count + (extra != null ? " and stack is " + extra : ""));
+ infoStream.message("IFD", " DecRef \"" + fileName + "\": pre-decr count is " + rc.count);
}
}
if (rc.DecRef() == 0) {
diff --git a/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java b/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java
index db4e1f9..fb03f85 100644
--- a/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java
@@ -339,7 +339,6 @@ public abstract class FSDirectory extends BaseDirectory {
}
privateDeleteFile(name, false);
maybeDeletePendingFiles();
- System.out.println("Deleted file: " + name);
}
/** Try to delete any pending files that we had previously tried to delete but failed
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
index b9370ac..f9ac520 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
@@ -26,16 +26,19 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.NoSuchElementException;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
+import org.apache.solr.client.solrj.cloud.DistribStateManager;
import org.apache.solr.client.solrj.cloud.DistributedQueue;
import org.apache.solr.client.solrj.cloud.NodeStateProvider;
import org.apache.solr.client.solrj.cloud.SolrCloudManager;
import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
import org.apache.solr.client.solrj.cloud.autoscaling.ReplicaInfo;
import org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type;
+import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
import org.apache.solr.client.solrj.request.CoreAdminRequest;
import org.apache.solr.cloud.Overseer;
import org.apache.solr.cloud.overseer.OverseerAction;
@@ -774,23 +777,47 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
public static boolean lockForSplit(SolrCloudManager cloudManager, String collection, String shard) throws Exception {
String path = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection + "/" + shard + "-splitting";
- if (cloudManager.getDistribStateManager().hasData(path)) {
- return false;
- }
- Map<String, Object> map = new HashMap<>();
- map.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(cloudManager.getTimeSource().getEpochTimeNs()));
- byte[] data = Utils.toJSON(map);
- try {
- cloudManager.getDistribStateManager().makePath(path, data, CreateMode.EPHEMERAL, true);
- } catch (Exception e) {
- throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Can't lock parent slice for splitting (another split operation running?): " +
- collection + "/" + shard, e);
+ log.info("===locking " + path, new Exception());
+ final DistribStateManager stateManager = cloudManager.getDistribStateManager();
+ synchronized (stateManager) {
+ if (stateManager.hasData(path)) {
+ VersionedData vd = stateManager.getData(path);
+ log.info("=== already locked! {}", Utils.fromJSON(vd.getData()));
+ return false;
+ }
+ Map<String, Object> map = new HashMap<>();
+ map.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(cloudManager.getTimeSource().getEpochTimeNs()));
+ byte[] data = Utils.toJSON(map);
+ try {
+ cloudManager.getDistribStateManager().makePath(path, data, CreateMode.EPHEMERAL, true);
+ } catch (Exception e) {
+ throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Can't lock parent slice for splitting (another split operation running?): " +
+ collection + "/" + shard, e);
+ }
+ return true;
}
- return true;
}
public static void unlockForSplit(SolrCloudManager cloudManager, String collection, String shard) throws Exception {
- String path = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection + "/" + shard + "-splitting";
- cloudManager.getDistribStateManager().removeRecursively(path, true, true);
+ if (shard != null) {
+ String path = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection + "/" + shard + "-splitting";
+ cloudManager.getDistribStateManager().removeRecursively(path, true, true);
+ } else {
+ String path = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection;
+ try {
+ List<String> names = cloudManager.getDistribStateManager().listData(path);
+ for (String name : cloudManager.getDistribStateManager().listData(path)) {
+ if (name.endsWith("-splitting")) {
+ try {
+ cloudManager.getDistribStateManager().removeData(path + "/" + name, -1);
+ } catch (NoSuchElementException nse) {
+ // ignore
+ }
+ }
+ }
+ } catch (NoSuchElementException nse) {
+ // ignore
+ }
+ }
}
}
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
index 27daec2..7483501 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
@@ -171,7 +171,7 @@ public class IndexSizeTrigger extends TriggerBase {
} catch (Exception e) {
throw new TriggerValidationException(getName(), MAX_OPS_PROP, "invalid value: '" + maxOpsStr + "': " + e.getMessage());
}
- String methodStr = (String)properties.getOrDefault(CommonAdminParams.SPLIT_METHOD, SolrIndexSplitter.SplitMethod.REWRITE.toLower());
+ String methodStr = (String)properties.getOrDefault(CommonAdminParams.SPLIT_METHOD, SolrIndexSplitter.SplitMethod.LINK.toLower());
splitMethod = SolrIndexSplitter.SplitMethod.get(methodStr);
if (splitMethod == null) {
throw new TriggerValidationException(getName(), SPLIT_METHOD_PROP, "Unknown value '" + CommonAdminParams.SPLIT_METHOD +
@@ -286,7 +286,6 @@ public class IndexSizeTrigger extends TriggerBase {
String registry = SolrCoreMetricManager.createRegistryName(true, coll, sh, replicaName, null);
String tag = "metrics:" + registry + ":INDEX.sizeInBytes";
metricTags.put(tag, info);
- metricTags.put("metrics:" + registry + ":INDEX.sizeDetails", info);
tag = "metrics:" + registry + ":SEARCHER.searcher.numDocs";
metricTags.put(tag, info);
});
@@ -463,14 +462,6 @@ public class IndexSizeTrigger extends TriggerBase {
if (ops.isEmpty()) {
return;
}
- try {
- ClusterState cs = cloudManager.getClusterStateProvider().getClusterState();
- cs.forEachCollection(coll -> {
- log.debug("##== Collection: {}", coll);
- });
- } catch (IOException e) {
- throw new RuntimeException("oops: ", e);
- }
if (processor.process(new IndexSizeEvent(getName(), eventTime.get(), ops, aboveSize, belowSize))) {
// update last event times
aboveSize.forEach((coll, replicas) -> {
diff --git a/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
index a05a36c..fab3300 100644
--- a/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
+++ b/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
@@ -280,8 +280,6 @@ public abstract class DirectoryFactory implements NamedListInitializedPlugin,
break;
}
}
-
- System.out.println(Arrays.toString(files) + " size=" + size + " on path: " + directory.toString());
return size;
}
diff --git a/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java b/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
index ee7ed36..40e65b7 100644
--- a/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
+++ b/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
@@ -130,7 +130,6 @@ public final class IndexDeletionPolicyWrapper extends IndexDeletionPolicy {
if (reserveCount == null) reserveCount = new AtomicInteger();
reserveCount.incrementAndGet();
savedCommits.put(indexCommitGen, reserveCount);
- log.debug("Saving commit point for generation {}", indexCommitGen);
}
/** Release a previously saved commit point */
@@ -139,7 +138,6 @@ public final class IndexDeletionPolicyWrapper extends IndexDeletionPolicy {
if (reserveCount == null) return;// this should not happen
if (reserveCount.decrementAndGet() <= 0) {
savedCommits.remove(indexCommitGen);
- log.debug("Releasing commit point for generation {}", indexCommitGen);
}
}
@@ -191,20 +189,11 @@ public final class IndexDeletionPolicyWrapper extends IndexDeletionPolicy {
@Override
public void delete() {
Long gen = delegate.getGeneration();
- log.debug("Checking whether we can delete commit point with generation: {}", gen);
Long reserve = reserves.get(gen);
- long currentTime = System.nanoTime();
- if (reserve != null && currentTime < reserve) {
- log.debug("Commit point with generation: {} not deleted because its reserve {} is less than current time {}", gen, reserve, currentTime);
- return;
- }
- if (savedCommits.containsKey(gen)) {
- log.debug("Commit point with generation: {} not deleted because it is saved");
- return;
- }
+ if (reserve != null && System.nanoTime() < reserve) return;
+ if (savedCommits.containsKey(gen)) return;
if (snapshotMgr.isSnapshotted(gen)) return;
delegate.delete();
- log.debug("Commit point with generation: {} deleted", gen);
}
@Override
diff --git a/solr/core/src/java/org/apache/solr/core/SolrCore.java b/solr/core/src/java/org/apache/solr/core/SolrCore.java
index 1af7771..6e13039 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrCore.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrCore.java
@@ -433,7 +433,6 @@ public final class SolrCore implements SolrInfoBean, SolrMetricProducer, Closeab
dir = directoryFactory.get(getIndexDir(), DirContext.DEFAULT, solrConfig.indexConfig.lockType);
try {
size = DirectoryFactory.sizeOfDirectory(dir);
- System.out.println("Found size=" + size + " for indexDir=" + getIndexDir());
} finally {
directoryFactory.release(dir);
}
@@ -444,31 +443,6 @@ public final class SolrCore implements SolrInfoBean, SolrMetricProducer, Closeab
return size;
}
- String getIndexSizeDetails() {
- Directory dir;
- StringBuilder sb = new StringBuilder();
- try {
- if (directoryFactory.exists(getIndexDir())) {
- dir = directoryFactory.get(getIndexDir(), DirContext.DEFAULT, solrConfig.indexConfig.lockType);
- try {
- String[] files = dir.listAll();
- Arrays.sort(files);
- for (String file : files) {
- sb.append('\n');
- sb.append(file);
- sb.append('\t');
- sb.append(String.valueOf(dir.fileLength(file)));
- }
- } finally {
- directoryFactory.release(dir);
- }
- }
- } catch (IOException e) {
- SolrException.log(log, "IO error while trying to get the details of the Directory", e);
- }
- return sb.toString();
- }
-
@Override
public String getName() {
return name;
@@ -1187,7 +1161,6 @@ public final class SolrCore implements SolrInfoBean, SolrMetricProducer, Closeab
manager.registerGauge(this, registry, () -> resourceLoader.getInstancePath().toString(), getMetricTag(), true, "instanceDir", Category.CORE.toString());
manager.registerGauge(this, registry, () -> isClosed() ? "(closed)" : getIndexDir(), getMetricTag(), true, "indexDir", Category.CORE.toString());
manager.registerGauge(this, registry, () -> isClosed() ? 0 : getIndexSize(), getMetricTag(), true, "sizeInBytes", Category.INDEX.toString());
- manager.registerGauge(this, registry, () -> isClosed() ? "" : getIndexSizeDetails(), getMetricTag(), true, "sizeDetails", Category.INDEX.toString());
manager.registerGauge(this, registry, () -> isClosed() ? "(closed)" : NumberUtils.readableSize(getIndexSize()), getMetricTag(), true, "size", Category.INDEX.toString());
if (coreContainer != null) {
manager.registerGauge(this, registry, () -> coreContainer.getNamesForCore(this), getMetricTag(), true, "aliases", Category.CORE.toString());
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java
index 9b9f948..752e021 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java
@@ -17,7 +17,6 @@
package org.apache.solr.handler.admin;
-import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
@@ -42,7 +41,6 @@ import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.handler.RequestHandlerBase;
import org.apache.solr.metrics.SolrMetricManager;
@@ -51,15 +49,11 @@ import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.security.AuthorizationContext;
import org.apache.solr.security.PermissionNameProvider;
import org.apache.solr.util.stats.MetricUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Request handler to return metrics
*/
public class MetricsHandler extends RequestHandlerBase implements PermissionNameProvider {
- private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
final SolrMetricManager metricManager;
public static final String COMPACT_PARAM = "compact";
@@ -105,8 +99,6 @@ public class MetricsHandler extends RequestHandlerBase implements PermissionName
}
handleRequest(req.getParams(), (k, v) -> rsp.add(k, v));
- log.debug("##== Req: {}", req);
- log.debug("##== Rsp: {}", Utils.toJSONString(rsp.getValues()));
}
public void handleRequest(SolrParams params, BiConsumer<String, Object> consumer) throws Exception {
diff --git a/solr/core/src/java/org/apache/solr/update/SolrIndexConfig.java b/solr/core/src/java/org/apache/solr/update/SolrIndexConfig.java
index 0df2b16..48b2417 100644
--- a/solr/core/src/java/org/apache/solr/update/SolrIndexConfig.java
+++ b/solr/core/src/java/org/apache/solr/update/SolrIndexConfig.java
@@ -18,7 +18,6 @@ package org.apache.solr.update;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
-import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
@@ -26,7 +25,6 @@ import java.util.Map;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
import org.apache.lucene.index.ConcurrentMergeScheduler;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriter.IndexReaderWarmer;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.MergePolicy;
@@ -50,7 +48,6 @@ import org.apache.solr.schema.IndexSchema;
import org.apache.solr.util.SolrPluginUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
import static org.apache.solr.core.Config.assertWarnOrFail;
@@ -286,30 +283,7 @@ public class SolrIndexConfig implements MapSerializable {
private MergeScheduler buildMergeScheduler(IndexSchema schema) {
String msClassName = mergeSchedulerInfo == null ? SolrIndexConfig.DEFAULT_MERGE_SCHEDULER_CLASSNAME : mergeSchedulerInfo.className;
- // todo nocommit -- remove this scheduler instance with proper MDC logging support inside merge scheduler threads
- MergeScheduler scheduler = new ConcurrentMergeScheduler() {
- @Override
- protected synchronized MergeThread getMergeThread(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
- MergeThread mergeThread = super.getMergeThread(writer, merge);
- final Map<String, String> submitterContext = MDC.getCopyOfContextMap();
- StringBuilder contextString = new StringBuilder();
- if (submitterContext != null) {
- Collection<String> values = submitterContext.values();
-
- for (String value : values) {
- contextString.append(value + " ");
- }
- if (contextString.length() > 1) {
- contextString.setLength(contextString.length() - 1);
- }
- }
-
- String ctxStr = contextString.toString().replace("/", "//");
- final String submitterContextStr = ctxStr.length() <= 512 ? ctxStr : ctxStr.substring(0, 512);
- mergeThread.setName(mergeThread.getName() + "-processing-" + submitterContextStr);
- return mergeThread;
- }
- };
+ MergeScheduler scheduler = schema.getResourceLoader().newInstance(msClassName, MergeScheduler.class);
if (mergeSchedulerInfo != null) {
// LUCENE-5080: these two setters are removed, so we have to invoke setMaxMergesAndThreads
@@ -337,4 +311,5 @@ public class SolrIndexConfig implements MapSerializable {
return scheduler;
}
+
}
diff --git a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
index 8bdc8bc..c0ca0dc 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
@@ -1391,7 +1391,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
}
if (dropCmd) {
- // TODO: do we need to add anytprocehing to the response?
+ // TODO: do we need to add anything to the response?
return;
}
diff --git a/solr/core/src/java/org/apache/solr/util/TimeOut.java b/solr/core/src/java/org/apache/solr/util/TimeOut.java
index ce996f4..a0220a7 100644
--- a/solr/core/src/java/org/apache/solr/util/TimeOut.java
+++ b/solr/core/src/java/org/apache/solr/util/TimeOut.java
@@ -61,7 +61,7 @@ public class TimeOut {
public void waitFor(String messageOnTimeOut, Supplier<Boolean> supplier)
throws InterruptedException, TimeoutException {
while (!supplier.get() && !hasTimedOut()) {
- Thread.sleep(500);
+ timeSource.sleep(500);
}
if (hasTimedOut()) throw new TimeoutException(messageOnTimeOut);
}
diff --git a/solr/core/src/test-files/solr/configsets/cloud-minimal/conf/solrconfig.xml b/solr/core/src/test-files/solr/configsets/cloud-minimal/conf/solrconfig.xml
index d98ba99..853ba65 100644
--- a/solr/core/src/test-files/solr/configsets/cloud-minimal/conf/solrconfig.xml
+++ b/solr/core/src/test-files/solr/configsets/cloud-minimal/conf/solrconfig.xml
@@ -46,7 +46,6 @@
</requestHandler>
<indexConfig>
<mergeScheduler class="${solr.mscheduler:org.apache.lucene.index.ConcurrentMergeScheduler}"/>
- <infoStream>true</infoStream>
- </indexConfig>
+  </indexConfig>
</config>
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
index 314ddbd..f06a57d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
@@ -35,6 +35,7 @@ import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.cloud.SolrCloudManager;
import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
+import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventProcessorStage;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
@@ -44,7 +45,6 @@ import org.apache.solr.cloud.CloudTestUtils;
import org.apache.solr.cloud.SolrCloudTestCase;
import org.apache.solr.cloud.autoscaling.sim.SimCloudManager;
import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.params.CommonAdminParams;
import org.apache.solr.common.params.CommonParams;
@@ -54,7 +54,6 @@ import org.apache.solr.common.util.Pair;
import org.apache.solr.common.util.TimeSource;
import org.apache.solr.common.util.Utils;
import org.apache.solr.core.SolrResourceLoader;
-import org.apache.solr.util.LogLevel;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -95,7 +94,7 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
configureCluster(2)
.addConfig("conf", configset("cloud-minimal"))
.configure();
- if (random().nextBoolean() || true) {
+ if (random().nextBoolean() && false) {
cloudManager = cluster.getJettySolrRunner(0).getCoreContainer().getZkController().getSolrCloudManager();
solrClient = cluster.getSolrClient();
loader = cluster.getJettySolrRunner(0).getCoreContainer().getResourceLoader();
@@ -126,7 +125,8 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
} else {
cluster.deleteAllCollections();
}
- cloudManager.getDistribStateManager().setData(SOLR_AUTOSCALING_CONF_PATH, Utils.toJSON(new ZkNodeProps()), -1);
+ AutoScalingConfig autoScalingConfig = new AutoScalingConfig(new Policy(), null, null, null, 0);
+ cloudManager.getDistribStateManager().setData(SOLR_AUTOSCALING_CONF_PATH, Utils.toJSON(autoScalingConfig), -1);
cloudManager.getTimeSource().sleep(5000);
listenerEvents.clear();
listenerCreated = new CountDownLatch(1);
@@ -211,7 +211,7 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
}
Map<String, Object> params = (Map<String, Object>)op.getHints().get(Suggester.Hint.PARAMS);
assertNotNull("params are null: " + op, params);
- assertEquals("splitMethod: " + op, "rewrite", params.get(CommonAdminParams.SPLIT_METHOD));
+ assertEquals("splitMethod: " + op, "link", params.get(CommonAdminParams.SPLIT_METHOD));
}
assertTrue("shard1 should be split", shard1);
assertTrue("shard2 should be split", shard2);
@@ -317,7 +317,7 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
timeSource.sleep(TimeUnit.MILLISECONDS.convert(waitForSeconds + 1, TimeUnit.SECONDS));
- boolean await = finished.await(60000 / SPEED, TimeUnit.MILLISECONDS);
+ boolean await = finished.await(90000 / SPEED, TimeUnit.MILLISECONDS);
assertTrue("did not finish processing in time", await);
CloudTestUtils.waitForState(cloudManager, collectionName, 20, TimeUnit.SECONDS, CloudTestUtils.clusterShape(6, 2, true, true));
assertEquals(1, listenerEvents.size());
@@ -506,7 +506,7 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
}
assertTrue("maxSize should be non-zero", maxSize > 0);
- int aboveBytes = maxSize * 9 / 10;
+ int aboveBytes = maxSize * 8 / 10;
// need to wait for recovery after splitting
long waitForSeconds = 10 + random().nextInt(5);
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
index a7471eb..ef7e2ef 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
@@ -954,6 +954,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
sliceProperties.remove(collection);
leaderThrottles.remove(collection);
colShardReplicaMap.remove(collection);
+ SplitShardCmd.unlockForSplit(cloudManager, collection, null);
opDelay(collection, CollectionParams.CollectionAction.DELETE.name());
@@ -1007,6 +1008,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
values.put(ImplicitSnitch.SYSLOADAVG, 1.0);
values.put(ImplicitSnitch.HEAPUSAGE, 123450000);
});
+ cloudManager.getDistribStateManager().removeRecursively(ZkStateReader.COLLECTIONS_ZKNODE, true, false);
} finally {
lock.unlock();
}
@@ -1260,6 +1262,9 @@ public class SimClusterStateProvider implements ClusterStateProvider {
success = true;
} finally {
if (!success) {
+ Map<String, Object> sProps = sliceProperties.computeIfAbsent(collectionName, c -> new ConcurrentHashMap<>())
+ .computeIfAbsent(sliceName.get(), s -> new ConcurrentHashMap<>());
+ sProps.remove(BUFFERED_UPDATES);
SplitShardCmd.unlockForSplit(cloudManager, collectionName, sliceName.get());
}
}
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimSolrCloudTestCase.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimSolrCloudTestCase.java
index 3d41ea4..3c9bd26 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimSolrCloudTestCase.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimSolrCloudTestCase.java
@@ -25,6 +25,8 @@ import java.util.List;
import java.util.function.Predicate;
import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
+import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
@@ -87,7 +89,8 @@ public class SimSolrCloudTestCase extends SolrTestCaseJ4 {
if (cluster == null)
throw new RuntimeException("SimCloudManager not configured - have you called configureCluster()?");
// clear any persisted configuration
- cluster.getDistribStateManager().setData(SOLR_AUTOSCALING_CONF_PATH, Utils.toJSON(new ZkNodeProps()), -1);
+ AutoScalingConfig autoScalingConfig = new AutoScalingConfig(new Policy(), null, null, null, 0);
+ cluster.getDistribStateManager().setData(SOLR_AUTOSCALING_CONF_PATH, Utils.toJSON(autoScalingConfig), -1);
cluster.getDistribStateManager().setData(ZkStateReader.ROLES, Utils.toJSON(new HashMap<>()), -1);
cluster.getSimNodeStateProvider().simRemoveDeadNodes();
cluster.getSimClusterStateProvider().simRemoveDeadNodes();
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/AutoScalingConfig.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/AutoScalingConfig.java
index ccd02eb..8a52598 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/AutoScalingConfig.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/AutoScalingConfig.java
@@ -324,7 +324,7 @@ public class AutoScalingConfig implements MapWriter {
empty = jsonMap.isEmpty();
}
- private AutoScalingConfig(Policy policy, Map<String, TriggerConfig> triggerConfigs, Map<String,
+ public AutoScalingConfig(Policy policy, Map<String, TriggerConfig> triggerConfigs, Map<String,
TriggerListenerConfig> listenerConfigs, Map<String, Object> properties, int zkVersion) {
this.policy = policy;
this.triggers = triggerConfigs != null ? Collections.unmodifiableMap(new LinkedHashMap<>(triggerConfigs)) : null;