You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by ma...@apache.org on 2020/08/17 23:11:35 UTC
[lucene-solr] 05/49: @519 Need a high limit of queued updates per
destination; initialize some collection sizes.
This is an automated email from the ASF dual-hosted git repository.
markrmiller pushed a commit to branch reference_impl
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git
commit e9ac3617d5acc9e4af1b0b9af5911416429e2056
Author: markrmiller@gmail.com <ma...@gmail.com>
AuthorDate: Tue Aug 11 22:51:20 2020 -0500
@519 Need a high limit of queued updates per destination; initialize some collection sizes.
---
.../apache/solr/update/DefaultSolrCoreState.java | 56 +++++++++++-----------
.../org/apache/solr/update/TransactionLog.java | 2 +-
.../AddSchemaFieldsUpdateProcessorFactory.java | 6 +--
.../solr/client/solrj/impl/Http2SolrClient.java | 6 +--
4 files changed, 34 insertions(+), 36 deletions(-)
diff --git a/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java b/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
index bc546d4..31aecf5 100644
--- a/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
+++ b/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
@@ -320,7 +320,8 @@ public final class DefaultSolrCoreState extends SolrCoreState implements Recover
MDCLoggingContext.setCoreDescriptor(cc, cd);
try {
if (SKIP_AUTO_RECOVERY) {
- log.warn("Skipping recovery according to sys prop solrcloud.skip.autorecovery");
+ log.warn(
+ "Skipping recovery according to sys prop solrcloud.skip.autorecovery");
return;
}
@@ -343,36 +344,33 @@ public final class DefaultSolrCoreState extends SolrCoreState implements Recover
cancelRecovery();
recoveryLock.lock();
- try {
- // don't use recoveryLock.getQueueLength() for this
- if (recoveryWaiting.decrementAndGet() > 0) {
- // another recovery waiting behind us, let it run now instead of after we finish
- return;
- }
+ // don't use recoveryLock.getQueueLength() for this
+ if (recoveryWaiting.decrementAndGet() > 0) {
+ // another recovery waiting behind us, let it run now instead of after we finish
+ return;
+ }
- // to be air tight we must also check after lock
- if (prepForClose || closed || cc.isShutDown()) {
- log.info("Skipping recovery due to being closed");
- return;
- }
- log.info("Running recovery");
+ // to be air tight we must also check after lock
+ if (prepForClose || closed || cc.isShutDown()) {
+ log.info("Skipping recovery due to being closed");
+ return;
+ }
+ log.info("Running recovery");
- recoveryThrottle.minimumWaitBetweenActions();
- recoveryThrottle.markAttemptingAction();
- if (recoveryStrat != null) {
- ParWork.close(recoveryStrat);
- }
+ recoveryThrottle.minimumWaitBetweenActions();
+ recoveryThrottle.markAttemptingAction();
+ if (recoveryStrat != null) {
+ ParWork.close(recoveryStrat);
+ }
- if (prepForClose || cc.isShutDown() || closed) {
- return;
- }
- recoveryStrat = recoveryStrategyBuilder
- .create(cc, cd, DefaultSolrCoreState.this);
- recoveryStrat.setRecoveringAfterStartup(recoveringAfterStartup);
- recoveryStrat.run();
- } finally {
- recoveryLock.unlock();
+ if (prepForClose || cc.isShutDown() || closed) {
+ return;
}
+ recoveryStrat = recoveryStrategyBuilder
+ .create(cc, cd, DefaultSolrCoreState.this);
+ recoveryStrat.setRecoveringAfterStartup(recoveringAfterStartup);
+ recoveryStrat.run();
+
} finally {
if (locked) recoveryLock.unlock();
}
@@ -386,8 +384,8 @@ public final class DefaultSolrCoreState extends SolrCoreState implements Recover
// already queued up - the recovery execution itself is run
// in another thread on another 'recovery' executor.
//
- // avoid deadlock: we can't use the recovery executor here!
- recoveryFuture = cc.getUpdateShardHandler().getRecoveryExecutor().submit(recoveryTask);
+ recoveryFuture = cc.getUpdateShardHandler().getRecoveryExecutor()
+ .submit(recoveryTask);
} catch (RejectedExecutionException e) {
// fine, we are shutting down
}
diff --git a/solr/core/src/java/org/apache/solr/update/TransactionLog.java b/solr/core/src/java/org/apache/solr/update/TransactionLog.java
index 00a0899..cd39713 100644
--- a/solr/core/src/java/org/apache/solr/update/TransactionLog.java
+++ b/solr/core/src/java/org/apache/solr/update/TransactionLog.java
@@ -307,7 +307,7 @@ public class TransactionLog implements Closeable {
assert pos == 0;
@SuppressWarnings({"rawtypes"})
- Map header = new LinkedHashMap<String, Object>();
+ Map header = new LinkedHashMap<String, Object>(2);
header.put("SOLR_TLOG", 1); // a magic string + version number
header.put("strings", globalStringList);
codec.marshal(header, fos);
diff --git a/solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java
index 2ded9c7..113f1a1 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java
@@ -387,13 +387,13 @@ public class AddSchemaFieldsUpdateProcessorFactory extends UpdateRequestProcesso
IndexSchema oldSchema = cmd.getReq().getSchema();
for (;;) {
List<SchemaField> newFields = new ArrayList<>();
- // Group copyField defs per field and then per maxChar, to adapt to IndexSchema API
- Map<String,Map<Integer,List<CopyFieldDef>>> newCopyFields = new HashMap<>();
+ // Group copyField defs per field and then per maxChar, to adapt to IndexSchema API
// build a selector each time through the loop b/c the schema we are
// processing may have changed
FieldNameSelector selector = buildSelector(oldSchema);
Map<String,List<SolrInputField>> unknownFields = new HashMap<>();
getUnknownFields(selector, doc, unknownFields);
+ Map<String,Map<Integer,List<CopyFieldDef>>> newCopyFields = new HashMap<>(unknownFields.size() + 1);
for (final Map.Entry<String,List<SolrInputField>> entry : unknownFields.entrySet()) {
String fieldName = entry.getKey();
String fieldTypeName = defaultFieldType;
@@ -416,7 +416,7 @@ public class AddSchemaFieldsUpdateProcessorFactory extends UpdateRequestProcesso
throw new SolrException(BAD_REQUEST, message);
}
if (log.isDebugEnabled()) {
- StringBuilder builder = new StringBuilder();
+ StringBuilder builder = new StringBuilder(512);
builder.append("\nFields to be added to the schema: [");
boolean isFirst = true;
for (SchemaField field : newFields) {
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/Http2SolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/Http2SolrClient.java
index 9fc5d80..bbeefb1 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/Http2SolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/Http2SolrClient.java
@@ -222,7 +222,7 @@ public class Http2SolrClient extends SolrClient {
HTTP2Client http2client = new HTTP2Client();
transport = new HttpClientTransportOverHTTP2(http2client);
httpClient = new HttpClient(transport, sslContextFactory);
- httpClient.setMaxConnectionsPerDestination(300);
+ if (builder.maxConnectionsPerHost != null) httpClient.setMaxConnectionsPerDestination(builder.maxConnectionsPerHost);
}
httpClientExecutor = new SolrQueuedThreadPool("httpClient");
@@ -876,7 +876,7 @@ public class Http2SolrClient extends SolrClient {
private class AsyncTracker {
// nocommit - look at outstanding max again
- private static final int MAX_OUTSTANDING_REQUESTS = 10;
+ private static final int MAX_OUTSTANDING_REQUESTS = 20;
private final Semaphore available;
@@ -912,7 +912,7 @@ public class Http2SolrClient extends SolrClient {
int getMaxRequestsQueuedPerDestination() {
// comfortably above max outstanding requests
- return MAX_OUTSTANDING_REQUESTS * 3;
+ return MAX_OUTSTANDING_REQUESTS * 10;
}
public void waitForComplete() {