You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by ma...@apache.org on 2020/11/10 06:14:40 UTC

[lucene-solr] branch reference_impl_dev updated (b37b718 -> 69bf236)

This is an automated email from the ASF dual-hosted git repository.

markrmiller pushed a change to branch reference_impl_dev
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git.


    from b37b718  @1133 Bit of clusterstate management work.
     new c8edb42  @1134 Higher limits on overseer task queue, return parallel index fetch for now.
     new 177e172  @1135 Cluster state management hardening.
     new 69bf236  @1136 Deal with collections that have EMPTY createNodeSet.

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../src/java/org/apache/solr/cloud/Overseer.java   |  2 +-
 .../cloud/api/collections/CreateCollectionCmd.java | 45 ++++++++++++----------
 .../apache/solr/cloud/overseer/ZkStateWriter.java  |  3 +-
 .../java/org/apache/solr/handler/IndexFetcher.java |  8 ++--
 .../org/apache/solr/schema/ManagedIndexSchema.java |  2 +-
 .../apache/solr/cloud/ShardRoutingCustomTest.java  | 41 ++++++--------------
 6 files changed, 44 insertions(+), 57 deletions(-)


[lucene-solr] 02/03: @1135 Cluster state management hardening.

Posted by ma...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

markrmiller pushed a commit to branch reference_impl_dev
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit 177e1723d8bedeb10b5e248d036537103a3ed53e
Author: markrmiller@gmail.com <ma...@gmail.com>
AuthorDate: Mon Nov 9 23:35:55 2020 -0600

    @1135 Cluster state management hardening.
---
 solr/core/src/java/org/apache/solr/cloud/overseer/ZkStateWriter.java | 3 ++-
 solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java    | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ZkStateWriter.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ZkStateWriter.java
index a2c4ce1..55726c2 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/ZkStateWriter.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/ZkStateWriter.java
@@ -309,7 +309,8 @@ public class ZkStateWriter {
             version = v;
           }
 
-          reader.getZkClient().setData(path, data, version, true);
+
+          reader.getZkClient().setData(path, data, version == 0 ? -1 : version, true);
 
           trackVersions.put(collection.getName(), version + 1);
         } catch (KeeperException.NoNodeException e) {
diff --git a/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java b/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java
index 7219bfe..bdfdeb8 100644
--- a/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java
+++ b/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java
@@ -213,7 +213,7 @@ public final class ManagedIndexSchema extends IndexSchema {
         try {
           // Assumption: the path exists
           int ver = schemaZkVersion;
-          Stat stat = zkClient.setData(managedSchemaPath, data, ver, true);
+          Stat stat = zkClient.setData(managedSchemaPath, data, ver == 0 ? -1 : ver, true);
           schemaZkVersion = stat.getVersion();
           log.info("Persisted managed schema version {} at {}", ver, managedSchemaPath);
         } catch (KeeperException.BadVersionException e) {


[lucene-solr] 01/03: @1134 Higher limits on overseer task queue, return parallel index fetch for now.

Posted by ma...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

markrmiller pushed a commit to branch reference_impl_dev
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit c8edb4246886c48a970bb5b89c12a3a580c666df
Author: markrmiller@gmail.com <ma...@gmail.com>
AuthorDate: Mon Nov 9 23:13:06 2020 -0600

    @1134 Higher limits on overseer task queue, return parallel index fetch for now.
---
 solr/core/src/java/org/apache/solr/cloud/Overseer.java       | 2 +-
 solr/core/src/java/org/apache/solr/handler/IndexFetcher.java | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/solr/core/src/java/org/apache/solr/cloud/Overseer.java b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
index 683f3da..a74ecee 100644
--- a/solr/core/src/java/org/apache/solr/cloud/Overseer.java
+++ b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
@@ -286,7 +286,7 @@ public class Overseer implements SolrCloseable {
 //     stateManagmentExecutor = ParWork.getParExecutorService("stateManagmentExecutor",
 //        1, 1, 3000, new SynchronousQueue());
      taskExecutor = ParWork.getParExecutorService("overseerTaskExecutor",
-        4, 16, 10000, new SynchronousQueue());
+        10, 32, 10000, new SynchronousQueue());
 
 //    try {
 //      if (context != null) context.close();
diff --git a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
index 5ed6ea1..c2fa9f6 100644
--- a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
+++ b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
@@ -949,9 +949,9 @@ public class IndexFetcher {
       if (!status) {
         throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Failed to create temporary config folder: " + tmpconfDir.getName());
       }
-      //try (ParWork work = new ParWork(this, true)) {
+      try (ParWork work = new ParWork(this, true)) {
         for (Map<String,Object> file : confFilesToDownload) {
-       //   work.collect("fetchConfigFile", () -> {
+          work.collect("fetchConfigFile", () -> {
             try {
               String saveAs = (String) (file.get(ALIAS) == null ? file.get(NAME) : file.get(ALIAS));
               localFileFetcher = new LocalFsFileFetcher(tmpconfDir, file, saveAs, CONF_FILE_SHORT, latestGeneration);
@@ -968,8 +968,8 @@ public class IndexFetcher {
             } finally {
               fileFetchRequests.remove(file.get(NAME));
             }
-        //  });
-      //  }
+          });
+        }
       }
       // this is called before copying the files to the original conf dir
       // so that if there is an exception avoid corrupting the original files.


[lucene-solr] 03/03: @1136 Deal with collections that have EMPTY createNodeSet.

Posted by ma...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

markrmiller pushed a commit to branch reference_impl_dev
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit 69bf23677af94794f507ce4fa82d98d8bd696e19
Author: markrmiller@gmail.com <ma...@gmail.com>
AuthorDate: Tue Nov 10 00:13:02 2020 -0600

    @1136 Deal with collections that have EMPTY createNodeSet.
---
 .../cloud/api/collections/CreateCollectionCmd.java | 45 ++++++++++++----------
 .../apache/solr/cloud/ShardRoutingCustomTest.java  | 41 ++++++--------------
 2 files changed, 36 insertions(+), 50 deletions(-)

diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
index 63b8ad7..2cf5a41 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
@@ -334,32 +334,35 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
             log.info("Cleaned up artifacts for failed create collection for [{}]", collectionName);
             //throw new SolrException(ErrorCode.BAD_REQUEST, "Underlying core creation failed while creating collection: " + collectionName + "\n" + results);
           } else {
-
+            Object createNodeSet = message.get(ZkStateReader.CREATE_NODE_SET);
+            log.info("createNodeSet={}", createNodeSet);
+            if (createNodeSet == null || (!createNodeSet.equals("") && !createNodeSet.equals(ZkStateReader.CREATE_NODE_SET_EMPTY))) {
             try {
-              zkStateReader.waitForState(collectionName, 10, TimeUnit.SECONDS, (l, c) -> {
-                if (c == null) {
-                  return false;
-                }
-                for (String name : coresToCreate.keySet()) {
-                  if (c.getReplica(name) == null || c.getReplica(name).getState() != Replica.State.ACTIVE) {
+                zkStateReader.waitForState(collectionName, 10, TimeUnit.SECONDS, (l, c) -> {
+                  if (c == null) {
                     return false;
                   }
-                }
-                Collection<Slice> slices = c.getSlices();
-                if (slices.size() < shardNames.size()) {
-                  return false;
-                }
-                for (Slice slice : slices) {
-                  if (slice.getLeader() == null) {
+                  for (String name : coresToCreate.keySet()) {
+                    if (c.getReplica(name) == null || c.getReplica(name).getState() != Replica.State.ACTIVE) {
+                      return false;
+                    }
+                  }
+                  Collection<Slice> slices = c.getSlices();
+                  if (slices.size() < shardNames.size()) {
                     return false;
                   }
-                }
-                return true;
-              });
-            } catch (InterruptedException e) {
-              log.warn("Interrupted waiting for active replicas on collection creation {}", collectionName);
-            } catch (TimeoutException e) {
-              log.error("Exception waiting for active replicas on collection creation {}", collectionName);
+                  for (Slice slice : slices) {
+                    if (slice.getLeader() == null) {
+                      return false;
+                    }
+                  }
+                  return true;
+                });
+              } catch(InterruptedException e){
+                log.warn("Interrupted waiting for active replicas on collection creation {}", collectionName);
+              } catch(TimeoutException e){
+                log.error("Exception waiting for active replicas on collection creation {}", collectionName);
+              }
             }
 
             if (log.isDebugEnabled()) log.debug("Finished create command on all shards for collection: {}", collectionName);
diff --git a/solr/core/src/test/org/apache/solr/cloud/ShardRoutingCustomTest.java b/solr/core/src/test/org/apache/solr/cloud/ShardRoutingCustomTest.java
index 7bf794c..95273a9 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ShardRoutingCustomTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ShardRoutingCustomTest.java
@@ -22,11 +22,12 @@ import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 
-public class ShardRoutingCustomTest extends AbstractFullDistribZkTestBase {
+public class ShardRoutingCustomTest extends SolrCloudBridgeTestCase {
 
   String collection = DEFAULT_COLLECTION;  // enable this to be configurable (more work needs to be done)
 
@@ -38,50 +39,32 @@ public class ShardRoutingCustomTest extends AbstractFullDistribZkTestBase {
 
   public ShardRoutingCustomTest() {
     schemaString = "schema15.xml";      // we need a string id
+    solrconfigString = "solrconfig.xml";
+    uploadSelectCollection1Config = true;
+    createCollection1 = false;
     sliceCount = 0;
   }
 
   @Test
   public void test() throws Exception {
-    boolean testFinished = false;
-    try {
-      doCustomSharding();
-
-      testFinished = true;
-    } finally {
-      if (!testFinished) {
-        printLayout();
-      }
-    }
+    doCustomSharding();
   }
 
   private void doCustomSharding() throws Exception {
-    printLayout();
 
-    File jettyDir = createTempDir("jetty").toFile();
-    jettyDir.mkdirs();
-    setupJettySolrHome(jettyDir);
-    JettySolrRunner j = createJetty(jettyDir, createTempDir().toFile().getAbsolutePath(), "shardA", "solrconfig.xml", null);
-    j.start();
     assertEquals(0, CollectionAdminRequest
         .createCollection(DEFAULT_COLLECTION, "_default", 1, 1)
-        .setCreateNodeSet("")
+        .setCreateNodeSet(ZkStateReader.CREATE_NODE_SET_EMPTY)
         .process(cloudClient).getStatus());
     assertTrue(CollectionAdminRequest
         .addReplicaToShard(collection,"s1")
-        .setNode(j.getNodeName())
-        .setType(useTlogReplicas()? Replica.Type.TLOG: Replica.Type.NRT)
+        .setNode(cluster.getJettySolrRunner(0).getNodeName())
+        .setType(useTlogReplicas() ? Replica.Type.TLOG: Replica.Type.NRT)
         .process(cloudClient).isSuccess());
-    jettys.add(j);
-    SolrClient client = createNewSolrClient(j.getLocalPort());
-    clients.add(client);
-
-    waitForActiveReplicaCount(cloudClient, DEFAULT_COLLECTION, 1);
-
-    updateMappingsFromZk(this.jettys, this.clients);
-
-    printLayout();
   }
 
+  private boolean useTlogReplicas() {
+    return random().nextBoolean();
+  }
 
 }