Posted to commits@lucene.apache.org by ab...@apache.org on 2019/03/15 08:37:22 UTC

[lucene-solr] 02/02: SOLR-11127: Implement .system collection back-compat check.

This is an automated email from the ASF dual-hosted git repository.

ab pushed a commit to branch jira/solr-11127-2
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit 49b448e80aa5a303d950bcd0405f0356462aef02
Author: Andrzej Bialecki <ab...@apache.org>
AuthorDate: Fri Mar 15 09:36:51 2019 +0100

    SOLR-11127: Implement .system collection back-compat check.
---
 .../src/java/org/apache/solr/cloud/Overseer.java   | 123 +++++++++++++-
 .../java/org/apache/solr/core/CoreContainer.java   |   6 -
 .../org/apache/solr/handler/admin/ColStatus.java   |   3 +
 .../handler/admin/SegmentsInfoRequestHandler.java  |   5 +-
 .../apache/solr/cloud/CollectionsAPISolrJTest.java |  11 +-
 .../solr/cloud/SystemCollectionCompatTest.java     | 187 +++++++++++++++++++++
 .../solrj/request/CollectionAdminRequest.java      |   8 +-
 7 files changed, 328 insertions(+), 15 deletions(-)

diff --git a/solr/core/src/java/org/apache/solr/cloud/Overseer.java b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
index 91b7e74..c9b81c7 100644
--- a/solr/core/src/java/org/apache/solr/cloud/Overseer.java
+++ b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
@@ -21,15 +21,24 @@ import static org.apache.solr.common.params.CommonParams.ID;
 import java.io.Closeable;
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.LinkedHashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
+import java.util.function.BiConsumer;
 
+import org.apache.lucene.util.Version;
+import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.cloud.SolrCloudManager;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.cloud.autoscaling.OverseerTriggerThread;
 import org.apache.solr.cloud.overseer.ClusterStateMutator;
@@ -45,11 +54,15 @@ import org.apache.solr.common.SolrCloseable;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.ConnectionManager;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionAdminParams;
 import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.util.IOUtils;
+import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.ObjectReleaseTracker;
 import org.apache.solr.common.util.Pair;
 import org.apache.solr.common.util.Utils;
@@ -570,10 +583,118 @@ public class Overseer implements SolrCloseable {
     updaterThread.start();
     ccThread.start();
     triggerThread.start();
- 
+
+    systemCollectionCompatCheck(new BiConsumer<String, Object>() {
+      boolean firstPair = true;
+      @Override
+      public void accept(String s, Object o) {
+        if (firstPair) {
+          log.warn("WARNING: Collection '.system' may need re-indexing due to compatibility issues listed below. See REINDEXCOLLECTION documentation for more details.");
+          firstPair = false;
+        }
+        log.warn("WARNING: *\t{}:\t{}", s, o);
+      }
+    });
+
     assert ObjectReleaseTracker.track(this);
   }
 
+  public void systemCollectionCompatCheck(final BiConsumer<String, Object> consumer) {
+    ClusterState clusterState = zkController.getClusterState();
+    DocCollection coll = clusterState.getCollectionOrNull(CollectionAdminParams.SYSTEM_COLL);
+    if (coll == null) {
+      return;
+    }
+    // check that all shard leaders are active
+    boolean allActive = true;
+    for (Slice s : coll.getActiveSlices()) {
+      if (s.getLeader() == null || !s.getLeader().isActive(clusterState.getLiveNodes())) {
+        allActive = false;
+        break;
+      }
+    }
+    if (allActive) {
+      doCompatCheck(consumer);
+    } else {
+      // wait for all leaders to become active and then check
+      zkController.zkStateReader.registerCollectionStateWatcher(CollectionAdminParams.SYSTEM_COLL, (liveNodes, state) -> {
+        boolean active = true;
+        for (Slice s : state.getActiveSlices()) {
+          if (s.getLeader() == null || !s.getLeader().isActive(liveNodes)) {
+            active = false;
+            break;
+          }
+        }
+        if (active) {
+          doCompatCheck(consumer);
+        }
+        return active;
+      });
+    }
+  }
+
+  private void doCompatCheck(BiConsumer<String, Object> consumer) {
+    try (CloudSolrClient client = new CloudSolrClient.Builder(Collections.singletonList(getZkController().getZkServerAddress()), Optional.empty())
+        .withSocketTimeout(30000).withConnectionTimeout(15000)
+        .withHttpClient(updateShardHandler.getDefaultHttpClient()).build()) {
+      CollectionAdminRequest.ColStatus req = CollectionAdminRequest.collectionStatus(CollectionAdminParams.SYSTEM_COLL)
+          .setWithSegments(true)
+          .setWithFieldInfo(true);
+      CollectionAdminResponse rsp = req.process(client);
+      NamedList<Object> status = (NamedList<Object>)rsp.getResponse().get(CollectionAdminParams.SYSTEM_COLL);
+      Collection<String> nonCompliant = (Collection<String>)status.get("schemaNonCompliant");
+      if (!nonCompliant.contains("(NONE)")) {
+        consumer.accept("indexFieldsNotMatchingSchema", nonCompliant);
+      }
+      Set<Integer> segmentCreatedMajorVersions = new HashSet<>();
+      Set<String> segmentVersions = new HashSet<>();
+      int currentMajorVersion = Version.LATEST.major;
+      String currentVersion = Version.LATEST.toString();
+      segmentVersions.add(currentVersion);
+      segmentCreatedMajorVersions.add(currentMajorVersion);
+      NamedList<Object> shards = (NamedList<Object>)status.get("shards");
+      for (Map.Entry<String, Object> entry : shards) {
+        NamedList<Object> leader = (NamedList<Object>)((NamedList<Object>)entry.getValue()).get("leader");
+        if (leader == null) {
+          continue;
+        }
+        NamedList<Object> segInfos = (NamedList<Object>)leader.get("segInfos");
+        if (segInfos == null) {
+          continue;
+        }
+        NamedList<Object> infos = (NamedList<Object>)segInfos.get("info");
+        if (((Number)infos.get("numSegments")).intValue() > 0) {
+          segmentVersions.add(infos.get("minSegmentLuceneVersion").toString());
+        }
+        if (infos.get("commitLuceneVersion") != null) {
+          segmentVersions.add(infos.get("commitLuceneVersion").toString());
+        }
+        NamedList<Object> segmentInfos = (NamedList<Object>)segInfos.get("segments");
+        segmentInfos.forEach((k, v) -> {
+          NamedList<Object> segment = (NamedList<Object>)v;
+          segmentVersions.add(segment.get("version").toString());
+          if (segment.get("minVersion") != null) {
+            segmentVersions.add(segment.get("minVersion").toString());
+          }
+          if (segment.get("createdVersionMajor") != null) {
+            segmentCreatedMajorVersions.add(((Number)segment.get("createdVersionMajor")).intValue());
+          }
+        });
+      }
+      if (segmentVersions.size() > 1) {
+        consumer.accept("differentSegmentVersions", segmentVersions);
+        consumer.accept("currentLuceneVersion", currentVersion);
+      }
+      if (segmentCreatedMajorVersions.size() > 1) {
+        consumer.accept("differentMajorSegmentVersions", segmentCreatedMajorVersions);
+        consumer.accept("currentLuceneMajorVersion", currentMajorVersion);
+      }
+
+    } catch (SolrServerException | IOException e) {
+      log.warn("Unable to perform back-compat check of .system collection", e);
+    }
+  }
+
   public Stats getStats() {
     return stats;
   }
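
For reference, systemCollectionCompatCheck() is public and takes a BiConsumer, so the
findings can also be collected programmatically instead of logged. A minimal usage
sketch, not part of this commit, assuming an Overseer reference is available to the
caller (the helper name is illustrative):

    // Hypothetical helper showing how the findings could be gathered into a map.
    static Map<String, Object> collectCompatFindings(Overseer overseer) {
      Map<String, Object> findings = new LinkedHashMap<>();
      overseer.systemCollectionCompatCheck(findings::put);
      // Keys reported by doCompatCheck() include "indexFieldsNotMatchingSchema",
      // "differentSegmentVersions" and "differentMajorSegmentVersions".
      // Note: if the .system shard leaders are not yet active the check is deferred to a
      // collection state watcher, so the consumer may be invoked asynchronously.
      return findings;
    }
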
diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
index c746628..410e26e 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
@@ -748,16 +748,10 @@ public class CoreContainer {
       containerHandlers.put(AutoScalingHandler.HANDLER_PATH, autoScalingHandler);
       autoScalingHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), metricTag, AutoScalingHandler.HANDLER_PATH);
     }
-    // verify .system compatibility
-    systemCollCompatCheck();
     // This is a bit redundant but these are two distinct concepts for all they're accomplished at the same time.
     status |= LOAD_COMPLETE | INITIAL_CORE_LOAD_COMPLETE;
   }
 
-  private void systemCollCompatCheck() {
-
-  }
-
   // MetricsHistoryHandler supports both cloud and standalone configs
   private void createMetricsHistoryHandler() {
     PluginInfo plugin = cfg.getMetricsConfig().getHistoryHandler();
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ColStatus.java b/solr/core/src/java/org/apache/solr/handler/admin/ColStatus.java
index 7a4e090..b8e56a9 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/ColStatus.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/ColStatus.java
@@ -149,6 +149,9 @@ public class ColStatus {
         sliceMap.add("leader", leaderMap);
         leaderMap.add("coreNode", leader.getName());
         leaderMap.addAll(leader.getProperties());
+        if (!leader.isActive(clusterState.getLiveNodes())) {
+          continue;
+        }
         String url = ZkCoreNodeProps.getCoreUrl(leader);
         try (SolrClient client = solrClientCache.getHttpSolrClient(url)) {
           ModifiableSolrParams params = new ModifiableSolrParams();
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/SegmentsInfoRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/SegmentsInfoRequestHandler.java
index 5b37bbb..3cc7a09 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/SegmentsInfoRequestHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/SegmentsInfoRequestHandler.java
@@ -386,8 +386,7 @@ public class SegmentsInfoRequestHandler extends RequestHandlerBase {
         nonCompliant.add("docValues", "schema=" + sf.getType().getUninversionType(sf) + ", segment=false");
       }
       if (!sf.hasDocValues() &&
-          fi.getDocValuesType() != DocValuesType.NONE &&
-          fi.getIndexOptions() != IndexOptions.NONE) {
+          fi.getDocValuesType() != DocValuesType.NONE) {
         nonCompliant.add("docValues", "schema=false, segment=" + fi.getDocValuesType().toString());
       }
       if (!sf.isPolyField()) { // difficult to find all sub-fields in a general way
@@ -395,7 +394,7 @@ public class SegmentsInfoRequestHandler extends RequestHandlerBase {
           nonCompliant.add("indexed", "schema=" + sf.indexed() + ", segment=" + fi.getIndexOptions());
         }
       }
-      if (sf.omitNorms() != (fi.omitsNorms() || hasPoints)) {
+      if (!hasPoints && (sf.omitNorms() != fi.omitsNorms())) {
         nonCompliant.add("omitNorms", "schema=" + sf.omitNorms() + ", segment=" + fi.omitsNorms());
       }
       if (sf.storeTermVector() != fi.hasVectors()) {
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
index 42940a8..829bd88 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
@@ -30,6 +30,7 @@ import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.Date;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
@@ -611,9 +612,17 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
     cluster.waitForActiveCollection(collectionName, 2, 4);
 
     SolrClient client = cluster.getSolrClient();
+    byte[] binData = collectionName.getBytes("UTF-8");
     // index some docs
     for (int i = 0; i < 10; i++) {
-      client.add(collectionName, new SolrInputDocument("id", String.valueOf(i)));
+      SolrInputDocument doc = new SolrInputDocument();
+      doc.addField("id", String.valueOf(i));
+      doc.addField("number_l", i);
+      doc.addField("string_s", String.valueOf(i));
+      doc.addField("string_txt", String.valueOf(i));
+      doc.addField("timestamp", new Date());
+      doc.addField("data_bin", binData);
+      client.add(collectionName, doc);
     }
     client.commit(collectionName);
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/SystemCollectionCompatTest.java b/solr/core/src/test/org/apache/solr/cloud/SystemCollectionCompatTest.java
new file mode 100644
index 0000000..78a85cc
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/SystemCollectionCompatTest.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud;
+
+import java.lang.invoke.MethodHandles;
+import java.util.Collections;
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.solr.client.solrj.cloud.DistribStateManager;
+import org.apache.solr.client.solrj.cloud.SolrCloudManager;
+import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.schema.SchemaRequest;
+import org.apache.solr.client.solrj.response.CollectionAdminResponse;
+import org.apache.solr.client.solrj.response.schema.SchemaResponse;
+import org.apache.solr.common.SolrDocumentList;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionAdminParams;
+import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.logging.LogWatcher;
+import org.apache.solr.logging.LogWatcherConfig;
+import org.apache.solr.util.IdUtils;
+import org.apache.solr.util.TimeOut;
+import org.apache.zookeeper.CreateMode;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tests the back-compat check of the .system collection performed by the Overseer on startup.
+ */
+public class SystemCollectionCompatTest extends SolrCloudTestCase {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(3)
+        .addConfig("conf1", configset("cloud-minimal"))
+        .configure();
+    if (! log.isWarnEnabled()) {
+      fail("Test requires that log-level is at-least WARN, but WARN is disabled");
+    }
+  }
+
+  private SolrCloudManager cloudManager;
+  private CloudSolrClient solrClient;
+
+  @Before
+  public void setupSystemCollection() throws Exception {
+    CollectionAdminRequest.createCollection(CollectionAdminParams.SYSTEM_COLL, null, 1, 3)
+        .process(cluster.getSolrClient());
+    cluster.waitForActiveCollection(CollectionAdminParams.SYSTEM_COLL,  1, 3);
+    ZkController zkController = cluster.getJettySolrRunner(0).getCoreContainer().getZkController();
+    cloudManager = zkController.getSolrCloudManager();
+    solrClient = new CloudSolrClientBuilder(Collections.singletonList(zkController.getZkServerAddress()),
+        Optional.empty()).build();
+    // send a dummy doc to the .system collection
+    SolrInputDocument doc = new SolrInputDocument(
+        "id", IdUtils.timeRandomId(),
+        CommonParams.TYPE, "dummy");
+    doc.addField("time_l", cloudManager.getTimeSource().getEpochTimeNs());
+    doc.addField("timestamp", new Date());
+    solrClient.add(CollectionAdminParams.SYSTEM_COLL, doc);
+    solrClient.commit(CollectionAdminParams.SYSTEM_COLL);
+
+    // workaround for a bug in schema update API
+    String pathToBackup = ZkStateReader.CONFIGS_ZKNODE + "/.system/schema.xml.bak";
+    DistribStateManager stateManager = cloudManager.getDistribStateManager();
+    VersionedData data = null;
+    if (stateManager.hasData(pathToBackup)) {
+      data = stateManager.getData(pathToBackup);
+    }
+    String path = ZkStateReader.CONFIGS_ZKNODE + "/.system/schema.xml";
+    if (!stateManager.hasData(path) && data != null) {
+      stateManager.createData(path, data.getData(), CreateMode.PERSISTENT);
+      stateManager.setData(path, data.getData(), -1);
+      stateManager.removeData(pathToBackup, -1);
+      stateManager.createData(pathToBackup, data.getData(), CreateMode.PERSISTENT);
+      // update it to increase the version
+      stateManager.setData(pathToBackup, data.getData(), -1);
+    }
+    // trigger compat report by changing the schema
+    SchemaRequest req = new SchemaRequest();
+    SchemaResponse rsp = req.process(solrClient, CollectionAdminParams.SYSTEM_COLL);
+    Map<String, Object> field = getSchemaField("timestamp", rsp);
+    // make obviously incompatible changes
+    field.put("type", "string");
+    field.put("docValues", false);
+    SchemaRequest.ReplaceField replaceFieldRequest = new SchemaRequest.ReplaceField(field);
+    SchemaResponse.UpdateResponse replaceFieldResponse = replaceFieldRequest.process(solrClient, CollectionAdminParams.SYSTEM_COLL);
+    assertEquals(replaceFieldResponse.toString(), 0, replaceFieldResponse.getStatus());
+    // reload for the schema changes to become active
+    CollectionAdminRequest.reloadCollection(CollectionAdminParams.SYSTEM_COLL).process(solrClient);
+    cluster.waitForActiveCollection(CollectionAdminParams.SYSTEM_COLL,  1, 3);
+  }
+
+  @After
+  public void doAfter() throws Exception {
+    cluster.deleteAllCollections();
+
+    solrClient.close();
+  }
+
+  private Map<String, Object> getSchemaField(String name, SchemaResponse schemaResponse) {
+    List<Map<String, Object>> fields = schemaResponse.getSchemaRepresentation().getFields();
+    for (Map<String, Object> field : fields) {
+      if (name.equals(field.get("name"))) {
+        return field;
+      }
+    }
+    return null;
+  }
+
+  @Test
+  public void testBackCompat() throws Exception {
+    CollectionAdminRequest.OverseerStatus status = new CollectionAdminRequest.OverseerStatus();
+    CloudSolrClient solrClient = cluster.getSolrClient();
+    CollectionAdminResponse adminResponse = status.process(solrClient);
+    NamedList<Object> response = adminResponse.getResponse();
+    String leader = (String) response.get("leader");
+    JettySolrRunner overseerNode = null;
+    int index = -1;
+    List<JettySolrRunner> jettySolrRunners = cluster.getJettySolrRunners();
+    for (int i = 0; i < jettySolrRunners.size(); i++) {
+      JettySolrRunner runner = jettySolrRunners.get(i);
+      if (runner.getNodeName().equals(leader)) {
+        overseerNode = runner;
+        index = i;
+        break;
+      }
+    }
+    assertNotNull(overseerNode);
+    LogWatcherConfig watcherCfg = new LogWatcherConfig(true, null, "WARN", 100);
+    LogWatcher watcher = LogWatcher.newRegisteredLogWatcher(watcherCfg, null);
+
+    watcher.reset();
+
+    // restart Overseer to trigger the back-compat check
+    cluster.stopJettySolrRunner(index);
+    TimeOut timeOut = new TimeOut(30, TimeUnit.SECONDS, cloudManager.getTimeSource());
+    while (!timeOut.hasTimedOut()) {
+      adminResponse = status.process(solrClient);
+      response = adminResponse.getResponse();
+      String newLeader = (String) response.get("leader");
+      if (newLeader != null && !leader.equals(newLeader)) {
+        break;
+      }
+      timeOut.sleep(200);
+    }
+    if (timeOut.hasTimedOut()) {
+      fail("time out waiting for new Overseer leader");
+    }
+
+    Thread.sleep(5000);
+    SolrDocumentList history = watcher.getHistory(-1, null);
+    assertFalse(history.isEmpty());
+    boolean foundWarning = false;
+    boolean foundSchemaWarning = false;
+  }
+
+}
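
The test above ends after declaring foundWarning and foundSchemaWarning without using
them, so it does not yet assert on the logged warnings. A hedged sketch of how the
history scan could be completed, assuming the LogWatcher documents expose "logger" and
"message" fields (and adding an import of org.apache.solr.common.SolrDocument):

    for (SolrDocument doc : history) {
      Object logger = doc.getFieldValue("logger");
      Object message = doc.getFieldValue("message");
      if (logger == null || message == null) {
        continue;
      }
      // The Overseer logs one "... may need re-indexing ..." line, then one line per finding.
      if (message.toString().contains("re-indexing")) {
        foundWarning = true;
      }
      if (message.toString().contains("indexFieldsNotMatchingSchema")) {
        foundSchemaWarning = true;
      }
    }
    assertTrue("expected a warning that .system may need re-indexing", foundWarning);
    assertTrue("expected a warning about index fields not matching the schema", foundSchemaWarning);
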
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
index 11efb3e..ad1c6b7 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
@@ -907,10 +907,10 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
     @Override
     public SolrParams getParams() {
       ModifiableSolrParams params = (ModifiableSolrParams)super.getParams();
-      params.setNonNull("segments", withSegments.toString());
-      params.setNonNull("fieldInfo", withFieldInfo.toString());
-      params.setNonNull("coreInfo", withCoreInfo.toString());
-      params.setNonNull("sizeInfo", withSizeInfo.toString());
+      params.setNonNull("segments", withSegments);
+      params.setNonNull("fieldInfo", withFieldInfo);
+      params.setNonNull("coreInfo", withCoreInfo);
+      params.setNonNull("sizeInfo", withSizeInfo);
       return params;
     }
   }
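
The getParams() change above matters because withSegments, withFieldInfo, withCoreInfo
and withSizeInfo are Boolean options that may be left unset: calling toString() on an
unset option throws and defeats setNonNull(). A minimal sketch of the behavior being
fixed (the local variable is illustrative):

    ModifiableSolrParams params = new ModifiableSolrParams();
    Boolean withSegments = null;  // option never set by the caller
    // Before: params.setNonNull("segments", withSegments.toString());  // NullPointerException
    params.setNonNull("segments", withSegments);  // after the fix: the param is simply omitted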