Posted to commits@lucene.apache.org by tf...@apache.org on 2017/05/10 22:09:58 UTC

[4/6] lucene-solr:jira/solr-10233: Backup/Restore with replica types

Backup/Restore with replica types


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/62712294
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/62712294
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/62712294

Branch: refs/heads/jira/solr-10233
Commit: 6271229493a0f5adac675ee0f0d2c4b2c148cef5
Parents: d9021e9
Author: Tomas Fernandez Lobbe <tf...@apache.org>
Authored: Wed May 10 12:57:40 2017 -0700
Committer: Tomas Fernandez Lobbe <tf...@apache.org>
Committed: Wed May 10 12:57:40 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/solr/cloud/RestoreCmd.java  | 52 ++++++++++++++++++--
 .../AbstractCloudBackupRestoreTestCase.java     | 23 +++++----
 .../apache/solr/common/cloud/DocCollection.java | 24 +++++++++
 3 files changed, 85 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
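
For context, this change makes RestoreCmd recreate replicas with the same types (REALTIME, APPEND, PASSIVE) that the backed-up collection declared, instead of relying only on replicationFactor. Below is a minimal SolrJ sketch of the end-to-end flow it enables; the 6-argument createCollection overload mirrors the one used in AbstractCloudBackupRestoreTestCase further down, and the ZooKeeper address and backup location are placeholders:

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class BackupRestoreWithReplicaTypes {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client = new CloudSolrClient.Builder()
        .withZkHost("localhost:9983").build()) {
      // 2 shards, with 1 realtime + 1 append + 1 passive replica per shard
      // (same overload as the test below: name, config, shards, realtime, append, passive).
      CollectionAdminRequest
          .createCollection("mycollection", "conf1", 2, 1, 1, 1)
          .process(client);

      // Back up, then restore into a new collection; with this patch the
      // restored shards get one replica of each requested type again.
      CollectionAdminRequest.backupCollection("mycollection", "mybackup")
          .setLocation("/backups")
          .process(client);
      CollectionAdminRequest.restoreCollection("mycollection_restored", "mybackup")
          .setLocation("/backups")
          .process(client);
    }
  }
}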


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/62712294/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java b/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java
index 4e7fb58..367ee86 100644
--- a/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java
@@ -33,6 +33,7 @@ import org.apache.solr.cloud.overseer.OverseerAction;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.ImplicitDocRouter;
+import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
@@ -56,6 +57,7 @@ import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
 import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARDS_PROP;
 import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
 import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_TYPE;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
 import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
@@ -178,6 +180,12 @@ public class RestoreCmd implements OverseerCollectionMessageHandler.Cmd {
       propMap.put(Overseer.QUEUE_OPERATION, CREATESHARD);
       propMap.put(COLLECTION_PROP, restoreCollectionName);
       propMap.put(SHARD_ID_PROP, slice.getName());
+      
+      if (restoreCollection.getNumRealtimeReplicas() != null && restoreCollection.getNumRealtimeReplicas() >= 1) {
+        propMap.put(REPLICA_TYPE, Replica.Type.REALTIME.name());
+      } else if (restoreCollection.getNumAppendReplicas() != null && restoreCollection.getNumAppendReplicas() >= 1) {
+        propMap.put(REPLICA_TYPE, Replica.Type.APPEND.name());
+      }
       // add async param
       if (asyncId != null) {
         propMap.put(ASYNC, asyncId);
@@ -216,17 +224,51 @@ public class RestoreCmd implements OverseerCollectionMessageHandler.Cmd {
     //refresh the location copy of collection state
     restoreCollection = zkStateReader.getClusterState().getCollection(restoreCollectionName);
 
-    //Add the remaining replicas for each shard
-    Integer numReplicas = restoreCollection.getReplicationFactor();
-    if (numReplicas != null && numReplicas > 1) {
+    //Add the remaining replicas for each shard, considering its type
+    int numRealtimeReplicas = restoreCollection.getNumRealtimeReplicas() != null ?
+        restoreCollection.getNumRealtimeReplicas() : 0;
+    if (numRealtimeReplicas == 0) {
+      numRealtimeReplicas = restoreCollection.getReplicationFactor() != null ?
+          restoreCollection.getReplicationFactor() : 0;
+    }
+    int numAppendReplicas = restoreCollection.getNumAppendReplicas() != null ?
+        restoreCollection.getNumAppendReplicas() : 0;
+    int numPassiveReplicas = restoreCollection.getNumPassiveReplicas() != null ?
+        restoreCollection.getNumPassiveReplicas() : 0;
+    
+    int createdRealtimeReplicas = 0, createdAppendReplicas = 0, createdPassiveReplicas = 0;
+    
+    // We already created either a REALTIME or an APPEND replica as leader
+    if (numRealtimeReplicas > 0) {
+      createdRealtimeReplicas++;
+    } else if (numAppendReplicas > 0) {
+      createdAppendReplicas++;
+    }
+    
+    int totalReplicasPerShard = numRealtimeReplicas + numAppendReplicas + numPassiveReplicas;
+    
+    if (totalReplicasPerShard > 1) {
       log.info("Adding replicas to restored collection={}", restoreCollection);
 
       for (Slice slice : restoreCollection.getSlices()) {
-        for (int i = 1; i < numReplicas; i++) {
-          log.debug("Adding replica for shard={} collection={} ", slice.getName(), restoreCollection);
+        for (int i = 1; i < totalReplicasPerShard; i++) {
+          Replica.Type typeToCreate;
+          if (createdRealtimeReplicas < numRealtimeReplicas) {
+            createdRealtimeReplicas++;
+            typeToCreate = Replica.Type.REALTIME;
+          } else if (createdAppendReplicas < numAppendReplicas) {
+            createdAppendReplicas++;
+            typeToCreate = Replica.Type.APPEND;
+          } else {
+            createdPassiveReplicas++;
+            typeToCreate = Replica.Type.PASSIVE;
+          }
+          
+          log.debug("Adding replica for shard={} collection={} of type {} ", slice.getName(), restoreCollection, typeToCreate);
           HashMap<String, Object> propMap = new HashMap<>();
           propMap.put(COLLECTION_PROP, restoreCollectionName);
           propMap.put(SHARD_ID_PROP, slice.getName());
+          propMap.put(REPLICA_TYPE, typeToCreate.name());
           // add async param
           if (asyncId != null) {
             propMap.put(ASYNC, asyncId);
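
For readers skimming the hunk above: once the leader of each shard exists (a REALTIME replica if any were requested, otherwise an APPEND one), the remaining replicas are assigned types in a fixed order, realtime first, then append, then passive, until each requested count is reached. The following standalone sketch only illustrates that selection order and is not code from the patch:

import java.util.ArrayList;
import java.util.List;

public class ReplicaTypeOrder {
  enum Type { REALTIME, APPEND, PASSIVE }

  // Given per-shard target counts and one already-created leader, return the
  // types of the replicas that the loop above would add for a single shard.
  static List<Type> remainingTypes(int numRealtime, int numAppend, int numPassive) {
    int createdRealtime = numRealtime > 0 ? 1 : 0;                    // leader was REALTIME
    int createdAppend = (numRealtime == 0 && numAppend > 0) ? 1 : 0;  // or APPEND
    int total = numRealtime + numAppend + numPassive;
    List<Type> types = new ArrayList<>();
    for (int i = 1; i < total; i++) {
      if (createdRealtime < numRealtime) {
        createdRealtime++;
        types.add(Type.REALTIME);
      } else if (createdAppend < numAppend) {
        createdAppend++;
        types.add(Type.APPEND);
      } else {
        types.add(Type.PASSIVE);
      }
    }
    return types;
  }

  public static void main(String[] args) {
    // 2 realtime, 1 append, 1 passive per shard -> [REALTIME, APPEND, PASSIVE]
    System.out.println(remainingTypes(2, 1, 1));
  }
}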

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/62712294/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java b/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
index f39cfed..e9cebc9 100644
--- a/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
+++ b/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
@@ -29,6 +29,7 @@ import org.apache.lucene.util.TestUtil;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest.ClusterProp;
 import org.apache.solr.client.solrj.response.RequestStatusState;
@@ -45,8 +46,6 @@ import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.solr.common.params.ShardParams._ROUTE_;
-
 /**
  * This class implements the logic required to test Solr cloud backup/restore capability.
  */
@@ -81,11 +80,15 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
   @Test
   public void test() throws Exception {
     boolean isImplicit = random().nextBoolean();
+    boolean doSplitShardOperation = !isImplicit && random().nextBoolean();
     int replFactor = TestUtil.nextInt(random(), 1, 2);
+    // Split Shard not supported with replica types
+    int numAppendReplicas = TestUtil.nextInt(random(), 0, 1);
+    int numPassiveReplicas = TestUtil.nextInt(random(), 0, 1);
     CollectionAdminRequest.Create create =
-        CollectionAdminRequest.createCollection(getCollectionName(), "conf1", NUM_SHARDS, replFactor);
-    if (NUM_SHARDS * replFactor > cluster.getJettySolrRunners().size() || random().nextBoolean()) {
-      create.setMaxShardsPerNode(NUM_SHARDS);//just to assert it survives the restoration
+        CollectionAdminRequest.createCollection(getCollectionName(), "conf1", NUM_SHARDS, replFactor, numAppendReplicas, numPassiveReplicas);
+    if (NUM_SHARDS * (replFactor + numAppendReplicas + numPassiveReplicas) > cluster.getJettySolrRunners().size() || random().nextBoolean()) {
+      create.setMaxShardsPerNode((int) Math.ceil((double) NUM_SHARDS * (replFactor + numAppendReplicas + numPassiveReplicas) / cluster.getJettySolrRunners().size()));//just to assert it survives the restoration
     }
     if (random().nextBoolean()) {
       create.setAutoAddReplicas(true);//just to assert it survives the restoration
@@ -109,7 +112,7 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
 
     indexDocs(getCollectionName());
 
-    if (!isImplicit && random().nextBoolean()) {
+    if (doSplitShardOperation) {
       // shard split the first shard
       int prevActiveSliceCount = getActiveSliceCount(getCollectionName());
       CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(getCollectionName());
@@ -277,9 +280,11 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
     Map<String,Integer> shardToDocCount = new TreeMap<>();
     for (Slice slice : docCollection.getActiveSlices()) {
       String shardName = slice.getName();
-      long docsInShard = client.query(docCollection.getName(), new SolrQuery("*:*").setParam(_ROUTE_, shardName))
-          .getResults().getNumFound();
-      shardToDocCount.put(shardName, (int) docsInShard);
+      try (HttpSolrClient leaderClient = new HttpSolrClient.Builder(slice.getLeader().getCoreUrl()).withHttpClient(client.getHttpClient()).build()) {
+        long docsInShard = leaderClient.query(new SolrQuery("*:*").setParam("distrib", "false"))
+            .getResults().getNumFound();
+        shardToDocCount.put(shardName, (int) docsInShard);
+      }
     }
     return shardToDocCount;
   }
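
The test helper above no longer routes a cloud query per shard; instead it opens an HttpSolrClient against each shard leader's core URL and queries with distrib=false, so the count reflects exactly what that leader core holds. A hedged standalone sketch of the same pattern (the CloudSolrClient and collection are assumed to be set up elsewhere):

import java.util.Map;
import java.util.TreeMap;

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Slice;

public class PerShardDocCounts {
  // Counts documents per shard by querying each leader core directly
  // (distrib=false), bypassing distributed search entirely.
  static Map<String, Integer> shardDocCounts(CloudSolrClient client, String collection) throws Exception {
    DocCollection coll = client.getZkStateReader().getClusterState().getCollection(collection);
    Map<String, Integer> counts = new TreeMap<>();
    for (Slice slice : coll.getActiveSlices()) {
      try (HttpSolrClient leaderClient = new HttpSolrClient.Builder(slice.getLeader().getCoreUrl())
          .withHttpClient(client.getHttpClient()).build()) {
        long numFound = leaderClient.query(new SolrQuery("*:*").setParam("distrib", "false"))
            .getResults().getNumFound();
        counts.put(slice.getName(), (int) numFound);
      }
    }
    return counts;
  }
}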

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/62712294/solr/solrj/src/java/org/apache/solr/common/cloud/DocCollection.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/DocCollection.java b/solr/solrj/src/java/org/apache/solr/common/cloud/DocCollection.java
index 3b409b7..3b6ac80 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/DocCollection.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/DocCollection.java
@@ -35,6 +35,9 @@ import org.noggit.JSONWriter;
 import static org.apache.solr.common.cloud.ZkStateReader.AUTO_ADD_REPLICAS;
 import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
 import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+import static org.apache.solr.common.cloud.ZkStateReader.REALTIME_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.APPEND_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.PASSIVE_REPLICAS;
 
 /**
  * Models a Collection in zookeeper (but that Java name is obviously taken, hence "DocCollection")
@@ -58,6 +61,9 @@ public class DocCollection extends ZkNodeProps implements Iterable<Slice> {
   private final String znode;
 
   private final Integer replicationFactor;
+  private final Integer numRealtimeReplicas;
+  private final Integer numAppendReplicas;
+  private final Integer numPassiveReplicas;
   private final Integer maxShardsPerNode;
   private final Boolean autoAddReplicas;
 
@@ -81,6 +87,9 @@ public class DocCollection extends ZkNodeProps implements Iterable<Slice> {
     this.nodeNameLeaderReplicas = new HashMap<>();
     this.nodeNameReplicas = new HashMap<>();
     this.replicationFactor = (Integer) verifyProp(props, REPLICATION_FACTOR);
+    this.numRealtimeReplicas = (Integer) verifyProp(props, REALTIME_REPLICAS);
+    this.numAppendReplicas = (Integer) verifyProp(props, APPEND_REPLICAS);
+    this.numPassiveReplicas = (Integer) verifyProp(props, PASSIVE_REPLICAS);
     this.maxShardsPerNode = (Integer) verifyProp(props, MAX_SHARDS_PER_NODE);
     Boolean autoAddReplicas = (Boolean) verifyProp(props, AUTO_ADD_REPLICAS);
     this.autoAddReplicas = autoAddReplicas == null ? Boolean.FALSE : autoAddReplicas;
@@ -127,6 +136,9 @@ public class DocCollection extends ZkNodeProps implements Iterable<Slice> {
     switch (propName) {
       case MAX_SHARDS_PER_NODE:
       case REPLICATION_FACTOR:
+      case REALTIME_REPLICAS:
+      case PASSIVE_REPLICAS:
+      case APPEND_REPLICAS:
         return Integer.parseInt(o.toString());
       case AUTO_ADD_REPLICAS:
         return Boolean.parseBoolean(o.toString());
@@ -330,4 +342,16 @@ public class DocCollection extends ZkNodeProps implements Iterable<Slice> {
     return super.equals(that) && Objects.equals(this.znode, other.znode) && this.znodeVersion == other.znodeVersion;
   }
 
+  public Integer getNumRealtimeReplicas() {
+    return numRealtimeReplicas;
+  }
+
+  public Integer getNumAppendReplicas() {
+    return numAppendReplicas;
+  }
+
+  public Integer getNumPassiveReplicas() {
+    return numPassiveReplicas;
+  }
+
 }
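
The new DocCollection accessors return null when the corresponding property is absent from the collection's state, so callers are expected to default explicitly, as RestoreCmd does above. A small null-safe wrapper, sketched here for illustration:

import org.apache.solr.common.cloud.DocCollection;

public class ReplicaCountDefaults {
  // Realtime count falls back to the legacy replicationFactor when the new
  // property is missing or zero, mirroring the defaulting in RestoreCmd.
  static int realtimeReplicas(DocCollection coll) {
    Integer n = coll.getNumRealtimeReplicas();
    if (n == null || n == 0) {
      Integer rf = coll.getReplicationFactor();
      return rf == null ? 0 : rf;
    }
    return n;
  }

  static int appendReplicas(DocCollection coll) {
    Integer n = coll.getNumAppendReplicas();
    return n == null ? 0 : n;
  }

  static int passiveReplicas(DocCollection coll) {
    Integer n = coll.getNumPassiveReplicas();
    return n == null ? 0 : n;
  }
}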