You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by no...@apache.org on 2017/07/13 07:28:00 UTC

[1/2] lucene-solr:feature/autoscaling: SOLR-11005: While using policies, convert maxShardsPerNode parameter into a policy SOLR-10994: CREATESHARD, ADDREPLICA to use policy framework

Repository: lucene-solr
Updated Branches:
  refs/heads/feature/autoscaling 846f59e27 -> d2849cd45


SOLR-11005: While using policies, convert maxShardsPerNode parameter into a policy
SOLR-10994: CREATESHARD, ADDREPLICA to use policy framework


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/9e4d542b
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/9e4d542b
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/9e4d542b

Branch: refs/heads/feature/autoscaling
Commit: 9e4d542b04a9b28eb0b9cbe7731cd7ab336afff0
Parents: 29db981
Author: Noble Paul <no...@apache.org>
Authored: Thu Jul 13 16:57:36 2017 +0930
Committer: Noble Paul <no...@apache.org>
Committed: Thu Jul 13 16:57:36 2017 +0930

----------------------------------------------------------------------
 .../org/apache/solr/cloud/AddReplicaCmd.java    | 23 +++++-
 .../src/java/org/apache/solr/cloud/Assign.java  | 21 +++--
 .../apache/solr/cloud/CreateCollectionCmd.java  | 34 +++++++-
 .../org/apache/solr/cloud/CreateShardCmd.java   | 81 +++++++++++++-------
 .../apache/solr/cloud/DeleteCollectionCmd.java  | 33 ++++++++
 .../cloud/autoscaling/AutoScalingHandler.java   |  5 +-
 .../autoscaling/AutoScalingHandlerTest.java     |  1 -
 .../autoscaling/ComputePlanActionTest.java      |  5 +-
 .../solr/cloud/autoscaling/TestPolicyCloud.java | 50 +++++++++++-
 .../apache/solr/common/cloud/DocCollection.java |  7 +-
 .../solr/common/cloud/ReplicaPosition.java      |  2 +-
 11 files changed, 210 insertions(+), 52 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e4d542b/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java
index e9bfebf..9469653 100644
--- a/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java
@@ -26,6 +26,7 @@ import java.util.Locale;
 import java.util.Map;
 
 import org.apache.commons.lang.StringUtils;
+import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
@@ -45,6 +46,7 @@ import org.slf4j.LoggerFactory;
 
 import static org.apache.solr.cloud.Assign.getNodesForNewReplicas;
 import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
+import static org.apache.solr.cloud.OverseerCollectionMessageHandler.RANDOM;
 import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SKIP_CREATE_REPLICA_IN_CLUSTER_STATE;
 import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
 import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
@@ -94,8 +96,25 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
 
     // Kind of unnecessary, but it does put the logic of whether to override maxShardsPerNode in one place.
     if (!skipCreateReplicaInClusterState) {
-      node = getNodesForNewReplicas(clusterState, collection, shard, 1, node,
-          ocmh.overseer.getZkController().getCoreContainer()).get(0).nodeName;// TODO: use replica type in this logic too
+      if (CreateShardCmd.usePolicyFramework(coll, ocmh)) {
+        if (node == null) {
+          if(coll.getPolicyName() != null) message.getProperties().put(Policy.POLICY, coll.getPolicyName());
+          node = Assign.identifyNodes(() -> ocmh.overseer.getZkController().getCoreContainer(),
+              ocmh.zkStateReader,
+              clusterState,
+              Collections.emptyList(),
+              collection,
+              message,
+              Collections.singletonList(shard),
+              replicaType == Replica.Type.NRT ? 0 : 1,
+              replicaType == Replica.Type.TLOG ? 0 : 1,
+              replicaType == Replica.Type.PULL ? 0 : 1
+          ).get(0).node;
+        }
+      } else {
+        node = getNodesForNewReplicas(clusterState, collection, shard, 1, node,
+            ocmh.overseer.getZkController().getCoreContainer()).get(0).nodeName;// TODO: use replica type in this logic too
+      }
     }
     log.info("Node Identified {} for creating new replica", node);
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e4d542b/solr/core/src/java/org/apache/solr/cloud/Assign.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/Assign.java b/solr/core/src/java/org/apache/solr/cloud/Assign.java
index f83dbb7..266dab4 100644
--- a/solr/core/src/java/org/apache/solr/cloud/Assign.java
+++ b/solr/core/src/java/org/apache/solr/cloud/Assign.java
@@ -63,7 +63,6 @@ import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE
 import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE_DEFAULT;
 import static org.apache.solr.common.cloud.DocCollection.SNITCH;
 import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
 import static org.apache.solr.common.cloud.ZkStateReader.SOLR_AUTOSCALING_CONF_PATH;
 
 
@@ -264,8 +263,9 @@ public class Assign {
   public static List<ReplicaCount> getNodesForNewReplicas(ClusterState clusterState, String collectionName,
                                                           String shard, int nrtReplicas,
                                                           Object createNodeSet, CoreContainer cc) throws KeeperException, InterruptedException {
+    log.debug("getNodesForNewReplicas() shard: {} , replicas : {} , createNodeSet {}", shard, nrtReplicas, createNodeSet );
     DocCollection coll = clusterState.getCollection(collectionName);
-    Integer maxShardsPerNode = coll.getInt(MAX_SHARDS_PER_NODE, 1);
+    Integer maxShardsPerNode = coll.getMaxShardsPerNode();
     List<String> createNodeList = null;
 
     if (createNodeSet instanceof List) {
@@ -323,13 +323,16 @@ public class Assign {
                                                               int pullReplicas,
                                                               String policyName, ZkStateReader zkStateReader,
                                                               List<String> nodesList) throws KeeperException, InterruptedException {
+    log.debug("shardnames {} NRT {} TLOG {} PULL {} , policy {}, nodeList {}", shardNames, nrtReplicas, tlogReplicas, pullReplicas, policyName, nodesList);
+    SolrClientDataProvider clientDataProvider = null;
+    List<ReplicaPosition> replicaPositions = null;
     try (CloudSolrClient csc = new CloudSolrClient.Builder()
         .withClusterStateProvider(new ZkClientClusterStateProvider(zkStateReader))
         .build()) {
-      SolrClientDataProvider clientDataProvider = new SolrClientDataProvider(csc);
+      clientDataProvider = new SolrClientDataProvider(csc);
       Map<String, Object> autoScalingJson = Utils.getJson(zkStateReader.getZkClient(), SOLR_AUTOSCALING_CONF_PATH, true);
       Map<String, String> kvMap = Collections.singletonMap(collName, policyName);
-      return PolicyHelper.getReplicaLocations(
+      replicaPositions = PolicyHelper.getReplicaLocations(
           collName,
           autoScalingJson,
           clientDataProvider,
@@ -339,8 +342,16 @@ public class Assign {
           tlogReplicas,
           pullReplicas,
           nodesList);
+      return replicaPositions;
     } catch (IOException e) {
       throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error closing CloudSolrClient",e);
+    } finally {
+      if (log.isTraceEnabled()) {
+        if (clientDataProvider != null) log.trace("CLUSTER_DATA_PROVIDER: " + Utils.toJSONString(clientDataProvider));
+        if (replicaPositions != null)
+          log.trace("REPLICA_POSITIONS: " + Utils.toJSONString(Utils.getDeepCopy(replicaPositions, 7, true)));
+        log.trace("AUTOSCALING_JSON: " + Utils.toJSONString(Utils.getJson(zkStateReader.getZkClient(), SOLR_AUTOSCALING_CONF_PATH, true)));
+      }
     }
   }
 
@@ -392,7 +403,7 @@ public class Assign {
       return nodeNameVsShardCount;
     }
     DocCollection coll = clusterState.getCollection(collectionName);
-    Integer maxShardsPerNode = coll.getInt(MAX_SHARDS_PER_NODE, 1);
+    Integer maxShardsPerNode = coll.getMaxShardsPerNode();
     Map<String, DocCollection> collections = clusterState.getCollectionsMap();
     for (Map.Entry<String, DocCollection> entry : collections.entrySet()) {
       DocCollection c = entry.getValue();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e4d542b/solr/core/src/java/org/apache/solr/cloud/CreateCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/CreateCollectionCmd.java
index df23796..9bf4802 100644
--- a/solr/core/src/java/org/apache/solr/cloud/CreateCollectionCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/CreateCollectionCmd.java
@@ -29,9 +29,11 @@ import java.util.Map;
 import java.util.Properties;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
 import org.apache.solr.client.solrj.request.V2Request;
 import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
 import org.apache.solr.cloud.autoscaling.AutoScaling;
+import org.apache.solr.cloud.autoscaling.AutoScalingHandler;
 import org.apache.solr.cloud.overseer.ClusterStateMutator;
 import org.apache.solr.common.cloud.ReplicaPosition;
 import org.apache.solr.common.SolrException;
@@ -45,9 +47,11 @@ import org.apache.solr.common.cloud.ZkConfigManager;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.cloud.ZooKeeperException;
+import org.apache.solr.common.params.AutoScalingParams;
 import org.apache.solr.common.params.CoreAdminParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.util.CommandOperation;
 import org.apache.solr.common.util.ContentStreamBase;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.SimpleOrderedMap;
@@ -64,6 +68,7 @@ import org.apache.zookeeper.KeeperException.NoNodeException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static java.util.Collections.singletonMap;
 import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
 import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
 import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
@@ -72,6 +77,8 @@ import static org.apache.solr.common.cloud.ZkStateReader.*;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
 import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
 import static org.apache.solr.common.params.CommonParams.NAME;
+import static org.apache.solr.common.params.CoreAdminParams.REPLICA;
+import static org.apache.solr.common.params.CoreAdminParams.SHARD;
 import static org.apache.solr.common.util.StrUtils.formatString;
 
 public class CreateCollectionCmd implements Cmd {
@@ -107,6 +114,9 @@ public class CreateCollectionCmd implements Cmd {
       int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, numTlogReplicas>0?0:1));
       int numPullReplicas = message.getInt(PULL_REPLICAS, 0);
       boolean autoAddReplicas = message.getBool(AUTO_ADD_REPLICAS, false);
+      Map autoScalingJson = Utils.getJson(ocmh.zkStateReader.getZkClient(), SOLR_AUTOSCALING_CONF_PATH, true);
+      String policy = message.getStr(Policy.POLICY);
+      boolean usePolicyFramework = autoScalingJson.get(Policy.CLUSTER_POLICY) != null || policy != null;
 
       ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
       final String async = message.getStr(ASYNC);
@@ -124,7 +134,8 @@ public class CreateCollectionCmd implements Cmd {
         ClusterStateMutator.getShardNames(numSlices, shardNames);
       }
 
-      int maxShardsPerNode = message.getInt(MAX_SHARDS_PER_NODE, 1);
+      int maxShardsPerNode = message.getInt(MAX_SHARDS_PER_NODE, usePolicyFramework? 0: 1);
+      if(maxShardsPerNode == 0) message.getProperties().put(MAX_SHARDS_PER_NODE, "0");
 
       if (numNrtReplicas + numTlogReplicas <= 0) {
         throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, NRT_REPLICAS + " + " + TLOG_REPLICAS + " must be greater than 0");
@@ -158,7 +169,7 @@ public class CreateCollectionCmd implements Cmd {
 
         int maxShardsAllowedToCreate = maxShardsPerNode * nodeList.size();
         int requestedShardsToCreate = numSlices * totalNumReplicas;
-        if (maxShardsAllowedToCreate < requestedShardsToCreate) {
+        if (!usePolicyFramework &&  maxShardsAllowedToCreate < requestedShardsToCreate) {
           throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cannot create collection " + collectionName + ". Value of "
               + MAX_SHARDS_PER_NODE + " is " + maxShardsPerNode
               + ", and the number of nodes currently live or live and part of your "+CREATE_NODE_SET+" is " + nodeList.size()
@@ -170,6 +181,25 @@ public class CreateCollectionCmd implements Cmd {
               + ". This requires " + requestedShardsToCreate
               + " shards to be created (higher than the allowed number)");
         }
+        if (usePolicyFramework && maxShardsPerNode > 0) {
+          if (policy == null) {
+            //this means we should create a policy for this collection
+            AutoScalingHandler ash = (AutoScalingHandler) ocmh.overseer.getZkController().getCoreContainer().getRequestHandler(AutoScalingHandler.HANDLER_PATH);
+            Map newPolicy = Utils.makeMap(REPLICA, "<" + (maxShardsPerNode + 1), SHARD, Policy.EACH, "node", Policy.ANY);
+            SolrQueryResponse rsp = new SolrQueryResponse();
+            policy = "COLL_POLICY_" + collectionName;
+            ash.handleSetPolicies(null, rsp, new CommandOperation(AutoScalingParams.CMD_SET_POLICY, singletonMap(
+                policy
+                , Collections.singletonList(newPolicy))));
+            if (!"success".equals(rsp.getValues().get("result"))) {
+              throw new SolrException(ErrorCode.SERVER_ERROR, "unable to create new policy");
+            }
+            message.getProperties().put(Policy.POLICY, policy);
+
+          } else {
+            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cannot create collection");
+          }
+        }
 
         replicaPositions = Assign.identifyNodes(() -> ocmh.overseer.getZkController().getCoreContainer(),
             ocmh.zkStateReader

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e4d542b/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java
index f96dd0c..0dd7c76 100644
--- a/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java
@@ -18,33 +18,41 @@ package org.apache.solr.cloud;
 
 
 import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
+import com.google.common.collect.ImmutableMap;
+import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
 import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.ReplicaPosition;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CoreAdminParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.SimpleOrderedMap;
 import org.apache.solr.common.util.Utils;
+import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import static org.apache.solr.cloud.Assign.getNodesForNewReplicas;
-import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
+import static org.apache.solr.cloud.OverseerCollectionMessageHandler.RANDOM;
 import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
 import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
 import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
 import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SOLR_AUTOSCALING_CONF_PATH;
+import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
 import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
 
 public class CreateShardCmd implements Cmd {
@@ -75,37 +83,46 @@ public class CreateShardCmd implements Cmd {
     }
     
     Object createNodeSetStr = message.get(OverseerCollectionMessageHandler.CREATE_NODE_SET);
-    List<Assign.ReplicaCount> sortedNodeList = getNodesForNewReplicas(clusterState, collectionName, sliceName, totalReplicas,
-        createNodeSetStr, ocmh.overseer.getZkController().getCoreContainer());
 
     ZkStateReader zkStateReader = ocmh.zkStateReader;
+    boolean usePolicyFramework = usePolicyFramework(collection,ocmh);
+    List<ReplicaPosition> positions = null;
+    if (usePolicyFramework) {
+      if (collection.getPolicyName() != null) message.getProperties().put(Policy.POLICY, collection.getPolicyName());
+      positions = Assign.identifyNodes(() -> ocmh.overseer.getZkController().getCoreContainer(),
+          zkStateReader,
+          clusterState,
+          Assign.getLiveOrLiveAndCreateNodeSetList(clusterState.getLiveNodes(), message, RANDOM),
+          collectionName,
+          message,
+          Collections.singletonList(sliceName),
+          numNrtReplicas,
+          numTlogReplicas,
+          numPullReplicas);
+    } else {
+      List<Assign.ReplicaCount> sortedNodeList = getNodesForNewReplicas(clusterState, collectionName, sliceName, totalReplicas,
+          createNodeSetStr, ocmh.overseer.getZkController().getCoreContainer());
+      int i = 0;
+      positions = new ArrayList<>();
+      for (Map.Entry<Replica.Type, Integer> e : ImmutableMap.of(Replica.Type.NRT, numNrtReplicas,
+          Replica.Type.TLOG, numTlogReplicas,
+          Replica.Type.PULL, numPullReplicas
+      ).entrySet()) {
+        for (int j = 0; j < e.getValue(); j++) {
+          positions.add(new ReplicaPosition(sliceName, j+1, e.getKey(), sortedNodeList.get(i % sortedNodeList.size()).nodeName));
+          i++;
+        }
+      }
+    }
     Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
     // wait for a while until we see the shard
     ocmh.waitForNewShard(collectionName, sliceName);
 
     String async = message.getStr(ASYNC);
-    
-    int createdNrtReplicas = 0, createdTlogReplicas = 0, createdPullReplicas = 0;
     CountDownLatch countDownLatch = new CountDownLatch(totalReplicas);
-    for (int j = 1; j <= totalReplicas; j++) {
-      int coreNameNumber;
-      Replica.Type typeToCreate;
-      if (createdNrtReplicas < numNrtReplicas) {
-        createdNrtReplicas++;
-        coreNameNumber = createdNrtReplicas;
-        typeToCreate = Replica.Type.NRT;
-      } else if (createdTlogReplicas < numTlogReplicas) {
-        createdTlogReplicas++;
-        coreNameNumber = createdTlogReplicas;
-        typeToCreate = Replica.Type.TLOG;
-      } else {
-        createdPullReplicas++;
-        coreNameNumber = createdPullReplicas;
-        typeToCreate = Replica.Type.PULL;
-      }
-      String nodeName = sortedNodeList.get(((j - 1)) % sortedNodeList.size()).nodeName;
-      String coreName = Assign.buildCoreName(collectionName, sliceName, typeToCreate, coreNameNumber);
-//      String coreName = collectionName + "_" + sliceName + "_replica" + j;
+    for (ReplicaPosition position : positions) {
+      String nodeName = position.node;
+      String coreName = Assign.buildCoreName(collectionName, sliceName, position.type, position.index);
       log.info("Creating replica " + coreName + " as part of slice " + sliceName + " of collection " + collectionName
           + " on " + nodeName);
 
@@ -113,15 +130,15 @@ public class CreateShardCmd implements Cmd {
       ZkNodeProps addReplicasProps = new ZkNodeProps(
           COLLECTION_PROP, collectionName,
           SHARD_ID_PROP, sliceName,
-          CoreAdminParams.REPLICA_TYPE, typeToCreate.name(),
+          ZkStateReader.REPLICA_TYPE, position.type.name(),
           CoreAdminParams.NODE, nodeName,
           CoreAdminParams.NAME, coreName);
       Map<String, Object> propertyParams = new HashMap<>();
-      ocmh.addPropertyParams(message, propertyParams);;
+      ocmh.addPropertyParams(message, propertyParams);
       addReplicasProps = addReplicasProps.plus(propertyParams);
-      if(async!=null) addReplicasProps.getProperties().put(ASYNC, async);
+      if (async != null) addReplicasProps.getProperties().put(ASYNC, async);
       final NamedList addResult = new NamedList();
-      ocmh.addReplica(zkStateReader.getClusterState(), addReplicasProps, addResult, ()-> {
+      ocmh.addReplica(zkStateReader.getClusterState(), addReplicasProps, addResult, () -> {
         countDownLatch.countDown();
         Object addResultFailure = addResult.get("failure");
         if (addResultFailure != null) {
@@ -149,4 +166,10 @@ public class CreateShardCmd implements Cmd {
     log.info("Finished create command on all shards for collection: " + collectionName);
 
   }
+
+  static boolean usePolicyFramework(DocCollection collection, OverseerCollectionMessageHandler ocmh)
+      throws KeeperException, InterruptedException {
+    Map autoScalingJson = Utils.getJson(ocmh.zkStateReader.getZkClient(), SOLR_AUTOSCALING_CONF_PATH, true);
+    return autoScalingJson.get(Policy.CLUSTER_POLICY) != null || collection.getPolicyName() != null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e4d542b/solr/core/src/java/org/apache/solr/cloud/DeleteCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteCollectionCmd.java
index b891c92..f0ee458 100644
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteCollectionCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/DeleteCollectionCmd.java
@@ -25,17 +25,23 @@ import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.solr.cloud.autoscaling.AutoScalingHandler;
 import org.apache.solr.common.NonExistentCoreException;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.AutoScalingParams;
 import org.apache.solr.common.params.CoreAdminParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.CommandOperation;
 import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.StrUtils;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.core.snapshots.SolrSnapshotManager;
+import org.apache.solr.response.SolrQueryResponse;
 import org.apache.solr.util.TimeOut;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
@@ -57,6 +63,8 @@ public class DeleteCollectionCmd implements OverseerCollectionMessageHandler.Cmd
   public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
     ZkStateReader zkStateReader = ocmh.zkStateReader;
     final String collection = message.getStr(NAME);
+    DocCollection coll = state.getCollectionOrNull(collection);
+    String policy = coll == null ? null : coll.getPolicyName();
     try {
       // Remove the snapshots meta-data for this collection in ZK. Deleting actual index files
       // should be taken care of as part of collection delete operation.
@@ -106,6 +114,31 @@ public class DeleteCollectionCmd implements OverseerCollectionMessageHandler.Cmd
         throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
             "Could not fully remove collection: " + collection);
       }
+      String collectionSpecificPolicy = "COLL_POLICY_" + collection;
+      if (collectionSpecificPolicy.equals(policy)) {
+        for (Map.Entry<String, ClusterState.CollectionRef> e : state.getCollectionStates().entrySet()) {
+          if (collection.equals(e.getKey())) continue;
+          DocCollection c = e.getValue().get();
+          if (collectionSpecificPolicy.equals(c.getPolicyName())) {
+            log.info(StrUtils.formatString("{0} is being used by collection {1} . So, it's not deleted", collectionSpecificPolicy, e.getKey()));
+            return;
+          }
+
+        }
+        AutoScalingHandler ash = (AutoScalingHandler) ocmh.overseer.getZkController().getCoreContainer()
+            .getRequestHandler(AutoScalingHandler.HANDLER_PATH);
+        SolrQueryResponse rsp = new SolrQueryResponse();
+        try {
+          ash.handleRemovePolicy(null, rsp, new CommandOperation(AutoScalingParams.CMD_REMOVE_POLICY, collectionSpecificPolicy));
+        } catch (SolrException e) {
+          if (e.getMessage().contains("No policy exists with name")) {
+            log.warn("The policy: " + collectionSpecificPolicy + " does not exist to be removed");
+          } else {
+            throw e;
+          }
+        }
+      }
+
 
     } finally {
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e4d542b/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScalingHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScalingHandler.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScalingHandler.java
index e730088..ebec3f6 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScalingHandler.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScalingHandler.java
@@ -222,7 +222,8 @@ public class AutoScalingHandler extends RequestHandlerBase implements Permission
     rsp.getValues().add("result", "success");
   }
 
-  private void handleRemovePolicy(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op) throws KeeperException, InterruptedException, IOException {
+  public void handleRemovePolicy(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op)
+      throws KeeperException, InterruptedException, IOException {
     String policyName = (String) op.getCommandData();
 
     if (policyName.trim().length() == 0) {
@@ -238,7 +239,7 @@ public class AutoScalingHandler extends RequestHandlerBase implements Permission
     rsp.getValues().add("result", "success");
   }
 
-  private void handleSetPolicies(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op) throws KeeperException, InterruptedException, IOException {
+  public void handleSetPolicies(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation op) throws KeeperException, InterruptedException, IOException {
     Map<String, Object> policies = op.getDataMap();
     for (Map.Entry<String, Object> policy : policies.entrySet()) {
       String policyName = policy.getKey();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e4d542b/solr/core/src/test/org/apache/solr/cloud/autoscaling/AutoScalingHandlerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/AutoScalingHandlerTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/AutoScalingHandlerTest.java
index fb000b9..6494923 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/AutoScalingHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/AutoScalingHandlerTest.java
@@ -677,7 +677,6 @@ public class AutoScalingHandlerTest extends SolrCloudTestCase {
     // lets create a collection which violates the rule replicas < 2
     CollectionAdminResponse adminResponse = CollectionAdminRequest.Create
         .createCollection("readApiTestViolations", CONFIGSET_NAME, 1, 6)
-        .setMaxShardsPerNode(10)
         .process(solrClient);
     assertTrue(adminResponse.isSuccess());
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e4d542b/solr/core/src/test/org/apache/solr/cloud/autoscaling/ComputePlanActionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/ComputePlanActionTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/ComputePlanActionTest.java
index 27ddc95..3816fde 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/ComputePlanActionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/ComputePlanActionTest.java
@@ -157,7 +157,7 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
 
     CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection("testNodeLost",
         "conf",1, 2);
-    create.setMaxShardsPerNode(1);
+//    create.setMaxShardsPerNode(1);
     create.process(solrClient);
 
     waitForState("Timed out waiting for replicas of new collection to be active",
@@ -231,7 +231,7 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
 
     CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection("testNodeWithMultipleReplicasLost",
         "conf",2, 3);
-    create.setMaxShardsPerNode(2);
+//    create.setMaxShardsPerNode(2);
     create.process(solrClient);
 
     waitForState("Timed out waiting for replicas of new collection to be active",
@@ -309,7 +309,6 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
 
     CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection("testNodeAdded",
         "conf",1, 2);
-    create.setMaxShardsPerNode(2);
     create.process(solrClient);
 
     waitForState("Timed out waiting for replicas of new collection to be active",

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e4d542b/solr/core/src/test/org/apache/solr/cloud/autoscaling/TestPolicyCloud.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/TestPolicyCloud.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/TestPolicyCloud.java
index 1f5e967..bce297e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/TestPolicyCloud.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/TestPolicyCloud.java
@@ -22,7 +22,6 @@ import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
-import java.util.Objects;
 import java.util.function.BiConsumer;
 
 import org.apache.lucene.util.Constants;
@@ -30,6 +29,7 @@ import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.SolrClientDataProvider;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.cloud.OverseerTaskProcessor;
@@ -46,6 +46,9 @@ import org.junit.rules.ExpectedException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.solr.cloud.autoscaling.AutoScalingHandlerTest.createAutoScalingRequest;
+import static org.apache.solr.common.cloud.ZkStateReader.SOLR_AUTOSCALING_CONF_PATH;
+
 @LuceneTestCase.Slow
 public class TestPolicyCloud extends SolrCloudTestCase {
 
@@ -71,7 +74,7 @@ public class TestPolicyCloud extends SolrCloudTestCase {
     JettySolrRunner jetty = cluster.getRandomJetty(random());
     int port = jetty.getLocalPort();
 
-    String commands =  "{set-policy :{c1 : [{replica:2 , shard:'#EACH', port: '" + port + "'}]}}";
+    String commands =  "{set-policy :{c1 : [{replica:0 , shard:'#EACH', port: '!" + port + "'}]}}";
     cluster.getSolrClient().request(AutoScalingHandlerTest.createAutoScalingRequest(SolrRequest.METHOD.POST, commands));
 
     String collectionName = "testCreateCollectionAddReplica";
@@ -106,7 +109,6 @@ public class TestPolicyCloud extends SolrCloudTestCase {
     String collectionName = "testCreateCollectionSplitShard";
     CollectionAdminRequest.createCollection(collectionName, "conf", 1, 2)
         .setPolicy("c1")
-        .setMaxShardsPerNode(10)
         .process(cluster.getSolrClient());
 
     DocCollection docCollection = getCollectionState(collectionName);
@@ -133,6 +135,47 @@ public class TestPolicyCloud extends SolrCloudTestCase {
     assertEquals("Expected exactly three replica of collection on node with port: " + secondNodePort, 3, replicasOnNode2);
   }
 
+  public void testCreateCollectionWithPolicyAndMaxShardsPerNode() throws Exception {
+    CloudSolrClient solrClient = cluster.getSolrClient();
+    Map original = Utils.getJson(solrClient.getZkStateReader().getZkClient(), SOLR_AUTOSCALING_CONF_PATH, true);
+    String setClusterPolicyCommand = "{" +
+        " 'set-cluster-policy': [" +
+        "      {'cores':'<10', 'node':'#ANY'}," +
+        "      {'replica':'<2', 'shard': '#EACH', 'node': '#ANY'}," +
+        "      {'nodeRole':'overseer', 'replica':0}" +
+        "    ]" +
+        "}";
+    SolrRequest req = createAutoScalingRequest(SolrRequest.METHOD.POST, setClusterPolicyCommand);
+    NamedList<Object> response = solrClient.request(req);
+    assertEquals(response.get("result").toString(), "success");
+    Map data = Utils.getJson(solrClient.getZkStateReader().getZkClient(), SOLR_AUTOSCALING_CONF_PATH, true);
+    List clusterPolicy = (List) data.get("cluster-policy");
+    assertNotNull(clusterPolicy);
+    assertEquals(3, clusterPolicy.size());
+
+    CollectionAdminRequest.createCollection("myColl", "conf", 1, 2)
+        .process(cluster.getSolrClient());
+    data = Utils.getJson(solrClient.getZkStateReader().getZkClient(), SOLR_AUTOSCALING_CONF_PATH, true);
+    assertEquals("Did create unexpected new policy  " + Utils.toJSONString(data),
+       null,  Utils.getObjectByPath(data, false, "policies/COLL_POLICY_myColl"));
+    CollectionAdminRequest.createCollection("myColl2", "conf", 1, 2)
+        .setMaxShardsPerNode(4)
+        .process(cluster.getSolrClient());
+    data = Utils.getJson(solrClient.getZkStateReader().getZkClient(), SOLR_AUTOSCALING_CONF_PATH, true);
+
+    assertEquals("Did not create expected new policy  " + Utils.toJSONString(data),
+        "<5",  Utils.getObjectByPath(data, false, "policies/COLL_POLICY_myColl2[0]/replica"));
+
+    CollectionAdminRequest.deleteCollection("myColl2").process(cluster.getSolrClient());
+
+    data = Utils.getJson(solrClient.getZkStateReader().getZkClient(), SOLR_AUTOSCALING_CONF_PATH, true);
+    assertEquals("Did not delete new policy  " + Utils.toJSONString(data),
+        null,  Utils.getObjectByPath(data, false, "policies/COLL_POLICY_myColl2"));
+
+    solrClient.getZkStateReader().getZkClient().setData(SOLR_AUTOSCALING_CONF_PATH, Utils.toJSON(original), true);
+
+  }
+
   public void testCreateCollectionAddShardWithReplicaTypeUsingPolicy() throws Exception {
     JettySolrRunner jetty = cluster.getJettySolrRunners().get(0);
     String nrtNodeName = jetty.getNodeName();
@@ -164,7 +207,6 @@ public class TestPolicyCloud extends SolrCloudTestCase {
         Utils.getObjectByPath(json, true, "cluster-policy[2]/port"));
 
     CollectionAdminRequest.createCollectionWithImplicitRouter("policiesTest", "conf", "s1", 1, 1, 1)
-        .setMaxShardsPerNode(5)
         .process(cluster.getSolrClient());
 
     DocCollection coll = getCollectionState("policiesTest");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e4d542b/solr/solrj/src/java/org/apache/solr/common/cloud/DocCollection.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/DocCollection.java b/solr/solrj/src/java/org/apache/solr/common/cloud/DocCollection.java
index b1d6ee2..4c12d9c 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/DocCollection.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/DocCollection.java
@@ -36,10 +36,10 @@ import org.noggit.JSONWriter;
 
 import static org.apache.solr.common.cloud.ZkStateReader.AUTO_ADD_REPLICAS;
 import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
 import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
 import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
 
 /**
  * Models a Collection in zookeeper (but that Java name is obviously taken, hence "DocCollection")
@@ -244,7 +244,8 @@ public class DocCollection extends ZkNodeProps implements Iterable<Slice> {
     if (maxShardsPerNode == null) {
       throw new SolrException(ErrorCode.BAD_REQUEST, MAX_SHARDS_PER_NODE + " is not in the cluster state.");
     }
-    return maxShardsPerNode;
+    //maxShardsPerNode=0 when policy is used. This variable is not important then
+    return maxShardsPerNode == 0 ? Integer.MAX_VALUE : maxShardsPerNode;
   }
 
   public String getZNode(){

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e4d542b/solr/solrj/src/java/org/apache/solr/common/cloud/ReplicaPosition.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ReplicaPosition.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ReplicaPosition.java
index d64d1d1..591a001 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ReplicaPosition.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ReplicaPosition.java
@@ -45,7 +45,7 @@ public class ReplicaPosition implements Comparable<ReplicaPosition> {
 
   @Override
   public String toString() {
-    return shard + ":" + index;
+    return shard + ":" + index + "["+ type +"] @" + node;
   }
 
   public ReplicaPosition setNode(String node) {


[2/2] lucene-solr:feature/autoscaling: Merge remote-tracking branch 'origin/feature/autoscaling' into feature/autoscaling

Posted by no...@apache.org.
Merge remote-tracking branch 'origin/feature/autoscaling' into feature/autoscaling


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d2849cd4
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d2849cd4
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d2849cd4

Branch: refs/heads/feature/autoscaling
Commit: d2849cd453b9fa44970e27dfbc1771f3d5939d26
Parents: 9e4d542 846f59e
Author: Noble Paul <no...@apache.org>
Authored: Thu Jul 13 16:57:51 2017 +0930
Committer: Noble Paul <no...@apache.org>
Committed: Thu Jul 13 16:57:51 2017 +0930

----------------------------------------------------------------------
 dev-tools/scripts/addVersion.py                 |    2 +-
 solr/CHANGES.txt                                |   30 +-
 solr/bin/solr                                   |   24 +-
 solr/bin/solr.cmd                               |    8 +
 .../java/org/apache/solr/ltr/LTRRescorer.java   |   13 +-
 .../java/org/apache/solr/cloud/CloudUtil.java   |    4 +-
 .../apache/solr/cloud/CreateCollectionCmd.java  |    9 +
 .../org/apache/solr/cloud/MoveReplicaCmd.java   |   35 +-
 .../java/org/apache/solr/cloud/Overseer.java    |    2 +-
 .../org/apache/solr/cloud/ZkController.java     |    5 +-
 .../org/apache/solr/handler/StreamHandler.java  |    2 +-
 .../solr/handler/admin/CollectionsHandler.java  |    2 +
 .../component/QueryElevationComponent.java      |   82 +-
 .../solr/search/stats/ExactStatsCache.java      |    5 +-
 .../apache/solr/search/stats/LRUStatsCache.java |    2 +-
 .../solr/search/stats/LocalStatsCache.java      |   18 +-
 .../src/java/org/apache/solr/util/SolrCLI.java  |  202 ++-
 ...chema-add-schema-fields-update-processor.xml |   12 +-
 .../conf/schema-blockjoinfacetcomponent.xml     |    6 +-
 .../collection1/conf/schema-custom-field.xml    |    4 +-
 .../solr/configsets/_default/conf/elevate.xml   |   42 -
 .../configsets/_default/conf/solrconfig.xml     |    1 -
 .../solr/cloud/CollectionsAPISolrJTest.java     |    3 +-
 .../apache/solr/cloud/MoveReplicaHDFSTest.java  |    1 -
 .../solr/cloud/MoveReplicaHDFSUlogDirTest.java  |  142 ++
 .../cloud/TestPullReplicaErrorHandling.java     |    3 +-
 .../cloud/TestRandomRequestDistribution.java    |   64 +-
 .../org/apache/solr/schema/TestPointFields.java | 1409 ++++++++++++------
 .../solr/schema/TestUseDocValuesAsStored.java   |   24 +-
 .../solr/configsets/_default/conf/elevate.xml   |   42 -
 .../configsets/_default/conf/solrconfig.xml     |    1 -
 solr/solr-ref-guide/src/about-this-guide.adoc   |   59 +-
 solr/solr-ref-guide/src/about-tokenizers.adoc   |    1 -
 ...adding-custom-plugins-in-solrcloud-mode.adoc |    9 -
 solr/solr-ref-guide/src/analyzers.adoc          |    2 -
 ...uthentication-and-authorization-plugins.adoc |   18 +-
 .../src/basic-authentication-plugin.adoc        |   11 +-
 solr/solr-ref-guide/src/blob-store-api.adoc     |    3 -
 solr/solr-ref-guide/src/blockjoin-faceting.adoc |    4 +-
 .../solr-ref-guide/src/charfilterfactories.adoc |    4 -
 .../src/collapse-and-expand-results.adoc        |    4 +-
 solr/solr-ref-guide/src/collections-api.adoc    |  136 +-
 .../src/collections-core-admin.adoc             |    2 +-
 .../src/command-line-utilities.adoc             |   26 +-
 .../src/common-query-parameters.adoc            |    4 +-
 solr/solr-ref-guide/src/config-api.adoc         |   40 +-
 solr/solr-ref-guide/src/configsets-api.adoc     |   79 +-
 .../solr-ref-guide/src/configuring-logging.adoc |    5 -
 .../src/configuring-solrconfig-xml.adoc         |    8 +-
 solr/solr-ref-guide/src/content-streams.adoc    |    7 +-
 solr/solr-ref-guide/src/coreadmin-api.adoc      |   29 +-
 .../src/cross-data-center-replication-cdcr.adoc |  140 +-
 ...adir-and-directoryfactory-in-solrconfig.adoc |    3 -
 solr/solr-ref-guide/src/dataimport-screen.adoc  |    1 -
 solr/solr-ref-guide/src/de-duplication.adoc     |    5 -
 .../src/defining-core-properties.adoc           |    6 +-
 solr/solr-ref-guide/src/defining-fields.adoc    |    5 +-
 .../detecting-languages-during-indexing.adoc    |    4 -
 .../src/distributed-requests.adoc               |    9 +-
 .../distributed-search-with-index-sharding.adoc |    7 +-
 solr/solr-ref-guide/src/documents-screen.adoc   |   16 +-
 solr/solr-ref-guide/src/docvalues.adoc          |    2 -
 solr/solr-ref-guide/src/enabling-ssl.adoc       |   22 +-
 solr/solr-ref-guide/src/errata.adoc             |    2 -
 .../src/exporting-result-sets.adoc              |    6 -
 solr/solr-ref-guide/src/faceting.adoc           |    4 +-
 .../field-type-definitions-and-properties.adoc  |    8 +-
 .../src/field-types-included-with-solr.adoc     |    6 +-
 .../solr-ref-guide/src/filter-descriptions.adoc |   70 +-
 solr/solr-ref-guide/src/function-queries.adoc   |   14 +-
 .../src/getting-started-with-solrcloud.adoc     |    5 -
 solr/solr-ref-guide/src/graph-traversal.adoc    |   32 +-
 .../src/hadoop-authentication-plugin.adoc       |    7 +-
 solr/solr-ref-guide/src/highlighting.adoc       |   13 +-
 .../solr-ref-guide/src/how-solrcloud-works.adoc |    7 +-
 .../src/implicit-requesthandlers.adoc           |   13 +-
 solr/solr-ref-guide/src/index-replication.adoc  |   15 +-
 .../src/indexconfig-in-solrconfig.adoc          |   20 +-
 .../src/indexing-and-basic-data-operations.adoc |    1 -
 .../src/initparams-in-solrconfig.adoc           |    3 +-
 .../src/introduction-to-solr-indexing.adoc      |    2 -
 solr/solr-ref-guide/src/jvm-settings.adoc       |    3 -
 .../src/kerberos-authentication-plugin.adoc     |   19 +-
 solr/solr-ref-guide/src/language-analysis.adoc  |  125 +-
 solr/solr-ref-guide/src/learning-to-rank.adoc   |   74 +-
 .../src/local-parameters-in-queries.adoc        |    3 -
 solr/solr-ref-guide/src/logging.adoc            |    1 -
 .../major-changes-from-solr-5-to-solr-6.adoc    |    4 +-
 .../src/making-and-restoring-backups.adoc       |    8 +-
 solr/solr-ref-guide/src/managed-resources.adoc  |   17 +-
 .../src/mbean-request-handler.adoc              |    3 +-
 solr/solr-ref-guide/src/merging-indexes.adoc    |    6 +-
 solr/solr-ref-guide/src/morelikethis.adoc       |    9 +-
 .../src/near-real-time-searching.adoc           |   10 +-
 solr/solr-ref-guide/src/other-parsers.adoc      |   99 +-
 .../src/other-schema-elements.adoc              |    2 -
 .../src/overview-of-searching-in-solr.adoc      |    2 +-
 .../src/pagination-of-results.adoc              |    4 +-
 .../src/performance-statistics-reference.adoc   |    4 +-
 solr/solr-ref-guide/src/phonetic-matching.adoc  |   20 +-
 solr/solr-ref-guide/src/post-tool.adoc          |   11 +-
 .../src/query-settings-in-solrconfig.adoc       |   16 +-
 .../read-and-write-side-fault-tolerance.adoc    |    6 -
 .../src/request-parameters-api.adoc             |   15 +-
 .../src/requestdispatcher-in-solrconfig.adoc    |    6 +-
 ...lers-and-searchcomponents-in-solrconfig.adoc |   12 +-
 solr/solr-ref-guide/src/response-writers.adoc   |   69 +-
 solr/solr-ref-guide/src/result-clustering.adoc  |   36 +-
 solr/solr-ref-guide/src/result-grouping.adoc    |   15 +-
 .../src/rule-based-authorization-plugin.adoc    |   16 +-
 .../src/rule-based-replica-placement.adoc       |   28 +-
 .../src/running-solr-on-hdfs.adoc               |   20 +-
 solr/solr-ref-guide/src/running-solr.adoc       |    4 +-
 solr/solr-ref-guide/src/schema-api.adoc         |    4 +-
 ...schema-factory-definition-in-solrconfig.adoc |    6 +-
 solr/solr-ref-guide/src/schemaless-mode.adoc    |   17 +-
 solr/solr-ref-guide/src/segments-info.adoc      |    2 +-
 ...tting-up-an-external-zookeeper-ensemble.adoc |   21 +-
 .../shards-and-indexing-data-in-solrcloud.adoc  |    7 +-
 .../src/solr-control-script-reference.adoc      |   13 +-
 solr/solr-ref-guide/src/solr-glossary.adoc      |    4 +-
 .../src/solr-jdbc-apache-zeppelin.adoc          |    3 -
 .../src/solr-jdbc-dbvisualizer.adoc             |   15 +-
 solr/solr-ref-guide/src/spatial-search.adoc     |   23 +-
 solr/solr-ref-guide/src/spell-checking.adoc     |    2 +-
 solr/solr-ref-guide/src/stream-decorators.adoc  |   16 +-
 .../src/streaming-expressions.adoc              |    3 -
 solr/solr-ref-guide/src/suggester.adoc          |  333 +++--
 .../src/taking-solr-to-production.adoc          |   23 +-
 .../src/the-query-elevation-component.adoc      |   11 +-
 .../solr-ref-guide/src/the-stats-component.adoc |   16 +-
 .../src/the-term-vector-component.adoc          |    8 +-
 .../solr-ref-guide/src/the-terms-component.adoc |   13 +-
 .../src/the-well-configured-solr-instance.adoc  |    2 -
 solr/solr-ref-guide/src/tokenizers.adoc         |   14 -
 .../transforming-and-indexing-custom-json.adoc  |   52 +-
 .../src/transforming-result-documents.adoc      |   25 +-
 solr/solr-ref-guide/src/uima-integration.adoc   |    2 -
 ...anding-analyzers-tokenizers-and-filters.adoc |    4 -
 .../src/update-request-processors.adoc          |   25 +-
 .../src/updatehandlers-in-solrconfig.adoc       |    8 +-
 .../src/updating-parts-of-documents.adoc        |   27 +-
 .../src/upgrading-a-solr-cluster.adoc           |    8 -
 solr/solr-ref-guide/src/upgrading-solr.adoc     |    8 +-
 .../src/uploading-data-with-index-handlers.adoc |   37 +-
 ...g-data-with-solr-cell-using-apache-tika.adoc |   48 +-
 solr/solr-ref-guide/src/using-javascript.adoc   |    2 +-
 .../solr-ref-guide/src/using-jmx-with-solr.adoc |   10 +-
 solr/solr-ref-guide/src/using-python.adoc       |    4 +-
 .../src/using-solr-from-ruby.adoc               |    2 +-
 solr/solr-ref-guide/src/using-solrj.adoc        |    8 -
 ...zookeeper-to-manage-configuration-files.adoc |   25 +-
 solr/solr-ref-guide/src/v2-api.adoc             |    5 -
 .../src/velocity-response-writer.adoc           |    3 -
 solr/solr-ref-guide/src/velocity-search-ui.adoc |    4 +-
 ...king-with-currencies-and-exchange-rates.adoc |   23 +-
 solr/solr-ref-guide/src/working-with-dates.adoc |   10 -
 .../src/working-with-enum-fields.adoc           |   10 +-
 ...rking-with-external-files-and-processes.adoc |   27 +-
 .../src/zookeeper-access-control.adoc           |    8 -
 .../solrj/io/eval/ResidualsEvaluator.java       |   82 +
 .../solrj/request/CollectionAdminRequest.java   |   13 +
 .../solrj/response/CollectionAdminResponse.java |    5 +
 .../org/apache/solr/common/cloud/Replica.java   |    7 +-
 .../solrj/io/stream/StreamExpressionTest.java   |   24 +
 solr/webapp/web/js/angular/controllers/cloud.js |    1 +
 166 files changed, 2402 insertions(+), 2436 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d2849cd4/solr/core/src/java/org/apache/solr/cloud/CreateCollectionCmd.java
----------------------------------------------------------------------