You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by no...@apache.org on 2017/05/01 07:29:19 UTC

lucene-solr:feature/autoscaling: Policy framework changed for the new format cluster-preferences, cluster-policy and other common policies

Repository: lucene-solr
Updated Branches:
  refs/heads/feature/autoscaling ba63ac69e -> ea106682c


Policy framework changed for the new format cluster-preferences, cluster-policy and other common policies


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/ea106682
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/ea106682
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/ea106682

Branch: refs/heads/feature/autoscaling
Commit: ea106682c253cde580b6a30133cbb867ebe361e3
Parents: ba63ac6
Author: Noble Paul <no...@apache.org>
Authored: Mon May 1 16:58:41 2017 +0930
Committer: Noble Paul <no...@apache.org>
Committed: Mon May 1 16:58:41 2017 +0930

----------------------------------------------------------------------
 .../cloud/OverseerCollectionMessageHandler.java |   6 +-
 .../solr/cloud/autoscaling/TestPolicyCloud.java |   5 +-
 .../client/solrj/impl/ClientDataProvider.java   | 195 ------------------
 .../solrj/impl/SolrClientDataProvider.java      | 204 +++++++++++++++++++
 .../cloud/autoscaling/AddReplicaSuggester.java  |   2 +-
 .../apache/solr/cloud/autoscaling/Clause.java   |  41 +++-
 .../cloud/autoscaling/ClusterDataProvider.java  |  10 +-
 .../cloud/autoscaling/MoveReplicaSuggester.java |   4 +-
 .../apache/solr/cloud/autoscaling/Policy.java   | 189 +++++++++--------
 .../solr/cloud/autoscaling/PolicyHelper.java    |  12 +-
 .../solr/cloud/autoscaling/TestPolicy.java      | 185 ++++++++---------
 11 files changed, 455 insertions(+), 398 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea106682/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java
index 65b23cf..ebc13c4 100644
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java
+++ b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java
@@ -37,7 +37,7 @@ import com.google.common.collect.ImmutableMap;
 import org.apache.commons.lang.StringUtils;
 import org.apache.solr.client.solrj.SolrResponse;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.ClientDataProvider;
+import org.apache.solr.client.solrj.impl.SolrClientDataProvider;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException;
@@ -727,10 +727,10 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler
       try(CloudSolrClient csc = new CloudSolrClient.Builder()
           .withClusterStateProvider(new ZkClientClusterStateProvider(zkStateReader))
           .build()) {
-        ClientDataProvider clientDataProvider = new ClientDataProvider(csc);
+        SolrClientDataProvider clientDataProvider = new SolrClientDataProvider(csc);
         Map<String, List<String>> locations = PolicyHelper.getReplicaLocations(collName,
             zkStateReader.getZkClient().getJson(SOLR_AUTOSCALING_CONF_PATH, true),
-            policyName, clientDataProvider, shardNames, repFactor);
+            clientDataProvider, shardNames, repFactor);
         Map<Position, String> result = new HashMap<>();
         for (Map.Entry<String, List<String>> e : locations.entrySet()) {
           List<String> value = e.getValue();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea106682/solr/core/src/test/org/apache/solr/cloud/autoscaling/TestPolicyCloud.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/TestPolicyCloud.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/TestPolicyCloud.java
index ffea699..657d48d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/TestPolicyCloud.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/TestPolicyCloud.java
@@ -23,7 +23,7 @@ import java.util.Map;
 
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.ClientDataProvider;
+import org.apache.solr.client.solrj.impl.SolrClientDataProvider;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.common.cloud.DocCollection;
@@ -59,11 +59,10 @@ public class TestPolicyCloud extends SolrCloudTestCase {
     CollectionAdminRequest.createCollectionWithImplicitRouter("policiesTest", "conf", "shard1", 2)
         .process(cluster.getSolrClient());
     DocCollection rulesCollection = getCollectionState("policiesTest");
-    ClientDataProvider provider = new ClientDataProvider(cluster.getSolrClient());
+    SolrClientDataProvider provider = new SolrClientDataProvider(cluster.getSolrClient());
 
     Map<String, Object> val = provider.getNodeValues(rulesCollection.getReplicas().get(0).getNodeName(), Arrays.asList("freedisk", "cores"));
     assertTrue(((Number) val.get("cores")).intValue() > 0);
     assertTrue("freedisk value is "+((Number) val.get("freedisk")).longValue() , ((Number) val.get("freedisk")).longValue() > 0);
-    System.out.println(Utils.toJSONString(val));
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea106682/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ClientDataProvider.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ClientDataProvider.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ClientDataProvider.java
deleted file mode 100644
index bae32d7..0000000
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ClientDataProvider.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.client.solrj.impl;
-
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.request.GenericSolrRequest;
-import org.apache.solr.client.solrj.response.SimpleSolrResponse;
-import org.apache.solr.cloud.autoscaling.ClusterDataProvider;
-import org.apache.solr.cloud.autoscaling.Policy.ReplicaInfo;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.cloud.rule.ImplicitSnitch;
-import org.apache.solr.common.cloud.rule.RemoteCallback;
-import org.apache.solr.common.cloud.rule.SnitchContext;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ClientDataProvider implements ClusterDataProvider {
-
-  private final CloudSolrClient solrClient;
-  private Set<String> liveNodes;
-  private Map<String,Object> snitchSession = new HashMap<>();
-  private final Map<String, Map<String, Map<String, List<ReplicaInfo>>>> data = new HashMap<>();
-
-  public ClientDataProvider(CloudSolrClient solrClient) {
-    this.solrClient = solrClient;
-    ZkStateReader zkStateReader = solrClient.getZkStateReader();
-    ClusterState clusterState = zkStateReader.getClusterState();
-    this.liveNodes = clusterState.getLiveNodes();
-    Map<String, ClusterState.CollectionRef> all = clusterState.getCollectionStates();
-    all.forEach((collName, ref) -> {
-      DocCollection coll = ref.get();
-      if (coll == null) return;
-      coll.forEachReplica((shard, replica) -> {
-        Map<String, Map<String, List<ReplicaInfo>>> nodeData = data.get(replica.getNodeName());
-        if (nodeData == null) data.put(replica.getNodeName(), nodeData = new HashMap<>());
-        Map<String, List<ReplicaInfo>> collData = nodeData.get(collName);
-        if (collData == null) nodeData.put(collName, collData = new HashMap<>());
-        List<ReplicaInfo> replicas = collData.get(shard);
-        if (replicas == null) collData.put(shard, replicas = new ArrayList<>());
-        replicas.add(new ReplicaInfo(replica.getName(), new HashMap<>()));
-      });
-    });
-  }
-
-  @Override
-  public Map<String, Object> getNodeValues(String node, Collection<String> keys) {
-    AutoScalingSnitch  snitch = new AutoScalingSnitch();
-    ClientSnitchCtx ctx = new ClientSnitchCtx(null, node, snitchSession, solrClient);
-    snitch.getRemoteInfo(node, new HashSet<>(keys), ctx);
-    return ctx.getTags();
-  }
-
-  @Override
-  public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
-    return data.get(node);//todo fill other details
-  }
-
-  @Override
-  public Collection<String> getNodes() {
-    return liveNodes;
-  }
-
-
-  static class ClientSnitchCtx
-      extends SnitchContext {
-    private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-    ZkClientClusterStateProvider zkClientClusterStateProvider;
-    CloudSolrClient solrClient;
-
-    public ClientSnitchCtx(SnitchInfo perSnitch,
-                           String node, Map<String, Object> session,
-                           CloudSolrClient solrClient) {
-      super(perSnitch, node, session);
-      this.solrClient = solrClient;
-      this.zkClientClusterStateProvider = (ZkClientClusterStateProvider) solrClient.getClusterStateProvider();
-    }
-
-
-    public Map getZkJson(String path) {
-      try {
-        byte[] data = zkClientClusterStateProvider.getZkStateReader().getZkClient().getData(path, null, new Stat(), true);
-        if (data == null) return null;
-        return (Map) Utils.fromJSON(data);
-      } catch (Exception e) {
-        log.warn("Unable to read from ZK path : " + path, e);
-        return null;
-      }
-    }
-
-    public void invokeRemote(String node, ModifiableSolrParams params, String klas, RemoteCallback callback) {
-
-    }
-
-    public SimpleSolrResponse invoke(String solrNode, String path, SolrParams params)
-        throws IOException, SolrServerException {
-      String url = zkClientClusterStateProvider.getZkStateReader().getBaseUrlForNodeName(solrNode);
-
-      GenericSolrRequest request = new GenericSolrRequest(SolrRequest.METHOD.GET, path, params);
-      try (HttpSolrClient client = new HttpSolrClient.Builder()
-          .withHttpClient(solrClient.getHttpClient())
-          .withBaseSolrUrl(url)
-          .withResponseParser(new BinaryResponseParser())
-          .build()) {
-        NamedList<Object> rsp = client.request(request);
-        request.response.nl = rsp;
-        return request.response;
-      }
-    }
-
-  }
-
-  //uses metrics API to get node information
-  static class AutoScalingSnitch extends ImplicitSnitch {
-
-
-    @Override
-    protected void getRemoteInfo(String solrNode, Set<String> requestedTags, SnitchContext ctx) {
-      ClientSnitchCtx snitchContext = (ClientSnitchCtx) ctx;
-      List<String> groups = new ArrayList<>();
-      List<String> prefixes = new ArrayList<>();
-      if (requestedTags.contains(DISK)) {
-        groups.add("solr.node");
-        prefixes.add("CONTAINER.fs.usableSpace");
-      }
-      if (requestedTags.contains(CORES)) {
-        groups.add("solr.core");
-        prefixes.add("CORE.coreName");
-      }
-      if(groups.isEmpty() || prefixes.isEmpty()) return;
-
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.add("group", StrUtils.join(groups, ','));
-      params.add("prefix", StrUtils.join(prefixes,','));
-
-      try {
-        SimpleSolrResponse rsp = snitchContext.invoke(solrNode, CommonParams.METRICS_PATH, params);
-        Map m = rsp.nl.asMap(4);
-        if(requestedTags.contains(DISK)){
-          Number n = (Number) Utils.getObjectByPath(m,true, "metrics/solr.node/CONTAINER.fs.usableSpace/value");
-          if(n != null) ctx.getTags().put(DISK, n.longValue());
-        }
-        if(requestedTags.contains(CORES)){
-          int count = 0;
-          Map cores  = (Map) m.get("metrics");
-          for (Object o : cores.keySet()) {
-            if(o.toString().startsWith("solr.core.")) count++;
-          }
-          ctx.getTags().put(CORES, count);
-        }
-
-      } catch (Exception e) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "", e);
-      }
-
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea106682/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrClientDataProvider.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrClientDataProvider.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrClientDataProvider.java
new file mode 100644
index 0000000..acd5970
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrClientDataProvider.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.client.solrj.impl;
+
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.request.GenericSolrRequest;
+import org.apache.solr.client.solrj.response.SimpleSolrResponse;
+import org.apache.solr.cloud.autoscaling.ClusterDataProvider;
+import org.apache.solr.cloud.autoscaling.Policy.ReplicaInfo;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.cloud.rule.ImplicitSnitch;
+import org.apache.solr.common.cloud.rule.RemoteCallback;
+import org.apache.solr.common.cloud.rule.SnitchContext;
+import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.common.util.Utils;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/** Class that implements {@link ClusterDataProvider} accepting a SolrClient.
+ *
+ */
+public class SolrClientDataProvider implements ClusterDataProvider {
+
+  private final CloudSolrClient solrClient;
+  private Set<String> liveNodes;
+  private Map<String,Object> snitchSession = new HashMap<>();
+  private final Map<String, Map<String, Map<String, List<ReplicaInfo>>>> data = new HashMap<>();
+
+  public SolrClientDataProvider(CloudSolrClient solrClient) {
+    this.solrClient = solrClient;
+    ZkStateReader zkStateReader = solrClient.getZkStateReader();
+    ClusterState clusterState = zkStateReader.getClusterState();
+    this.liveNodes = clusterState.getLiveNodes();
+    Map<String, ClusterState.CollectionRef> all = clusterState.getCollectionStates();
+    all.forEach((collName, ref) -> {
+      DocCollection coll = ref.get();
+      if (coll == null) return;
+      coll.forEachReplica((shard, replica) -> {
+        Map<String, Map<String, List<ReplicaInfo>>> nodeData = data.get(replica.getNodeName());
+        if (nodeData == null) data.put(replica.getNodeName(), nodeData = new HashMap<>());
+        Map<String, List<ReplicaInfo>> collData = nodeData.get(collName);
+        if (collData == null) nodeData.put(collName, collData = new HashMap<>());
+        List<ReplicaInfo> replicas = collData.get(shard);
+        if (replicas == null) collData.put(shard, replicas = new ArrayList<>());
+        replicas.add(new ReplicaInfo(replica.getName(), new HashMap<>()));
+      });
+    });
+  }
+
+  @Override
+  public String getPolicy(String coll) {
+    ClusterState.CollectionRef state = solrClient.getClusterStateProvider().getState(coll);
+    return state == null || state.get() == null ? null : (String) state.get().getProperties().get("policy");
+  }
+
+  @Override
+  public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
+    AutoScalingSnitch  snitch = new AutoScalingSnitch();
+    ClientSnitchCtx ctx = new ClientSnitchCtx(null, node, snitchSession, solrClient);
+    snitch.getRemoteInfo(node, new HashSet<>(tags), ctx);
+    return ctx.getTags();
+  }
+
+  @Override
+  public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
+    return data.get(node);//todo fill other details
+  }
+
+  @Override
+  public Collection<String> getNodes() {
+    return liveNodes;
+  }
+
+
+  static class ClientSnitchCtx
+      extends SnitchContext {
+    private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+    ZkClientClusterStateProvider zkClientClusterStateProvider;
+    CloudSolrClient solrClient;
+
+    public ClientSnitchCtx(SnitchInfo perSnitch,
+                           String node, Map<String, Object> session,
+                           CloudSolrClient solrClient) {
+      super(perSnitch, node, session);
+      this.solrClient = solrClient;
+      this.zkClientClusterStateProvider = (ZkClientClusterStateProvider) solrClient.getClusterStateProvider();
+    }
+
+
+    public Map getZkJson(String path) {
+      try {
+        byte[] data = zkClientClusterStateProvider.getZkStateReader().getZkClient().getData(path, null, new Stat(), true);
+        if (data == null) return null;
+        return (Map) Utils.fromJSON(data);
+      } catch (Exception e) {
+        log.warn("Unable to read from ZK path : " + path, e);
+        return null;
+      }
+    }
+
+    public void invokeRemote(String node, ModifiableSolrParams params, String klas, RemoteCallback callback) {
+
+    }
+
+    public SimpleSolrResponse invoke(String solrNode, String path, SolrParams params)
+        throws IOException, SolrServerException {
+      String url = zkClientClusterStateProvider.getZkStateReader().getBaseUrlForNodeName(solrNode);
+
+      GenericSolrRequest request = new GenericSolrRequest(SolrRequest.METHOD.GET, path, params);
+      try (HttpSolrClient client = new HttpSolrClient.Builder()
+          .withHttpClient(solrClient.getHttpClient())
+          .withBaseSolrUrl(url)
+          .withResponseParser(new BinaryResponseParser())
+          .build()) {
+        NamedList<Object> rsp = client.request(request);
+        request.response.nl = rsp;
+        return request.response;
+      }
+    }
+
+  }
+
+  //uses metrics API to get node information
+  static class AutoScalingSnitch extends ImplicitSnitch {
+
+
+    @Override
+    protected void getRemoteInfo(String solrNode, Set<String> requestedTags, SnitchContext ctx) {
+      ClientSnitchCtx snitchContext = (ClientSnitchCtx) ctx;
+      List<String> groups = new ArrayList<>();
+      List<String> prefixes = new ArrayList<>();
+      if (requestedTags.contains(DISK)) {
+        groups.add("solr.node");
+        prefixes.add("CONTAINER.fs.usableSpace");
+      }
+      if (requestedTags.contains(CORES)) {
+        groups.add("solr.core");
+        prefixes.add("CORE.coreName");
+      }
+      if(groups.isEmpty() || prefixes.isEmpty()) return;
+
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.add("group", StrUtils.join(groups, ','));
+      params.add("prefix", StrUtils.join(prefixes,','));
+
+      try {
+        SimpleSolrResponse rsp = snitchContext.invoke(solrNode, CommonParams.METRICS_PATH, params);
+        Map m = rsp.nl.asMap(4);
+        if(requestedTags.contains(DISK)){
+          Number n = (Number) Utils.getObjectByPath(m,true, "metrics/solr.node/CONTAINER.fs.usableSpace/value");
+          if(n != null) ctx.getTags().put(DISK, n.longValue());
+        }
+        if(requestedTags.contains(CORES)){
+          int count = 0;
+          Map cores  = (Map) m.get("metrics");
+          for (Object o : cores.keySet()) {
+            if(o.toString().startsWith("solr.core.")) count++;
+          }
+          ctx.getTags().put(CORES, count);
+        }
+
+      } catch (Exception e) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "", e);
+      }
+
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea106682/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/AddReplicaSuggester.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/AddReplicaSuggester.java b/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/AddReplicaSuggester.java
index a45280d..88434e1 100644
--- a/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/AddReplicaSuggester.java
+++ b/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/AddReplicaSuggester.java
@@ -43,7 +43,7 @@ class AddReplicaSuggester extends Suggester {
       String shard = hints.get(Hint.SHARD);
       row = row.addReplica(coll, shard);
       row.violations.clear();
-      for (Clause clause : session.getPolicy().clauses) {
+      for (Clause clause : session.expandedClauses) {
         if (strict || clause.strict) clause.test(row);
       }
       if (row.violations.isEmpty()) {// there are no rule violations

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea106682/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/Clause.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/Clause.java b/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/Clause.java
index 3a4c6ff..4947fd4 100644
--- a/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/Clause.java
+++ b/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/Clause.java
@@ -23,13 +23,16 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Predicate;
 
 import org.apache.solr.common.MapWriter;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.cloud.autoscaling.Policy.ReplicaInfo;
 
+import static java.util.Collections.reverseOrder;
 import static java.util.Collections.singletonMap;
 import static org.apache.solr.common.params.CoreAdminParams.COLLECTION;
 import static org.apache.solr.common.params.CoreAdminParams.REPLICA;
@@ -48,18 +51,39 @@ import static org.apache.solr.cloud.autoscaling.Policy.EACH;
 // a set of conditions in a policy
 public class Clause implements MapWriter, Comparable<Clause> {
   Map<String, Object> original;
-  Condition collection, shard, replica, tag;
+  Condition collection, shard, replica, tag, globalTag;
+
   boolean strict = true;
 
   Clause(Map<String, Object> m) {
     this.original = m;
-    collection = parse(COLLECTION, m);
-    shard = parse(SHARD, m);
-    this.replica = parse(REPLICA, m);
     strict = Boolean.parseBoolean(String.valueOf(m.getOrDefault("strict", "true")));
-    m.forEach((s, o) -> parseCondition(s, o));
+    Optional<String> globalTagName = m.keySet().stream().filter(Policy.GLOBAL_ONLY_TAGS::contains).findFirst();
+    if (globalTagName.isPresent()) {
+      globalTag = parse(globalTagName.get(), m);
+      if (m.size() > 2) {
+        throw new RuntimeException("Only one extra tag supported for the tag " + globalTagName.get() + " in " + Utils.toJSONString(m));
+      }
+      tag = parse(m.keySet().stream().filter(s -> (!globalTagName.get().equals(s) && !IGNORE_TAGS.contains(s))).findFirst().get(), m);
+    } else {
+      collection = parse(COLLECTION, m);
+      shard = parse(SHARD, m);
+      this.replica = parse(REPLICA, m);
+      m.forEach((s, o) -> parseCondition(s, o));
+    }
     if (tag == null)
       throw new RuntimeException("Invalid op, must have one and only one tag other than collection, shard,replica " + Utils.toJSONString(m));
+
+  }
+
+  public boolean doesOverride(Clause that) {
+    return (collection.equals(that.collection) &&
+        tag.name.equals(that.tag.name));
+
+  }
+
+  public boolean isPerCollectiontag() {
+    return globalTag == null;
   }
 
   void parseCondition(String s, Object o) {
@@ -150,9 +174,9 @@ public class Clause implements MapWriter, Comparable<Clause> {
         count += shards.getValue().size();
         if (shard.val.equals(EACH)) testReplicaCount(row, result, count);
         if (EACH.equals(shard.val)) count = 0;
-        }
-      if (shard.val.equals(ANY)) testReplicaCount(row, result, count);
       }
+      if (shard.val.equals(ANY)) testReplicaCount(row, result, count);
+    }
     if (result.get() == FAIL) row.violations.add(this);
     return result.get();
 
@@ -161,7 +185,7 @@ public class Clause implements MapWriter, Comparable<Clause> {
   private void testReplicaCount(Row row, AtomicReference<TestStatus> result, int count) {
     if("node".equals(tag.name)) if(!tag.isPass(row.node)) return;
     boolean checkCount = replica.op.match(replica.val, 0) != PASS || count > 0;
-    if (replica.op == WILDCARD && count > 0 && !tag.isPass(row)) {
+    if (replica.op == WILDCARD && count > 0 && !tag.isPass(row)) {//nodeRole:'!overseer', strict:false
       result.set(FAIL);
     } else if (checkCount && !replica.isPass(count)) {
       if (tag.op != WILDCARD && tag.isPass(row)) {
@@ -175,6 +199,7 @@ public class Clause implements MapWriter, Comparable<Clause> {
   public boolean isStrict() {
     return strict;
   }
+
   @Override
   public String toString() {
     return Utils.toJSONString(original);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea106682/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/ClusterDataProvider.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/ClusterDataProvider.java b/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/ClusterDataProvider.java
index 8e4d89e..1348557 100644
--- a/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/ClusterDataProvider.java
+++ b/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/ClusterDataProvider.java
@@ -28,7 +28,13 @@ import org.apache.solr.client.solrj.SolrServerException;
 
 
 public interface ClusterDataProvider extends Closeable {
-  Map<String, Object> getNodeValues(String node, Collection<String> keys);
+  /**Get the value of each tag for a given node
+   *
+   * @param node node name
+   * @param tags tag names
+   * @return a map of tag vs value
+   */
+  Map<String, Object> getNodeValues(String node, Collection<String> tags);
 
   /**
    * Get the details of each replica in a node. It attempts to fetch as much details about
@@ -40,6 +46,8 @@ public interface ClusterDataProvider extends Closeable {
 
   Collection<String> getNodes();
 
+  String getPolicy(String coll);
+
   @Override
   default void close() throws IOException {
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea106682/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/MoveReplicaSuggester.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/MoveReplicaSuggester.java b/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/MoveReplicaSuggester.java
index 3a99169..82b1aba 100644
--- a/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/MoveReplicaSuggester.java
+++ b/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/MoveReplicaSuggester.java
@@ -51,7 +51,7 @@ public class MoveReplicaSuggester extends Suggester {
         continue;
       }
 
-      for (Clause clause : session.getPolicy().clauses) {
+      for (Clause clause : session.expandedClauses) {
         if (strict || clause.strict) clause.test(fromRow);
       }
       if (fromRow.violations.isEmpty()) {
@@ -59,7 +59,7 @@ public class MoveReplicaSuggester extends Suggester {
           Row targetRow = getMatrix().get(i);
           targetRow = targetRow.addReplica(coll, shard);
           targetRow.violations.clear();
-          for (Clause clause : session.getPolicy().clauses) {
+          for (Clause clause : session.expandedClauses) {
             if (strict || clause.strict) clause.test(targetRow);
           }
           if (targetRow.violations.isEmpty()) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea106682/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/Policy.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/Policy.java b/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/Policy.java
index e4d9d9c..13cf70b 100644
--- a/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/Policy.java
+++ b/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/Policy.java
@@ -35,35 +35,55 @@ import java.util.stream.Collectors;
 import org.apache.solr.common.IteratorWriter;
 import org.apache.solr.common.MapWriter;
 import org.apache.solr.common.params.CollectionParams.CollectionAction;
+import org.apache.solr.common.util.StrUtils;
 import org.apache.solr.common.util.Utils;
 
-import static java.util.Collections.singletonList;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.emptyMap;
 import static java.util.stream.Collectors.toList;
-import static org.apache.solr.common.util.Utils.getDeepCopy;
 
 public class Policy implements MapWriter {
   public static final String EACH = "#EACH";
   public static final String ANY = "#ANY";
-  List<Clause> clauses = new ArrayList<>();
-  List<Preference> preferences = new ArrayList<>();
+  public static final String CLUSTER_POLICY = "cluster-policy";
+  public static final String CLUSTER_PREFERENCE = "cluster-preferences";
+  public static final Set<String> GLOBAL_ONLY_TAGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList("cores")));
+  Map<String, List<Clause>> policies = new HashMap<>();
+  List<Clause> clusterPolicy;
+  List<Preference> clusterPreferences;
   List<String> params = new ArrayList<>();
 
 
   public Policy(Map<String, Object> jsonMap) {
-    List<Map<String, Object>> l = getListOfMap("conditions", jsonMap);
-    clauses = l.stream()
-        .map(Clause::new)
-        .sorted()
+
+    clusterPreferences = ((List<Map<String, Object>>) jsonMap.getOrDefault(CLUSTER_PREFERENCE, emptyList())).stream()
+        .map(Preference::new)
         .collect(toList());
-    l = getListOfMap("preferences", jsonMap);
-    preferences = l.stream().map(Preference::new).collect(toList());
-    for (int i = 0; i < preferences.size() - 1; i++) {
-      Preference preference = preferences.get(i);
-      preference.next = preferences.get(i + 1);
+    for (int i = 0; i < clusterPreferences.size() - 1; i++) {
+      Preference preference = clusterPreferences.get(i);
+      preference.next = clusterPreferences.get(i + 1);
     }
+    if (clusterPreferences.isEmpty()) {
+      clusterPreferences.add(new Preference((Map<String, Object>) Utils.fromJSONString("{minimize : cores, precision:1}")));
+    }
+    clusterPolicy = ((List<Map<String, Object>>) jsonMap.getOrDefault(CLUSTER_POLICY, emptyList())).stream()
+        .map(Clause::new)
+        .collect(Collectors.toList());
+
+    ((Map<String, List<Map<String, Object>>>) jsonMap.getOrDefault("policies", emptyMap())).forEach((s, l1) ->
+        this.policies.put(s, l1.stream()
+            .map(Clause::new)
+            .sorted()
+            .collect(toList())));
+
+    this.policies.forEach((s, c) -> {
+      for (Clause clause : c) {
+        if (!clause.isPerCollectiontag())
+          throw new RuntimeException(clause.globalTag.name + " is only allowed in 'cluster-policy'");
+      }
+    });
 
-    for (Clause c : clauses) params.add(c.tag.name);
-    for (Preference preference : preferences) {
+    for (Preference preference : clusterPreferences) {
       if (params.contains(preference.name.name())) {
         throw new RuntimeException(preference.name + " is repeated");
       }
@@ -72,16 +92,22 @@ public class Policy implements MapWriter {
     }
   }
 
+  public List<Clause> getClusterPolicy() {
+    return clusterPolicy;
+  }
+
   @Override
   public void writeMap(EntryWriter ew) throws IOException {
-    if (!clauses.isEmpty()) {
-      ew.put("conditions", (IteratorWriter) iw -> {
-        for (Clause clause : clauses) iw.add(clause);
+    if (!policies.isEmpty()) {
+      ew.put("policies", (MapWriter) ew1 -> {
+        for (Map.Entry<String, List<Clause>> e : policies.entrySet()) {
+          ew1.put(e.getKey(), e.getValue());
+        }
       });
     }
-    if (!preferences.isEmpty()) {
+    if (!clusterPreferences.isEmpty()) {
       ew.put("preferences", (IteratorWriter) iw -> {
-        for (Preference p : preferences) iw.add(p);
+        for (Preference p : clusterPreferences) iw.add(p);
       });
     }
 
@@ -92,23 +118,52 @@ public class Policy implements MapWriter {
     final ClusterDataProvider dataProvider;
     final List<Row> matrix;
     Set<String> collections = new HashSet<>();
+    List<Clause> expandedClauses;
+    private List<String> paramsOfInterest;
 
-    Session(List<String> nodes, ClusterDataProvider dataProvider, List<Row> matrix) {
+    private Session(List<String> nodes, ClusterDataProvider dataProvider,
+                    List<Row> matrix, List<Clause> expandedClauses,
+                    List<String> paramsOfInterest) {
       this.nodes = nodes;
       this.dataProvider = dataProvider;
       this.matrix = matrix;
+      this.expandedClauses = expandedClauses;
+      this.paramsOfInterest = paramsOfInterest;
     }
 
     Session(ClusterDataProvider dataProvider) {
       this.nodes = new ArrayList<>(dataProvider.getNodes());
       this.dataProvider = dataProvider;
+      for (String node : nodes) {
+        collections.addAll(dataProvider.getReplicaInfo(node, Collections.emptyList()).keySet());
+      }
+
+      expandedClauses = clusterPolicy.stream()
+          .filter(clause -> !clause.isPerCollectiontag())
+          .collect(Collectors.toList());
+
+      for (String c : collections) {
+        String p = dataProvider.getPolicy(c);
+        if (p != null) {
+          List<Clause> perCollPolicy = policies.get(p);
+          if (perCollPolicy == null)
+            throw new RuntimeException(StrUtils.formatString("Policy for collection {0} is {1} . It does not exist", c, p));
+        }
+        expandedClauses.addAll(mergePolicies(c, policies.getOrDefault(p, emptyList()), clusterPolicy));
+      }
+
+      Collections.sort(expandedClauses);
+      List<String> p = new ArrayList<>(params);
+      p.addAll(expandedClauses.stream().map(clause -> clause.tag.name).distinct().collect(Collectors.toList()));
+      paramsOfInterest = new ArrayList<>(p);
       matrix = new ArrayList<>(nodes.size());
-      for (String node : nodes) matrix.add(new Row(node, params, dataProvider));
+      for (String node : nodes) matrix.add(new Row(node, paramsOfInterest, dataProvider));
       for (Row row : matrix) row.replicaInfo.forEach((s, e) -> collections.add(s));
+      applyRules();
     }
 
     Session copy() {
-      return new Session(nodes, dataProvider, getMatrixCopy());
+      return new Session(nodes, dataProvider, getMatrixCopy(), expandedClauses, paramsOfInterest);
     }
 
     List<Row> getMatrixCopy() {
@@ -125,26 +180,23 @@ public class Policy implements MapWriter {
     /**
      * Apply the preferences and conditions
      */
-    public void applyRules() {
-      if (!preferences.isEmpty()) {
+    private void applyRules() {
+      if (!clusterPreferences.isEmpty()) {
         //this is to set the approximate value according to the precision
         ArrayList<Row> tmpMatrix = new ArrayList<>(matrix);
-        for (Preference p : preferences) {
+        for (Preference p : clusterPreferences) {
           Collections.sort(tmpMatrix, (r1, r2) -> p.compare(r1, r2, false));
           p.setApproxVal(tmpMatrix);
         }
         //approximate values are set now. Let's do recursive sorting
-        Collections.sort(matrix, (r1, r2) -> preferences.get(0).compare(r1, r2, true));
+        Collections.sort(matrix, (r1, r2) -> clusterPreferences.get(0).compare(r1, r2, true));
       }
 
-      if (!clauses.isEmpty()) {
-        for (Clause clause : clauses) {
+      for (Clause clause : expandedClauses) {
           for (Row row : matrix) {
             clause.test(row);
           }
         }
-      }
-
     }
 
     public Map<String, List<Clause>> getViolations() {
@@ -183,26 +235,13 @@ public class Policy implements MapWriter {
     return new Session(snitch);
   }
 
-
-  static List<Map<String, Object>> getListOfMap(String key, Map<String, Object> jsonMap) {
-    Object o = jsonMap.get(key);
-    if (o != null) {
-      if (!(o instanceof List)) o = singletonList(o);
-      return (List) o;
-    } else {
-      return Collections.emptyList();
-    }
-  }
-
-
   enum SortParam {
-    replica, freedisk, cores, heap, cpu;
+    freedisk, cores, heap, cpu;
 
     static SortParam get(String m) {
       for (SortParam p : values()) if (p.name().equals(m)) return p;
-      throw new RuntimeException("Invalid sort " + m + " Sort must be on one of these " + Arrays.asList(values()));
+      throw new RuntimeException(StrUtils.formatString("Invalid sort {0} Sort must be on one of these {1}", m, Arrays.asList(values())));
     }
-
   }
 
   enum Sort {
@@ -293,44 +332,32 @@ public class Policy implements MapWriter {
 
   }
 
-  public static Map<String, Object> mergePolicies(String coll,
-                                                  Map<String, Object> collPolicy,
-                                                  Map<String, Object> defaultPolicy) {
-    Collection<Map<String, Object>> conditions = getDeepCopy(getListOfMap("conditions", collPolicy), 4, true);
-    insertColl(coll, conditions);
-    List<Clause> parsedConditions = conditions.stream().map(Clause::new).collect(toList());
-    Collection<Map<String, Object>> preferences = getDeepCopy(getListOfMap("preferences", collPolicy), 4, true);
-    List<Preference> parsedPreferences = preferences.stream().map(Preference::new).collect(toList());
-    if (defaultPolicy != null) {
-      Collection<Map<String, Object>> defaultConditions = getDeepCopy(getListOfMap("conditions", defaultPolicy), 4, true);
-      insertColl(coll, defaultConditions);
-      defaultConditions.forEach(e -> {
-        Clause clause = new Clause(e);
-        for (Clause c : parsedConditions) {
-          if (c.collection.equals(clause.collection) &&
-              c.tag.name.equals(clause.tag.name)) return;
-        }
-        conditions.add(e);
-      });
-      Collection<Map<String, Object>> defaultPreferences = getDeepCopy(getListOfMap("preferences", defaultPolicy), 4, true);
-      defaultPreferences.forEach(e -> {
-        Preference preference = new Preference(e);
-        for (Preference p : parsedPreferences) {
-          if (p.name == preference.name) return;
-        }
-        preferences.add(e);
-
-      });
-    }
-    return Utils.makeMap("conditions", conditions, "preferences", preferences);
+  static List<Clause> mergePolicies(String coll,
+                                    List<Clause> collPolicy,
+                                    List<Clause> globalPolicy) {
 
+    List<Clause> merged = insertColl(coll, collPolicy);
+    List<Clause> global = insertColl(coll, globalPolicy);
+    merged.addAll(global.stream()
+        .filter(clusterPolicyClause -> merged.stream().noneMatch(perCollPolicy -> perCollPolicy.doesOverride(clusterPolicyClause)))
+        .collect(Collectors.toList()));
+    return merged;
   }
 
-  private static Collection<Map<String, Object>> insertColl(String coll, Collection<Map<String, Object>> conditions) {
-    conditions.forEach(e -> {
-      if (!e.containsKey("collection")) e.put("collection", coll);
-    });
-    return conditions;
+  /**
+   * Insert the collection name into the clauses where collection is not specified
+   */
+  static List<Clause> insertColl(String coll, Collection<Clause> conditions) {
+    return conditions.stream()
+        .filter(Clause::isPerCollectiontag)
+        .map(clause -> {
+          Map<String, Object> copy = new LinkedHashMap<>(clause.original);
+          if (!copy.containsKey("collection")) copy.put("collection", coll);
+          return new Clause(copy);
+        })
+        .filter(it -> (it.collection.isPass(coll)))
+        .collect(Collectors.toList());
+
   }
 
   private static final Map<CollectionAction, Supplier<Suggester>> ops = new HashMap<>();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea106682/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/PolicyHelper.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/PolicyHelper.java b/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/PolicyHelper.java
index 750efad..7ce0769 100644
--- a/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/PolicyHelper.java
+++ b/solr/solrj/src/java/org/apache/solr/cloud/autoscaling/PolicyHelper.java
@@ -33,19 +33,15 @@ import static org.apache.solr.common.params.CollectionParams.CollectionAction.AD
 
 public class PolicyHelper {
   public static Map<String, List<String>> getReplicaLocations(String collName, Map<String, Object> autoScalingJson,
-                                                              String policyName, ClusterDataProvider cdp,
+                                                              ClusterDataProvider cdp,
                                                               List<String> shardNames,
                                                               int repFactor) {
     Map<String, List<String>> positionMapping = new HashMap<>();
     for (String shardName : shardNames) positionMapping.put(shardName, new ArrayList<>(repFactor));
-    Map policyJson = (Map) Utils.getObjectByPath(autoScalingJson, false, asList("policies", policyName));
-    if (policyJson == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "no such policy exists " + policyName);
-    }
-    Map defaultPolicy = (Map) Utils.getObjectByPath(autoScalingJson, false, asList("policies", "default"));
 
-    Map<String, Object> merged = Policy.mergePolicies(collName, policyJson, defaultPolicy);
-    Policy policy = new Policy(merged);
+
+//    Map<String, Object> merged = Policy.mergePolicies(collName, policyJson, defaultPolicy);
+    Policy policy = new Policy(autoScalingJson);
     Policy.Session session = policy.createSession(cdp);
     for (String shardName : shardNames) {
       for (int i = 0; i < repFactor; i++) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea106682/solr/solrj/src/test/org/apache/solr/cloud/autoscaling/TestPolicy.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/cloud/autoscaling/TestPolicy.java b/solr/solrj/src/test/org/apache/solr/cloud/autoscaling/TestPolicy.java
index 4aaf1be..a6cb2bc 100644
--- a/solr/solrj/src/test/org/apache/solr/cloud/autoscaling/TestPolicy.java
+++ b/solr/solrj/src/test/org/apache/solr/cloud/autoscaling/TestPolicy.java
@@ -28,12 +28,11 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.cloud.autoscaling.Policy.Suggester.Hint;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.common.util.ValidatingJsonMap;
-import org.apache.solr.cloud.autoscaling.Policy.Suggester.Hint;
 
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
-import static org.apache.solr.common.util.Utils.getObjectByPath;
 
 public class TestPolicy extends SolrTestCaseJ4 {
 
@@ -66,67 +65,100 @@ public class TestPolicy extends SolrTestCaseJ4 {
   public void testMerge() {
 
     Map map = (Map) Utils.fromJSONString("{" +
+        "  'cluster-preferences': [" +
+        "    { 'maximize': 'freedisk', 'precision': 50}," +
+        "    { 'minimize': 'cores', 'precision': 50}" +
+        "  ]," +
+        "  'cluster-policy': [" +
+        "    { 'replica': '#ANY', 'nodeRole': '!overseer'}," +
+        "    { 'replica': '<2', 'shard': '#EACH', 'node': '#ANY'}" +
+        "  ]," +
         "  'policies': {" +
-        "    'default': {" +
-        "      'conditions': [" +
-        "        {  replica:'#ANY' , 'nodeRole': '!overseer'}," +
-        "        { 'replica': '<2', 'shard': '#EACH', node:'#ANY'}" +
-        "      ]," +
-        "      'preferences':[" +
-        "        {'maximize': 'freedisk', 'precision':50}," +
-        "      {'minimize': 'replica', 'precision':50}]" +
-        "    }," +
-        "    'policy1': {" +
-        "      'conditions': [" +
-        "        { 'replica': '1', 'sysprop.fs': 'ssd', 'shard': '#EACH'}," +
-        "        { 'replica': '<2', 'shard': '#ANY' , node: '#ANY'}," +
-        "        { 'replica': '<2', 'shard':'#EACH', 'rack': 'rack1' }" +
-        "      ], preferences: [{maximize:freedisk, precision:75}]} } }");
-    map = (Map) map.get("policies");
-    map = Policy.mergePolicies("mycoll", (Map<String, Object>) map.get("policy1"), (Map<String, Object>) map.get("default"));
-    assertEquals(((List) map.get("conditions")).size(), 4);
-    assertEquals(((List) map.get("preferences")).size(), 2);
-    assertEquals(String.valueOf(getObjectByPath(map, true, "conditions[0]/replica")), "1");
-    assertEquals(String.valueOf(getObjectByPath(map, true, "conditions[1]/replica")), "<2");
-    assertEquals(String.valueOf(getObjectByPath(map, true, "conditions[1]/shard")), "#ANY");
-    assertEquals(String.valueOf(getObjectByPath(map, true, "conditions[2]/rack")), "rack1");
-    assertEquals(String.valueOf(getObjectByPath(map, true, "conditions[3]/nodeRole")), "!overseer");
-
-    assertEquals(String.valueOf(getObjectByPath(map, true, "preferences[0]/maximize")), "freedisk");
-    assertEquals(String.valueOf(getObjectByPath(map, true, "preferences[0]/precision")), "75");
-    assertEquals(String.valueOf(getObjectByPath(map, true, "preferences[1]/precision")), "50");
+        "    'policy1': [" +
+        "      { 'replica': '1', 'sysprop.fs': 'ssd', 'shard': '#EACH'}," +
+        "      { 'replica': '<2', 'shard': '#ANY', 'node': '#ANY'}," +
+        "      { 'replica': '<2', 'shard': '#EACH', 'rack': 'rack1'}" +
+        "    ]" +
+        "  }" +
+        "}");
+    Policy policy = new Policy(map);
+    List<Clause> clauses = Policy.mergePolicies("mycoll", policy.policies.get("policy1"), policy.clusterPolicy);
+    Collections.sort(clauses);
+    assertEquals(clauses.size(), 4);
+    assertEquals("1", String.valueOf(clauses.get(0).original.get("replica")));
+    assertEquals("<2", String.valueOf(clauses.get(1).original.get("replica")));
+    assertEquals("#ANY", clauses.get(3).original.get("shard"));
+    assertEquals("rack1",clauses.get(1).original.get("rack"));
+    assertEquals("!overseer", clauses.get(2).original.get("nodeRole"));
   }
 
 
   public void testConditionsSort(){
     String rules = "{" +
-        "conditions:[" +
-        "{nodeRole:'!overseer', strict:false}," +
-        "{replica:'<1',node:node3, shard: '#EACH'}," +
-        "{replica:'<2',node:'#ANY', shard:'#EACH'}," +
-        "{replica:1, rack:rack1}]," +
-        " preferences:[" +
-        "{minimize:cores , precision:2}," +
-        "{maximize:freedisk, precision:50}, " +
-        "{minimize:heap, precision:1000}]}";
-
-    Policy policy = new Policy((Map<String, Object>) Utils.fromJSONString(rules));
-    assertEquals("rack", policy.clauses.get(0).tag.name);
-
-
+        "    'cluster-policy':[" +
+        "      { 'nodeRole':'!overseer', 'strict':false}," +
+        "      { 'replica':'<1', 'node':'node3', 'shard':'#EACH'}," +
+        "      { 'replica':'<2', 'node':'#ANY', 'shard':'#EACH'}," +
+        "      { 'replica':1, 'rack':'rack1'}]" +
+        "  }";
+    Policy p = new Policy((Map<String, Object>) Utils.fromJSONString(rules));
+    List<Clause> clauses = new ArrayList<>(p.getClusterPolicy());
+    Collections.sort(clauses);
+    assertEquals("rack", clauses.get(0).tag.name);
   }
+  public static String clusterState = "{'gettingstarted':{" +
+      "    'router':{'name':'compositeId'}," +
+      "    'shards':{" +
+      "      'shard1':{" +
+      "        'range':'80000000-ffffffff'," +
+      "        'replicas':{" +
+      "          'r1':{" +
+      "            'core':r1," +
+      "            'base_url':'http://10.0.0.4:8983/solr'," +
+      "            'node_name':'node1'," +
+      "            'state':'active'," +
+      "            'leader':'true'}," +
+      "          'r2':{" +
+      "            'core':r2," +
+      "            'base_url':'http://10.0.0.4:7574/solr'," +
+      "            'node_name':'node2'," +
+      "            'state':'active'}}}," +
+      "      'shard2':{" +
+      "        'range':'0-7fffffff'," +
+      "        'replicas':{" +
+      "          'r3':{" +
+      "            'core':r3," +
+      "            'base_url':'http://10.0.0.4:8983/solr'," +
+      "            'node_name':'node1'," +
+      "            'state':'active'," +
+      "            'leader':'true'}," +
+      "          'r4':{" +
+      "            'core':r4," +
+      "            'base_url':'http://10.0.0.4:8987/solr'," +
+      "            'node_name':'node4'," +
+      "            'state':'active'}," +
+      "          'r6':{" +
+      "            'core':r6," +
+      "            'base_url':'http://10.0.0.4:8989/solr'," +
+      "            'node_name':'node3'," +
+      "            'state':'active'}," +
+      "          'r5':{" +
+      "            'core':r5," +
+      "            'base_url':'http://10.0.0.4:7574/solr'," +
+      "            'node_name':'node1'," +
+      "            'state':'active'}}}}}}";
+
   public void testRules() throws IOException {
     String rules = "{" +
-        "conditions:[" +
+        "cluster-policy:[" +
         "{nodeRole:'!overseer', strict:false}," +
         "{replica:'<1',node:node3}," +
         "{replica:'<2',node:'#ANY', shard:'#EACH'}]," +
-        " preferences:[" +
+        " cluster-preferences:[" +
         "{minimize:cores , precision:2}," +
         "{maximize:freedisk, precision:50}, " +
         "{minimize:heap, precision:1000}]}";
 
-
     Map<String, Map> nodeValues = (Map<String, Map>) Utils.fromJSONString("{" +
         "node1:{cores:12, freedisk: 334, heap:10480}," +
         "node2:{cores:4, freedisk: 749, heap:6873}," +
@@ -138,7 +170,6 @@ public class TestPolicy extends SolrTestCaseJ4 {
     Policy.Session session;
     session = policy.createSession(getClusterDataProvider(nodeValues, clusterState));
 
-    session.applyRules();
     List<Row> l = session.getSorted();
     assertEquals("node1", l.get(0).node);
     assertEquals("node3", l.get(1).node);
@@ -146,7 +177,6 @@ public class TestPolicy extends SolrTestCaseJ4 {
     assertEquals("node2", l.get(3).node);
 
 
-    System.out.printf(Utils.toJSONString(Utils.getDeepCopy(session.toMap(new LinkedHashMap<>()), 8)));
     Map<String, List<Clause>> violations = session.getViolations();
     System.out.println(Utils.getDeepCopy(violations, 6));
     assertEquals(3, violations.size());
@@ -172,7 +202,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
 
   }
 
-  public void testOtherTag(){
+ /* public void testOtherTag(){
     String rules = "{" +
         "conditions:[" +
         "{nodeRole:'!overseer', strict:false}," +
@@ -199,15 +229,15 @@ public class TestPolicy extends SolrTestCaseJ4 {
         .hint(Hint.COLL, "newColl")
         .hint(Hint.SHARD, "s1").getOperation();
     assertNotNull(op);
-  }
+  }*/
 
 
   private ClusterDataProvider getClusterDataProvider(final Map<String, Map> nodeValues, String  clusterState) {
     return new ClusterDataProvider() {
         @Override
-        public Map<String, Object> getNodeValues(String node, Collection<String> keys) {
+        public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
           Map<String, Object> result = new LinkedHashMap<>();
-          keys.stream().forEach(s -> result.put(s, nodeValues.get(node).get(s)));
+          tags.stream().forEach(s -> result.put(s, nodeValues.get(node).get(s)));
           return result;
         }
 
@@ -216,7 +246,12 @@ public class TestPolicy extends SolrTestCaseJ4 {
           return nodeValues.keySet();
         }
 
-        @Override
+      @Override
+      public String getPolicy(String coll) {
+        return null;
+      }
+
+      @Override
         public Map<String, Map<String, List<Policy.ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
           return getReplicaDetails(node, clusterState);
         }
@@ -275,48 +310,6 @@ public class TestPolicy extends SolrTestCaseJ4 {
 
   }*/
 
-  public static String clusterState = "{'gettingstarted':{" +
-      "    'router':{'name':'compositeId'}," +
-      "    'shards':{" +
-      "      'shard1':{" +
-      "        'range':'80000000-ffffffff'," +
-      "        'replicas':{" +
-      "          'r1':{" +
-      "            'core':r1," +
-      "            'base_url':'http://10.0.0.4:8983/solr'," +
-      "            'node_name':'node1'," +
-      "            'state':'active'," +
-      "            'leader':'true'}," +
-      "          'r2':{" +
-      "            'core':r2," +
-      "            'base_url':'http://10.0.0.4:7574/solr'," +
-      "            'node_name':'node2'," +
-      "            'state':'active'}}}," +
-      "      'shard2':{" +
-      "        'range':'0-7fffffff'," +
-      "        'replicas':{" +
-      "          'r3':{" +
-      "            'core':r3," +
-      "            'base_url':'http://10.0.0.4:8983/solr'," +
-      "            'node_name':'node1'," +
-      "            'state':'active'," +
-      "            'leader':'true'}," +
-      "          'r4':{" +
-      "            'core':r4," +
-      "            'base_url':'http://10.0.0.4:8987/solr'," +
-      "            'node_name':'node4'," +
-      "            'state':'active'}," +
-      "          'r6':{" +
-      "            'core':r6," +
-      "            'base_url':'http://10.0.0.4:8989/solr'," +
-      "            'node_name':'node3'," +
-      "            'state':'active'}," +
-      "          'r5':{" +
-      "            'core':r5," +
-      "            'base_url':'http://10.0.0.4:7574/solr'," +
-      "            'node_name':'node1'," +
-      "            'state':'active'}}}}}}";
-
   public static Map<String, Map<String, List<Policy.ReplicaInfo>>> getReplicaDetails(String node, String s) {
     ValidatingJsonMap m = ValidatingJsonMap
         .getDeepCopy((Map) Utils.fromJSONString(s), 6, true);