Posted to commits@brooklyn.apache.org by al...@apache.org on 2015/08/06 18:32:15 UTC

[01/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Repository: incubator-brooklyn
Updated Branches:
  refs/heads/master 08662a7ca -> 56e8c3989


http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/usage/launcher/src/test/resources/couchbase-replication-w-pillowfight.yaml
----------------------------------------------------------------------
diff --git a/usage/launcher/src/test/resources/couchbase-replication-w-pillowfight.yaml b/usage/launcher/src/test/resources/couchbase-replication-w-pillowfight.yaml
index 252d58c..6c5a95a 100644
--- a/usage/launcher/src/test/resources/couchbase-replication-w-pillowfight.yaml
+++ b/usage/launcher/src/test/resources/couchbase-replication-w-pillowfight.yaml
@@ -19,7 +19,7 @@
 name: Couchbase w Replicating Cluster and Pillow Fight
 
 services:
-- type: brooklyn.entity.nosql.couchbase.CouchbaseCluster
+- type: org.apache.brooklyn.entity.nosql.couchbase.CouchbaseCluster
   id: couchbase
   name: Couchbase Primary Cluster
   adminUsername: Administrator
@@ -36,7 +36,7 @@ services:
   location: aws-ec2:us-west-1
 
 
-- type: brooklyn.entity.nosql.couchbase.CouchbaseCluster
+- type: org.apache.brooklyn.entity.nosql.couchbase.CouchbaseCluster
   id: couchbase-backup
   name: Couchbase Backup Cluster
   adminUsername: Administrator
@@ -49,7 +49,7 @@ services:
   location: softlayer:ams01
 
 
-- type: "classpath://brooklyn/entity/nosql/couchbase/pillowfight.yaml"
+- type: "classpath://org/apache/brooklyn/entity/nosql/couchbase/pillowfight.yaml"
   brooklyn.config:
     base_url: $brooklyn:entity("couchbase").attributeWhenReady("couchbase.cluster.connection.url") 
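
This first hunk shows both halves of the BROOKLYN-162 rename: YAML type names move from brooklyn.entity.nosql.* to org.apache.brooklyn.entity.nosql.*, and classpath resource references move in step (brooklyn/entity/nosql/... becomes org/apache/brooklyn/entity/nosql/...). A minimal, hypothetical sketch in plain Java (the class ResourcePathCheck is illustrative; the resource path is taken from the hunk above) of verifying that the relocated resource resolves:

    import java.net.URL;

    public class ResourcePathCheck {
        public static void main(String[] args) {
            // The post-rename path; the old brooklyn/entity/nosql/... path would return null.
            URL pillowfight = ResourcePathCheck.class.getClassLoader()
                    .getResource("org/apache/brooklyn/entity/nosql/couchbase/pillowfight.yaml");
            System.out.println(pillowfight != null ? "found: " + pillowfight : "not on classpath");
        }
    }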
 

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/usage/launcher/src/test/resources/couchbase-w-loadgen.yaml
----------------------------------------------------------------------
diff --git a/usage/launcher/src/test/resources/couchbase-w-loadgen.yaml b/usage/launcher/src/test/resources/couchbase-w-loadgen.yaml
index 101511e..b3e1bce 100644
--- a/usage/launcher/src/test/resources/couchbase-w-loadgen.yaml
+++ b/usage/launcher/src/test/resources/couchbase-w-loadgen.yaml
@@ -22,7 +22,7 @@ location: softlayer:wdc01
 
 services:
 
-- type: brooklyn.entity.nosql.couchbase.CouchbaseCluster
+- type: org.apache.brooklyn.entity.nosql.couchbase.CouchbaseCluster
   id: cb-cluster
   adminUsername: Administrator
   adminPassword: Password
@@ -35,7 +35,7 @@ services:
   brooklyn.policies:
   - type: brooklyn.policy.autoscaling.AutoScalerPolicy
     brooklyn.config:
-      metric: $brooklyn:sensor("brooklyn.entity.nosql.couchbase.CouchbaseCluster",
+      metric: $brooklyn:sensor("org.apache.brooklyn.entity.nosql.couchbase.CouchbaseCluster",
         "couchbase.stats.cluster.per.node.ops")
       metricLowerBound: 500
       metricUpperBound: 1000
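
Note that the rename also reaches into plain strings: the $brooklyn:sensor(...) reference names the sensor's owning type by its fully qualified class name, so it must change together with the service type. As a hedged Java analogue (the sensor name is from the hunk above; Sensors.newDoubleSensor and the AutoScalerPolicy builder methods are assumptions about the API of this era, not confirmed by this commit):

    import brooklyn.event.AttributeSensor;
    import brooklyn.event.basic.Sensors;
    import brooklyn.policy.autoscaling.AutoScalerPolicy;

    public class ScalingExample {
        public static AutoScalerPolicy perNodeOpsPolicy() {
            AttributeSensor<Double> opsPerNode =
                    Sensors.newDoubleSensor("couchbase.stats.cluster.per.node.ops");
            return AutoScalerPolicy.builder()
                    .metric(opsPerNode)        // what to watch
                    .metricLowerBound(500)     // scale in below this
                    .metricUpperBound(1000)    // scale out above this
                    .build();
        }
    }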

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/usage/launcher/src/test/resources/couchbase-w-pillowfight.yaml
----------------------------------------------------------------------
diff --git a/usage/launcher/src/test/resources/couchbase-w-pillowfight.yaml b/usage/launcher/src/test/resources/couchbase-w-pillowfight.yaml
index 2ee46ee..8ac5e03 100644
--- a/usage/launcher/src/test/resources/couchbase-w-pillowfight.yaml
+++ b/usage/launcher/src/test/resources/couchbase-w-pillowfight.yaml
@@ -21,7 +21,7 @@ name: Couchbase w Pillow Fight
 location: softlayer:wdc01
 
 services:
-- type: brooklyn.entity.nosql.couchbase.CouchbaseCluster
+- type: org.apache.brooklyn.entity.nosql.couchbase.CouchbaseCluster
   id: couchbase
   adminUsername: Administrator
   adminPassword: Password
@@ -30,6 +30,6 @@ services:
   - bucket: default
     bucket-port: 11211
 
-- type: "classpath://brooklyn/entity/nosql/couchbase/pillowfight.yaml"
+- type: "classpath://org/apache/brooklyn/entity/nosql/couchbase/pillowfight.yaml"
   brooklyn.config:
     base_url: $brooklyn:entity("couchbase").attributeWhenReady("couchbase.cluster.connection.url") 

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/usage/launcher/src/test/resources/mongo-blueprint.yaml
----------------------------------------------------------------------
diff --git a/usage/launcher/src/test/resources/mongo-blueprint.yaml b/usage/launcher/src/test/resources/mongo-blueprint.yaml
index c63b8db..32fe22a 100644
--- a/usage/launcher/src/test/resources/mongo-blueprint.yaml
+++ b/usage/launcher/src/test/resources/mongo-blueprint.yaml
@@ -19,5 +19,5 @@
 name: Mongo3
 location: localhost
 services:
-- type: brooklyn.entity.nosql.mongodb.MongoDBReplicaSet
+- type: org.apache.brooklyn.entity.nosql.mongodb.MongoDBReplicaSet
   name: MongoDB Replica Set

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/usage/launcher/src/test/resources/mongo-client-single-server.yaml
----------------------------------------------------------------------
diff --git a/usage/launcher/src/test/resources/mongo-client-single-server.yaml b/usage/launcher/src/test/resources/mongo-client-single-server.yaml
index 353baff..4732b9a 100644
--- a/usage/launcher/src/test/resources/mongo-client-single-server.yaml
+++ b/usage/launcher/src/test/resources/mongo-client-single-server.yaml
@@ -19,10 +19,10 @@
 name: Mongo Single Server with Scripts
 location: localhost
 services:
-- serviceType: brooklyn.entity.nosql.mongodb.MongoDBServer
+- serviceType: org.apache.brooklyn.entity.nosql.mongodb.MongoDBServer
   id: mySingleServer
   name: mongod
-- serviceType: brooklyn.entity.nosql.mongodb.MongoDBClient
+- serviceType: org.apache.brooklyn.entity.nosql.mongodb.MongoDBClient
   name: MongoDB Javascript Client
   brooklyn.config:
     server: $brooklyn:component("mySingleServer")

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/usage/launcher/src/test/resources/mongo-scripts.yaml
----------------------------------------------------------------------
diff --git a/usage/launcher/src/test/resources/mongo-scripts.yaml b/usage/launcher/src/test/resources/mongo-scripts.yaml
index 0115d71..17362e5 100644
--- a/usage/launcher/src/test/resources/mongo-scripts.yaml
+++ b/usage/launcher/src/test/resources/mongo-scripts.yaml
@@ -21,12 +21,12 @@ description: MongoDB sharded deployment with MongoDB (javascript) client
 origin: https://github.com/apache/incubator-brooklyn
 location: localhost
 services:
-- serviceType: brooklyn.entity.nosql.mongodb.sharding.MongoDBShardedDeployment
+- serviceType: org.apache.brooklyn.entity.nosql.mongodb.sharding.MongoDBShardedDeployment
   id: shardeddeployment
   name: MongoDB Sharded Deployment
   brooklyn.config:
     initialRouterClusterSize: 1
-- serviceType: brooklyn.entity.nosql.mongodb.MongoDBClient
+- serviceType: org.apache.brooklyn.entity.nosql.mongodb.MongoDBClient
   name: MongoDB Javascript Client
   brooklyn.config:
     shardedDeployment: $brooklyn:component("shardeddeployment")

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/usage/launcher/src/test/resources/mongo-sharded.yaml
----------------------------------------------------------------------
diff --git a/usage/launcher/src/test/resources/mongo-sharded.yaml b/usage/launcher/src/test/resources/mongo-sharded.yaml
index dcf1f2b..8d27c19 100644
--- a/usage/launcher/src/test/resources/mongo-sharded.yaml
+++ b/usage/launcher/src/test/resources/mongo-sharded.yaml
@@ -20,7 +20,7 @@ name: Sharded MongoDB With Web App
 description: Auto-scaling web app backed by MongoDB
 location: my-docker-cloud
 services:
-- type: brooklyn.entity.nosql.mongodb.sharding.MongoDBShardedDeployment
+- type: org.apache.brooklyn.entity.nosql.mongodb.sharding.MongoDBShardedDeployment
   id: mongo
   name: Mongo DB Backend
   brooklyn.config:
@@ -34,7 +34,7 @@ services:
   brooklyn.config:
     memberSpec:
       $brooklyn:entitySpec:
-        type: brooklyn.entity.nosql.mongodb.sharding.CoLocatedMongoDBRouter
+        type: org.apache.brooklyn.entity.nosql.mongodb.sharding.CoLocatedMongoDBRouter
         brooklyn.enrichers:
           - type: brooklyn.enricher.basic.Propagator
             brooklyn.config:

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/usage/launcher/src/test/resources/mongo-single-server-blueprint.yaml
----------------------------------------------------------------------
diff --git a/usage/launcher/src/test/resources/mongo-single-server-blueprint.yaml b/usage/launcher/src/test/resources/mongo-single-server-blueprint.yaml
index 73615d0..1c87dd5 100644
--- a/usage/launcher/src/test/resources/mongo-single-server-blueprint.yaml
+++ b/usage/launcher/src/test/resources/mongo-single-server-blueprint.yaml
@@ -18,6 +18,6 @@
 #
 name: Mongo1
 services:
-- serviceType: brooklyn.entity.nosql.mongodb.MongoDBServer
+- serviceType: org.apache.brooklyn.entity.nosql.mongodb.MongoDBServer
   name: MongoDB Single Server
   location: localhost

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/usage/launcher/src/test/resources/playing.yaml
----------------------------------------------------------------------
diff --git a/usage/launcher/src/test/resources/playing.yaml b/usage/launcher/src/test/resources/playing.yaml
index 6182f38..d02bb76 100644
--- a/usage/launcher/src/test/resources/playing.yaml
+++ b/usage/launcher/src/test/resources/playing.yaml
@@ -18,4 +18,4 @@
 #
 name: cassandra node
 services:
-- type: brooklyn.entity.nosql.cassandra.CassandraNode
+- type: org.apache.brooklyn.entity.nosql.cassandra.CassandraNode

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/usage/rest-client/src/test/java/org/apache/brooklyn/rest/client/ApplicationResourceIntegrationTest.java
----------------------------------------------------------------------
diff --git a/usage/rest-client/src/test/java/org/apache/brooklyn/rest/client/ApplicationResourceIntegrationTest.java b/usage/rest-client/src/test/java/org/apache/brooklyn/rest/client/ApplicationResourceIntegrationTest.java
index 02c19bb..2c0d8fb 100644
--- a/usage/rest-client/src/test/java/org/apache/brooklyn/rest/client/ApplicationResourceIntegrationTest.java
+++ b/usage/rest-client/src/test/java/org/apache/brooklyn/rest/client/ApplicationResourceIntegrationTest.java
@@ -62,10 +62,10 @@ public class ApplicationResourceIntegrationTest {
 
     private static final Duration LONG_WAIT = Duration.minutes(10);
     
-    private final String redisSpec = "{\"name\": \"redis-app\", \"type\": \"brooklyn.entity.nosql.redis.RedisStore\", \"locations\": [ \"localhost\"]}";
+    private final String redisSpec = "{\"name\": \"redis-app\", \"type\": \"org.apache.brooklyn.entity.nosql.redis.RedisStore\", \"locations\": [ \"localhost\"]}";
     
     private final ApplicationSpec legacyRedisSpec = ApplicationSpec.builder().name("redis-legacy-app")
-            .entities(ImmutableSet.of(new EntitySpec("redis-ent", "brooklyn.entity.nosql.redis.RedisStore")))
+            .entities(ImmutableSet.of(new EntitySpec("redis-ent", "org.apache.brooklyn.entity.nosql.redis.RedisStore")))
             .locations(ImmutableSet.of("localhost"))
             .build();
 

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/usage/rest-server/src/test/java/brooklyn/rest/resources/ApplicationResourceIntegrationTest.java
----------------------------------------------------------------------
diff --git a/usage/rest-server/src/test/java/brooklyn/rest/resources/ApplicationResourceIntegrationTest.java b/usage/rest-server/src/test/java/brooklyn/rest/resources/ApplicationResourceIntegrationTest.java
index 0eda001..2ee4f25 100644
--- a/usage/rest-server/src/test/java/brooklyn/rest/resources/ApplicationResourceIntegrationTest.java
+++ b/usage/rest-server/src/test/java/brooklyn/rest/resources/ApplicationResourceIntegrationTest.java
@@ -55,7 +55,7 @@ public class ApplicationResourceIntegrationTest extends BrooklynRestResourceTest
     private static final Logger log = LoggerFactory.getLogger(ApplicationResourceIntegrationTest.class);
 
     private final ApplicationSpec redisSpec = ApplicationSpec.builder().name("redis-app")
-            .entities(ImmutableSet.of(new EntitySpec("redis-ent", "brooklyn.entity.nosql.redis.RedisStore")))
+            .entities(ImmutableSet.of(new EntitySpec("redis-ent", "org.apache.brooklyn.entity.nosql.redis.RedisStore")))
             .locations(ImmutableSet.of("localhost"))
             .build();
 

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/usage/rest-server/src/test/java/brooklyn/rest/resources/CatalogResourceTest.java
----------------------------------------------------------------------
diff --git a/usage/rest-server/src/test/java/brooklyn/rest/resources/CatalogResourceTest.java b/usage/rest-server/src/test/java/brooklyn/rest/resources/CatalogResourceTest.java
index 8350b48..eb3d2ea 100644
--- a/usage/rest-server/src/test/java/brooklyn/rest/resources/CatalogResourceTest.java
+++ b/usage/rest-server/src/test/java/brooklyn/rest/resources/CatalogResourceTest.java
@@ -202,7 +202,7 @@ public class CatalogResourceTest extends BrooklynRestResourceTest {
     // not of the entity itself, so the test won't make sense any more.
     public void testGetCatalogEntityDetails() {
         CatalogEntitySummary details = client()
-                .resource(URI.create("/v1/catalog/entities/brooklyn.entity.nosql.redis.RedisStore"))
+                .resource(URI.create("/v1/catalog/entities/org.apache.brooklyn.entity.nosql.redis.RedisStore"))
                 .get(CatalogEntitySummary.class);
         assertTrue(details.toString().contains("redis.port"), "expected more config, only got: "+details);
         String iconUrl = "/v1/catalog/icon/" + details.getSymbolicName();
@@ -215,7 +215,7 @@ public class CatalogResourceTest extends BrooklynRestResourceTest {
     // not of the entity itself, so the test won't make sense any more.
     public void testGetCatalogEntityPlusVersionDetails() {
         CatalogEntitySummary details = client()
-                .resource(URI.create("/v1/catalog/entities/brooklyn.entity.nosql.redis.RedisStore:0.0.0.SNAPSHOT"))
+                .resource(URI.create("/v1/catalog/entities/org.apache.brooklyn.entity.nosql.redis.RedisStore:0.0.0.SNAPSHOT"))
                 .get(CatalogEntitySummary.class);
         assertTrue(details.toString().contains("redis.port"), "expected more config, only got: "+details);
         String expectedIconUrl = "/v1/catalog/icon/" + details.getSymbolicName() + "/" + details.getVersion();
@@ -236,7 +236,7 @@ public class CatalogResourceTest extends BrooklynRestResourceTest {
     }
 
     private void addTestCatalogItemRedisAsEntity(String catalogItemId) {
-        addTestCatalogItem(catalogItemId, null, TEST_VERSION, "brooklyn.entity.nosql.redis.RedisStore");
+        addTestCatalogItem(catalogItemId, null, TEST_VERSION, "org.apache.brooklyn.entity.nosql.redis.RedisStore");
     }
 
     private void addTestCatalogItem(String catalogItemId, String itemType, String version, String service) {
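
The tests above exercise both catalog lookup forms affected by the rename: the bare type and the type:version pair. A small sketch composing those URIs (the class name CatalogUris is illustrative; the path shapes are copied from the test code above):

    import java.net.URI;

    public class CatalogUris {
        public static void main(String[] args) {
            String type = "org.apache.brooklyn.entity.nosql.redis.RedisStore";
            String version = "0.0.0.SNAPSHOT";
            URI plain = URI.create("/v1/catalog/entities/" + type);
            URI versioned = URI.create("/v1/catalog/entities/" + type + ":" + version);
            System.out.println(plain + "\n" + versioned);  // unversioned and versioned forms
        }
    }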


[16/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeImpl.java
new file mode 100644
index 0000000..6d16c9a
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeImpl.java
@@ -0,0 +1,594 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.math.BigInteger;
+import java.net.Socket;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import javax.annotation.Nullable;
+import javax.management.ObjectName;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.enricher.RollingTimeWindowMeanEnricher;
+import brooklyn.enricher.TimeWeightedDeltaEnricher;
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.Attributes;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.basic.SoftwareProcessImpl;
+import brooklyn.entity.effector.EffectorBody;
+import brooklyn.entity.java.JavaAppUtils;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.DependentConfiguration;
+import brooklyn.event.basic.Sensors;
+import brooklyn.event.feed.function.FunctionFeed;
+import brooklyn.event.feed.function.FunctionPollConfig;
+import brooklyn.event.feed.jmx.JmxAttributePollConfig;
+import brooklyn.event.feed.jmx.JmxFeed;
+import brooklyn.event.feed.jmx.JmxHelper;
+import brooklyn.event.feed.jmx.JmxOperationPollConfig;
+import brooklyn.location.MachineLocation;
+import brooklyn.location.MachineProvisioningLocation;
+import brooklyn.location.basic.Machines;
+import brooklyn.location.cloud.CloudLocationConfig;
+import brooklyn.util.collections.MutableSet;
+import brooklyn.util.config.ConfigBag;
+import brooklyn.util.exceptions.Exceptions;
+import brooklyn.util.guava.Maybe;
+import brooklyn.util.text.Strings;
+import brooklyn.util.text.TemplateProcessor;
+import brooklyn.util.time.Duration;
+
+import com.google.common.base.Function;
+import com.google.common.base.Functions;
+import com.google.common.base.Joiner;
+import com.google.common.base.Predicate;
+import com.google.common.base.Predicates;
+import com.google.common.base.Splitter;
+import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+/**
+ * Implementation of {@link CassandraNode}.
+ */
+public class CassandraNodeImpl extends SoftwareProcessImpl implements CassandraNode {
+
+    private static final Logger log = LoggerFactory.getLogger(CassandraNodeImpl.class);
+
+    private final AtomicReference<Boolean> detectedCloudSensors = new AtomicReference<Boolean>(false);
+    
+    public CassandraNodeImpl() {
+    }
+    
+    @Override
+    public void init() {
+        super.init();
+        
+        getMutableEntityType().addEffector(EXECUTE_SCRIPT, new EffectorBody<String>() {
+            @Override
+            public String call(ConfigBag parameters) {
+                return executeScript((String)parameters.getStringKey("commands"));
+            }
+        });
+        
+        Entities.checkRequiredUrl(this, getCassandraConfigTemplateUrl());
+        Entities.getRequiredUrlConfig(this, CASSANDRA_RACKDC_CONFIG_TEMPLATE_URL);
+        
+        connectEnrichers();
+    }
+    
+    /**
+     * Some clouds (e.g. Rackspace) give us VMs that have two nics: one for private and one for public.
+     * If the private IP is used then it doesn't work, even for a cluster purely internal to Rackspace!
+     * 
+     * TODO Ugly. Need to understand more and find a better fix. Perhaps in Cassandra itself if necessary.
+     * Also need to investigate further:
+     *  - does it still fail if BroadcastAddress is set to private IP?
+     *  - is `openIptables` opening it up for both interfaces?
+     *  - for aws->rackspace comms between nodes (thus using the public IP), will it be listening on an accessible port?
+     *  - ideally do a check, open a server on one port on the machine, see if it is contactable on the public address;
+     *    and set that as a flag on the cloud
+     */
+    protected void setCloudPreferredSensorNames() {
+        if (detectedCloudSensors.get()) return;
+        synchronized (detectedCloudSensors) {
+            if (detectedCloudSensors.get()) return;
+
+            MachineProvisioningLocation<?> loc = getProvisioningLocation();
+            if (loc != null) {
+                try {
+                    Method method = loc.getClass().getMethod("getProvider");
+                    method.setAccessible(true);
+                    String provider = (String) method.invoke(loc);
+                    String result = "(nothing special)";
+                    if (provider!=null) {
+                        if (provider.contains("rackspace") || provider.contains("cloudservers") || provider.contains("softlayer")) {
+                            /* These clouds have 2 NICs and it has to be consistent, so use public IP here to allow external access;
+                             * (TODO internal access could be configured to improve performance / lower cost, 
+                             * if we know all nodes are visible to each other) */
+                            if (getConfig(LISTEN_ADDRESS_SENSOR)==null)
+                                setConfig(LISTEN_ADDRESS_SENSOR, CassandraNode.ADDRESS.getName());
+                            if (getConfig(BROADCAST_ADDRESS_SENSOR)==null)
+                                setConfig(BROADCAST_ADDRESS_SENSOR, CassandraNode.ADDRESS.getName());
+                            result = "public IP for both listen and broadcast";
+                        } else if (provider.contains("google-compute")) {
+                            /* Google nodes cannot reach themselves/each-other on the public IP,
+                             * and there is no hostname, so use private IP here */
+                            if (getConfig(LISTEN_ADDRESS_SENSOR)==null)
+                                setConfig(LISTEN_ADDRESS_SENSOR, CassandraNode.SUBNET_HOSTNAME.getName());
+                            if (getConfig(BROADCAST_ADDRESS_SENSOR)==null)
+                                setConfig(BROADCAST_ADDRESS_SENSOR, CassandraNode.SUBNET_HOSTNAME.getName());
+                            result = "private IP for both listen and broadcast";
+                        }
+                    }
+                    log.debug("Cassandra NICs inferred {} for {}; using location {}, based on provider {}", new Object[] {result, this, loc, provider});
+                } catch (Exception e) {
+                    log.debug("Cassandra NICs auto-detection failed for {} in location {}: {}", new Object[] {this, loc, e});
+                }
+            }
+            detectedCloudSensors.set(true);
+        }
+    }
+    
+    @Override
+    protected void preStart() {
+        super.preStart();
+        setCloudPreferredSensorNames();
+    }
+    
+    // Used for freemarker
+    public String getMajorMinorVersion() {
+        String version = getConfig(CassandraNode.SUGGESTED_VERSION);
+        if (Strings.isBlank(version)) return "";
+        List<String> versionParts = ImmutableList.copyOf(Splitter.on(".").split(version));
+        return versionParts.get(0) + (versionParts.size() > 1 ? "."+versionParts.get(1) : "");
+    }
+    
+    public String getCassandraConfigTemplateUrl() {
+        String templatedUrl = getConfig(CassandraNode.CASSANDRA_CONFIG_TEMPLATE_URL);
+        return TemplateProcessor.processTemplateContents(templatedUrl, this, ImmutableMap.<String, Object>of());
+    }
+
+    @Override public Integer getGossipPort() { return getAttribute(CassandraNode.GOSSIP_PORT); }
+    @Override public Integer getSslGossipPort() { return getAttribute(CassandraNode.SSL_GOSSIP_PORT); }
+    @Override public Integer getThriftPort() { return getAttribute(CassandraNode.THRIFT_PORT); }
+    @Override public Integer getNativeTransportPort() { return getAttribute(CassandraNode.NATIVE_TRANSPORT_PORT); }
+    @Override public String getClusterName() { return getAttribute(CassandraNode.CLUSTER_NAME); }
+    
+    @Override public int getNumTokensPerNode() {
+        return getConfig(CassandraNode.NUM_TOKENS_PER_NODE);
+    }
+
+    @Deprecated
+    @Override public BigInteger getToken() {
+        BigInteger token = getAttribute(CassandraNode.TOKEN);
+        if (token == null) {
+            token = getConfig(CassandraNode.TOKEN);
+        }
+        return token;
+    }
+    
+    @Override public Set<BigInteger> getTokens() {
+        // Prefer an already-set attribute over the config.
+        // Prefer TOKENS over TOKEN.
+        Set<BigInteger> tokens = getAttribute(CassandraNode.TOKENS);
+        if (tokens == null) {
+            BigInteger token = getAttribute(CassandraNode.TOKEN);
+            if (token != null) {
+                tokens = ImmutableSet.of(token);
+            }
+        }
+        if (tokens == null) {
+            tokens = getConfig(CassandraNode.TOKENS);
+        }
+        if (tokens == null) {
+            BigInteger token = getConfig(CassandraNode.TOKEN);
+            if (token != null) {
+                tokens = ImmutableSet.of(token);
+            }
+        }
+        return tokens;
+    }
+    
+    @Deprecated
+    @Override public String getTokenAsString() {
+        BigInteger token = getToken();
+        if (token==null) return "";
+        return ""+token;
+    }
+
+    @Override public String getTokensAsString() {
+        // TODO check what is required when replacing failed node.
+        // with vnodes in Cassandra 2.x, don't bother supplying token
+        Set<BigInteger> tokens = getTokens();
+        if (tokens == null) return "";
+        return Joiner.on(",").join(tokens);
+    }
+    
+    @Override public String getListenAddress() {
+        String sensorName = getConfig(LISTEN_ADDRESS_SENSOR);
+        if (Strings.isNonBlank(sensorName))
+            return Entities.submit(this, DependentConfiguration.attributeWhenReady(this, Sensors.newStringSensor(sensorName))).getUnchecked();
+        
+        String subnetAddress = getAttribute(CassandraNode.SUBNET_ADDRESS);
+        return Strings.isNonBlank(subnetAddress) ? subnetAddress : getAttribute(CassandraNode.ADDRESS);
+    }
+    @Override public String getBroadcastAddress() {
+        String sensorName = getConfig(BROADCAST_ADDRESS_SENSOR);
+        if (Strings.isNonBlank(sensorName))
+            return Entities.submit(this, DependentConfiguration.attributeWhenReady(this, Sensors.newStringSensor(sensorName))).getUnchecked();
+        
+        String snitchName = getConfig(CassandraNode.ENDPOINT_SNITCH_NAME);
+        if (snitchName.equals("Ec2MultiRegionSnitch") || snitchName.contains("MultiCloudSnitch")) {
+            // http://www.datastax.com/documentation/cassandra/2.0/mobile/cassandra/architecture/architectureSnitchEC2MultiRegion_c.html
+            // describes that the listen_address is set to the private IP, and the broadcast_address is set to the public IP.
+            return getAttribute(CassandraNode.ADDRESS);
+        } else if (!getDriver().isClustered()) {
+            return getListenAddress();
+        } else {
+            // In other situations, prefer the hostname, so other regions can see it
+            // *Unless* hostname resolves at the target to a local-only interface which is different to ADDRESS
+            // (workaround for issue deploying to localhost)
+            String hostname = getAttribute(CassandraNode.HOSTNAME);
+            try {
+                String resolvedAddress = getDriver().getResolvedAddress(hostname);
+                if (resolvedAddress==null) {
+                    log.debug("Cassandra using broadcast address "+getListenAddress()+" for "+this+" because hostname "+hostname+" could not be resolved at remote machine");
+                    return getListenAddress();
+                }
+                if (resolvedAddress.equals("127.0.0.1")) {
+                    log.debug("Cassandra using broadcast address "+getListenAddress()+" for "+this+" because hostname "+hostname+" resolves to 127.0.0.1");
+                    return getListenAddress();                    
+                }
+                return hostname;
+            } catch (Exception e) {
+                Exceptions.propagateIfFatal(e);
+                log.warn("Error resolving hostname "+hostname+" for "+this+": "+e, e);
+                return hostname;
+            }
+        }
+    }
+    /** not always the private IP, if public IP has been insisted on for broadcast, e.g. setting up a rack topology */
+    // have not confirmed this does the right thing in all clouds ... only used for rack topology however
+    public String getPrivateIp() {
+        String sensorName = getConfig(BROADCAST_ADDRESS_SENSOR);
+        if (Strings.isNonBlank(sensorName)) {
+            return getAttribute(Sensors.newStringSensor(sensorName));
+        } else {
+            String subnetAddress = getAttribute(CassandraNode.SUBNET_ADDRESS);
+            return Strings.isNonBlank(subnetAddress) ? subnetAddress : getAttribute(CassandraNode.ADDRESS);
+        }
+    }
+    public String getPublicIp() {
+        // may need to be something else in google
+        return getAttribute(CassandraNode.ADDRESS);
+    }
+
+    @Override public String getRpcAddress() {
+        String sensorName = getConfig(RPC_ADDRESS_SENSOR);
+        if (Strings.isNonBlank(sensorName))
+            return Entities.submit(this, DependentConfiguration.attributeWhenReady(this, Sensors.newStringSensor(sensorName))).getUnchecked();
+        return "0.0.0.0";
+    }
+    
+    @Override public String getSeeds() { 
+        Set<Entity> seeds = getConfig(CassandraNode.INITIAL_SEEDS);
+        if (seeds==null) {
+            log.warn("No seeds available when requested for "+this, new Throwable("source of no Cassandra seeds when requested"));
+            return null;
+        }
+        String snitchName = getConfig(CassandraNode.ENDPOINT_SNITCH_NAME);
+        MutableSet<String> seedsHostnames = MutableSet.of();
+        for (Entity entity : seeds) {
+            // tried removing ourselves if there are other nodes, but that is a BAD idea!
+            // blows up with a "java.lang.RuntimeException: No other nodes seen!"
+            
+            if (snitchName.equals("Ec2MultiRegionSnitch") || snitchName.contains("MultiCloudSnitch")) {
+                // http://www.datastax.com/documentation/cassandra/2.0/mobile/cassandra/architecture/architectureSnitchEC2MultiRegion_c.html
+                // says the seeds should be public IPs.
+                seedsHostnames.add(entity.getAttribute(CassandraNode.ADDRESS));
+            } else {
+                String sensorName = getConfig(BROADCAST_ADDRESS_SENSOR);
+                if (Strings.isNonBlank(sensorName)) {
+                    seedsHostnames.add(entity.getAttribute(Sensors.newStringSensor(sensorName)));
+                } else {
+                    Maybe<String> optionalSeedHostname = Machines.findSubnetOrPublicHostname(entity);
+                    if (optionalSeedHostname.isPresent()) {
+                        String seedHostname = optionalSeedHostname.get();
+                        seedsHostnames.add(seedHostname);
+                    } else {
+                        log.warn("In node {}, seed hostname missing for {}; not including in seeds list", this, entity);
+                    }
+                }
+            }
+        }
+        
+        String result = Strings.join(seedsHostnames, ",");
+        log.info("Seeds for {}: {}", this, result);
+        return result;
+    }
+
+    // referenced by cassandra-rackdc.properties, read by some of the cassandra snitches
+    public String getDatacenterName() {
+        String name = getAttribute(CassandraNode.DATACENTER_NAME);
+        if (name == null) {
+            MachineLocation machine = getMachineOrNull();
+            MachineProvisioningLocation<?> provisioningLocation = getProvisioningLocation();
+            if (machine != null) {
+                name = machine.getConfig(CloudLocationConfig.CLOUD_REGION_ID);
+            }
+            if (name == null && provisioningLocation != null) {
+                name = provisioningLocation.getConfig(CloudLocationConfig.CLOUD_REGION_ID);
+            }
+            if (name == null) {
+                name = "UNKNOWN_DATACENTER";
+            }
+            setAttribute((AttributeSensor<String>)DATACENTER_NAME, name);
+        }
+        return name;
+    }
+
+    public String getRackName() {
+        String name = getAttribute(CassandraNode.RACK_NAME);
+        if (name == null) {
+            MachineLocation machine = getMachineOrNull();
+            MachineProvisioningLocation<?> provisioningLocation = getProvisioningLocation();
+            if (machine != null) {
+                name = machine.getConfig(CloudLocationConfig.CLOUD_AVAILABILITY_ZONE_ID);
+            }
+            if (name == null && provisioningLocation != null) {
+                name = provisioningLocation.getConfig(CloudLocationConfig.CLOUD_AVAILABILITY_ZONE_ID);
+            }
+            if (name == null) {
+                name = "UNKNOWN_RACK";
+            }
+            setAttribute((AttributeSensor<String>)RACK_NAME, name);
+        }
+        return name;
+    }
+
+    @Override
+    public Class<? extends CassandraNodeDriver> getDriverInterface() {
+        return CassandraNodeDriver.class;
+    }
+    
+    @Override
+    public CassandraNodeDriver getDriver() {
+        return (CassandraNodeDriver) super.getDriver();
+    }
+
+    private volatile JmxFeed jmxFeed;
+    private volatile FunctionFeed functionFeed;
+    private JmxFeed jmxMxBeanFeed;
+    private JmxHelper jmxHelper;
+    private ObjectName storageServiceMBean = JmxHelper.createObjectName("org.apache.cassandra.db:type=StorageService");
+    private ObjectName readStageMBean = JmxHelper.createObjectName("org.apache.cassandra.request:type=ReadStage");
+    private ObjectName mutationStageMBean = JmxHelper.createObjectName("org.apache.cassandra.request:type=MutationStage");
+    private ObjectName snitchMBean = JmxHelper.createObjectName("org.apache.cassandra.db:type=EndpointSnitchInfo");
+
+    
+    @SuppressWarnings({ "unchecked", "rawtypes" })
+    @Override
+    protected void connectSensors() {
+        // "cassandra" isn't really a protocol, but okay for now
+        setAttribute(DATASTORE_URL, "cassandra://"+getAttribute(HOSTNAME)+":"+getAttribute(THRIFT_PORT));
+        
+        super.connectSensors();
+
+        jmxHelper = new JmxHelper(this);
+        jmxFeed = JmxFeed.builder()
+                .entity(this)
+                .period(3000, TimeUnit.MILLISECONDS)
+                .helper(jmxHelper)
+                .pollAttribute(new JmxAttributePollConfig<Boolean>(SERVICE_UP_JMX)
+                        .objectName(storageServiceMBean)
+                        .attributeName("Initialized")
+                        .onSuccess(Functions.forPredicate(Predicates.notNull()))
+                        .onException(Functions.constant(false)))
+                .pollAttribute(new JmxAttributePollConfig<Set<BigInteger>>(TOKENS)
+                        .objectName(storageServiceMBean)
+                        .attributeName("TokenToEndpointMap")
+                        .onSuccess(new Function<Object, Set<BigInteger>>() {
+                            @Override
+                            public Set<BigInteger> apply(@Nullable Object arg) {
+                                Map input = (Map)arg;
+                                if (input == null || input.isEmpty()) return null;
+                                // FIXME does not work on aws-ec2, uses RFC1918 address
+                                Predicate<String> self = Predicates.in(ImmutableList.of(getAttribute(HOSTNAME), getAttribute(ADDRESS), getAttribute(SUBNET_ADDRESS), getAttribute(SUBNET_HOSTNAME)));
+                                Set<String> tokens = Maps.filterValues(input, self).keySet();
+                                Set<BigInteger> result = Sets.newLinkedHashSet();
+                                for (String token : tokens) {
+                                    result.add(new BigInteger(token));
+                                }
+                                return result;
+                            }})
+                        .onException(Functions.<Set<BigInteger>>constant(null)))
+                .pollAttribute(new JmxAttributePollConfig<BigInteger>(TOKEN)
+                        .objectName(storageServiceMBean)
+                        .attributeName("TokenToEndpointMap")
+                        .onSuccess(new Function<Object, BigInteger>() {
+                            @Override
+                            public BigInteger apply(@Nullable Object arg) {
+                                Map input = (Map)arg;
+                                // TODO remove duplication from setting TOKENS
+                                if (input == null || input.isEmpty()) return null;
+                                // FIXME does not work on aws-ec2, uses RFC1918 address
+                                Predicate<String> self = Predicates.in(ImmutableList.of(getAttribute(HOSTNAME), getAttribute(ADDRESS), getAttribute(SUBNET_ADDRESS), getAttribute(SUBNET_HOSTNAME)));
+                                Set<String> tokens = Maps.filterValues(input, self).keySet();
+                                String token = Iterables.getFirst(tokens, null);
+                                return (token != null) ? new BigInteger(token) : null;
+                            }})
+                        .onException(Functions.<BigInteger>constant(null)))
+                .pollOperation(new JmxOperationPollConfig<String>(DATACENTER_NAME)
+                        .period(60, TimeUnit.SECONDS)
+                        .objectName(snitchMBean)
+                        .operationName("getDatacenter")
+                        .operationParams(ImmutableList.of(getBroadcastAddress()))
+                        .onException(Functions.<String>constant(null)))
+                .pollOperation(new JmxOperationPollConfig<String>(RACK_NAME)
+                        .period(60, TimeUnit.SECONDS)
+                        .objectName(snitchMBean)
+                        .operationName("getRack")
+                        .operationParams(ImmutableList.of(getBroadcastAddress()))
+                        .onException(Functions.<String>constant(null)))
+                .pollAttribute(new JmxAttributePollConfig<Integer>(PEERS)
+                        .objectName(storageServiceMBean)
+                        .attributeName("TokenToEndpointMap")
+                        .onSuccess(new Function<Object, Integer>() {
+                            @Override
+                            public Integer apply(@Nullable Object arg) {
+                                Map input = (Map)arg;
+                                if (input == null || input.isEmpty()) return 0;
+                                return input.size();
+                            }
+                        })
+                        .onException(Functions.constant(-1)))
+                .pollAttribute(new JmxAttributePollConfig<Integer>(LIVE_NODE_COUNT)
+                        .objectName(storageServiceMBean)
+                        .attributeName("LiveNodes")
+                        .onSuccess(new Function<Object, Integer>() {
+                            @Override
+                            public Integer apply(@Nullable Object arg) {
+                                List input = (List)arg;
+                                if (input == null || input.isEmpty()) return 0;
+                                return input.size();
+                            }
+                        })
+                        .onException(Functions.constant(-1)))
+                .pollAttribute(new JmxAttributePollConfig<Integer>(READ_ACTIVE)
+                        .objectName(readStageMBean)
+                        .attributeName("ActiveCount")
+                        .onException(Functions.constant((Integer)null)))
+                .pollAttribute(new JmxAttributePollConfig<Long>(READ_PENDING)
+                        .objectName(readStageMBean)
+                        .attributeName("PendingTasks")
+                        .onException(Functions.constant((Long)null)))
+                .pollAttribute(new JmxAttributePollConfig<Long>(READ_COMPLETED)
+                        .objectName(readStageMBean)
+                        .attributeName("CompletedTasks")
+                        .onException(Functions.constant((Long)null)))
+                .pollAttribute(new JmxAttributePollConfig<Integer>(WRITE_ACTIVE)
+                        .objectName(mutationStageMBean)
+                        .attributeName("ActiveCount")
+                        .onException(Functions.constant((Integer)null)))
+                .pollAttribute(new JmxAttributePollConfig<Long>(WRITE_PENDING)
+                        .objectName(mutationStageMBean)
+                        .attributeName("PendingTasks")
+                        .onException(Functions.constant((Long)null)))
+                .pollAttribute(new JmxAttributePollConfig<Long>(WRITE_COMPLETED)
+                        .objectName(mutationStageMBean)
+                        .attributeName("CompletedTasks")
+                        .onException(Functions.constant((Long)null)))
+                .build();
+        
+        functionFeed = FunctionFeed.builder()
+                .entity(this)
+                .period(3000, TimeUnit.MILLISECONDS)
+                .poll(new FunctionPollConfig<Long, Long>(THRIFT_PORT_LATENCY)
+                        .onException(Functions.constant((Long)null))
+                        .callable(new Callable<Long>() {
+                            public Long call() {
+                                try {
+                                    long start = System.currentTimeMillis();
+                                    Socket s = new Socket(getAttribute(Attributes.HOSTNAME), getThriftPort());
+                                    s.close();
+                                    long latency = System.currentTimeMillis() - start;
+                                    computeServiceUp();
+                                    return latency;
+                                } catch (Exception e) {
+                                    if (log.isDebugEnabled())
+                                        log.debug("Cassandra thrift port poll failure: "+e);
+                                    setAttribute(SERVICE_UP, false);
+                                    return null;
+                                }
+                            }
+                            public void computeServiceUp() {
+                                // this will wait an additional poll period after thrift port is up,
+                                // as the caller will not have set yet, but that will help ensure it is really healthy!
+                                setAttribute(SERVICE_UP,
+                                        getAttribute(THRIFT_PORT_LATENCY)!=null && getAttribute(THRIFT_PORT_LATENCY)>=0 && 
+                                        Boolean.TRUE.equals(getAttribute(SERVICE_UP_JMX)));
+                            }
+                        }))
+                .build();
+        
+        jmxMxBeanFeed = JavaAppUtils.connectMXBeanSensors(this);
+    }
+    
+    protected void connectEnrichers() {
+        connectEnrichers(Duration.TEN_SECONDS);
+    }
+    
+    protected void connectEnrichers(Duration windowPeriod) {
+        JavaAppUtils.connectJavaAppServerPolicies(this);
+
+        addEnricher(TimeWeightedDeltaEnricher.<Long>getPerSecondDeltaEnricher(this, READ_COMPLETED, READS_PER_SECOND_LAST));
+        addEnricher(TimeWeightedDeltaEnricher.<Long>getPerSecondDeltaEnricher(this, WRITE_COMPLETED, WRITES_PER_SECOND_LAST));
+        
+        if (windowPeriod!=null) {
+            addEnricher(new RollingTimeWindowMeanEnricher<Long>(this, THRIFT_PORT_LATENCY, 
+                    THRIFT_PORT_LATENCY_IN_WINDOW, windowPeriod));
+            addEnricher(new RollingTimeWindowMeanEnricher<Double>(this, READS_PER_SECOND_LAST, 
+                    READS_PER_SECOND_IN_WINDOW, windowPeriod));
+            addEnricher(new RollingTimeWindowMeanEnricher<Double>(this, WRITES_PER_SECOND_LAST, 
+                    WRITES_PER_SECOND_IN_WINDOW, windowPeriod));
+        }
+    }
+    
+    @Override
+    public void disconnectSensors() {
+        super.disconnectSensors();
+
+        if (jmxFeed != null) jmxFeed.stop();
+        if (jmxMxBeanFeed != null) jmxMxBeanFeed.stop();
+        if (jmxHelper != null) jmxHelper.terminate();
+        if (functionFeed != null) functionFeed.stop();
+    }
+
+    @Override
+    public void setToken(String token) {
+        try {
+            if (!jmxHelper.isConnected()) jmxHelper.connect();
+            jmxHelper.operation(storageServiceMBean, "move", token);
+            log.info("Moved server {} to token {}", getId(), token);
+        } catch (IOException ioe) {
+            Throwables.propagate(ioe);
+        }
+    }
+    
+    @Override
+    public String executeScript(String commands) {
+        return getDriver().executeScriptAsync(commands).block().getStdout();
+    }
+    
+}
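
Aside: setCloudPreferredSensorNames() above guards its reflection-based provider probe with an AtomicReference<Boolean> that doubles as completion flag and lock, so detection runs at most once per entity however many threads race into preStart(). A standalone distillation of that pattern (the class name DetectOnce is hypothetical):

    import java.util.concurrent.atomic.AtomicReference;

    public class DetectOnce {
        // Doubles as the completion flag and the monitor to synchronize on.
        private final AtomicReference<Boolean> detected = new AtomicReference<Boolean>(false);

        public void detectIfNeeded() {
            if (detected.get()) return;        // fast path: already done, no locking
            synchronized (detected) {
                if (detected.get()) return;    // re-check: another thread may have won the race
                // ... provider-specific detection would run here ...
                detected.set(true);            // publish so later callers take the fast path
            }
        }
    }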

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeSshDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeSshDriver.java
new file mode 100644
index 0000000..d9fd1c1
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeSshDriver.java
@@ -0,0 +1,420 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.Attributes;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.basic.EntityLocal;
+import brooklyn.entity.database.DatastoreMixins;
+import brooklyn.entity.java.JavaSoftwareProcessSshDriver;
+import brooklyn.entity.java.UsesJmx;
+import brooklyn.entity.software.SshEffectorTasks;
+import brooklyn.event.basic.DependentConfiguration;
+import brooklyn.location.Location;
+import brooklyn.location.access.BrooklynAccessUtils;
+import brooklyn.location.basic.Machines;
+import brooklyn.location.basic.SshMachineLocation;
+import brooklyn.management.TaskWrapper;
+import brooklyn.util.collections.MutableMap;
+import brooklyn.util.collections.MutableSet;
+import brooklyn.util.exceptions.Exceptions;
+import brooklyn.util.guava.Maybe;
+import brooklyn.util.net.Networking;
+import brooklyn.util.os.Os;
+import brooklyn.util.ssh.BashCommands;
+import brooklyn.util.stream.Streams;
+import brooklyn.util.task.DynamicTasks;
+import brooklyn.util.task.Tasks;
+import brooklyn.util.task.system.ProcessTaskWrapper;
+import brooklyn.util.text.Identifiers;
+import brooklyn.util.text.Strings;
+import brooklyn.util.text.TemplateProcessor;
+import brooklyn.util.time.Duration;
+import brooklyn.util.time.Time;
+
+/**
+ * Start a {@link CassandraNode} in a {@link Location} accessible over ssh.
+ */
+public class CassandraNodeSshDriver extends JavaSoftwareProcessSshDriver implements CassandraNodeDriver {
+
+    private static final Logger log = LoggerFactory.getLogger(CassandraNodeSshDriver.class);
+
+    protected Maybe<String> resolvedAddressCache = Maybe.absent();
+
+    public CassandraNodeSshDriver(CassandraNodeImpl entity, SshMachineLocation machine) {
+        super(entity, machine);
+    }
+
+    @Override
+    protected String getLogFileLocation() { return Os.mergePathsUnix(getRunDir(),"cassandra.log"); }
+
+    @Override
+    public Integer getGossipPort() { return entity.getAttribute(CassandraNode.GOSSIP_PORT); }
+
+    @Override
+    public Integer getSslGossipPort() { return entity.getAttribute(CassandraNode.SSL_GOSSIP_PORT); }
+
+    @Override
+    public Integer getThriftPort() { return entity.getAttribute(CassandraNode.THRIFT_PORT); }
+
+    @Override
+    public Integer getNativeTransportPort() { return entity.getAttribute(CassandraNode.NATIVE_TRANSPORT_PORT); }
+
+    @Override
+    public String getClusterName() { return entity.getAttribute(CassandraNode.CLUSTER_NAME); }
+
+    @Override
+    public String getCassandraConfigTemplateUrl() {
+        String templatedUrl = entity.getConfig(CassandraNode.CASSANDRA_CONFIG_TEMPLATE_URL);
+        return TemplateProcessor.processTemplateContents(templatedUrl, this, ImmutableMap.<String, Object>of());
+    }
+
+    @Override
+    public String getCassandraConfigFileName() { return entity.getConfig(CassandraNode.CASSANDRA_CONFIG_FILE_NAME); }
+
+    public String getEndpointSnitchName() { return entity.getConfig(CassandraNode.ENDPOINT_SNITCH_NAME); }
+
+    public String getCassandraRackdcConfigTemplateUrl() { return entity.getConfig(CassandraNode.CASSANDRA_RACKDC_CONFIG_TEMPLATE_URL); }
+
+    public String getCassandraRackdcConfigFileName() { return entity.getConfig(CassandraNode.CASSANDRA_RACKDC_CONFIG_FILE_NAME); }
+
+    public String getMirrorUrl() { return entity.getConfig(CassandraNode.MIRROR_URL); }
+
+    protected String getDefaultUnpackedDirectoryName() {
+        return "apache-cassandra-"+getVersion();
+    }
+
+    protected boolean isV2() {
+        String version = getVersion();
+        return version.startsWith("2.");
+    }
+
+    @Override
+    public boolean installJava() {
+        if (isV2()) {
+            return checkForAndInstallJava("1.8");
+        } else {
+            return super.installJava();
+        }
+    }
+
+    @Override
+    public void preInstall() {
+        resolver = Entities.newDownloader(this);
+        setExpandedInstallDir(Os.mergePaths(getInstallDir(), resolver.getUnpackedDirectoryName(getDefaultUnpackedDirectoryName())));
+    }
+
+    @Override
+    public void install() {
+        List<String> urls = resolver.getTargets();
+        String saveAs = resolver.getFilename();
+
+        List<String> commands = ImmutableList.<String>builder()
+                .addAll(BashCommands.commandsToDownloadUrlsAs(urls, saveAs))
+                .add(BashCommands.INSTALL_TAR)
+                .add("tar xzfv " + saveAs)
+                .build();
+
+        newScript(INSTALLING)
+                .body.append(commands)
+                .execute();
+    }
+
+    @Override
+    public Set<Integer> getPortsUsed() {
+        return ImmutableSet.<Integer>builder()
+                .addAll(super.getPortsUsed())
+                .addAll(getPortMap().values())
+                .build();
+    }
+
+    protected Map<String, Integer> getPortMap() {
+        return ImmutableMap.<String, Integer>builder()
+                .put("jmxPort", entity.getAttribute(UsesJmx.JMX_PORT))
+                .put("rmiPort", entity.getAttribute(UsesJmx.RMI_REGISTRY_PORT))
+                .put("gossipPort", getGossipPort())
+                .put("sslGossipPort", getSslGossipPort())
+                .put("thriftPort", getThriftPort())
+                .build();
+    }
+
+    @Override
+    public void customize() {
+        log.debug("Customizing {} (Cluster {})", entity, getClusterName());
+        Networking.checkPortsValid(getPortMap());
+
+        customizeInitialSeeds();
+
+        String logFileEscaped = getLogFileLocation().replace("/", "\\/"); // escape slashes
+
+        ImmutableList.Builder<String> commands = new ImmutableList.Builder<String>()
+                .add(String.format("cp -R %s/{bin,conf,lib,interface,pylib,tools} .", getExpandedInstallDir()))
+                .add("mkdir -p data")
+                .add("mkdir -p brooklyn_commands")
+                .add(String.format("sed -i.bk 's/log4j.appender.R.File=.*/log4j.appender.R.File=%s/g' %s/conf/log4j-server.properties", logFileEscaped, getRunDir()))
+                .add(String.format("sed -i.bk '/JMX_PORT/d' %s/conf/cassandra-env.sh", getRunDir()))
+                // Script sets 180k on Linux which gives Java error:  The stack size specified is too small, Specify at least 228k
+                .add(String.format("sed -i.bk 's/-Xss180k/-Xss280k/g' %s/conf/cassandra-env.sh", getRunDir()));
+
+        newScript(CUSTOMIZING)
+                .body.append(commands.build())
+                .failOnNonZeroResultCode()
+                .execute();
+
+        // Copy the cassandra.yaml configuration file across
+        String destinationConfigFile = Os.mergePathsUnix(getRunDir(), "conf", getCassandraConfigFileName());
+        copyTemplate(getCassandraConfigTemplateUrl(), destinationConfigFile);
+
+        // Copy the cassandra-rackdc.properties configuration file across
+        String rackdcDestinationFile = Os.mergePathsUnix(getRunDir(), "conf", getCassandraRackdcConfigFileName());
+        copyTemplate(getCassandraRackdcConfigTemplateUrl(), rackdcDestinationFile);
+
+        customizeCopySnitch();
+    }
+
+    protected void customizeCopySnitch() {
+        // Copy the custom snitch jar file across
+        String customSnitchJarUrl = entity.getConfig(CassandraNode.CUSTOM_SNITCH_JAR_URL);
+        if (Strings.isNonBlank(customSnitchJarUrl)) {
+            int lastSlashIndex = customSnitchJarUrl.lastIndexOf("/");
+            String customSnitchJarName = (lastSlashIndex > 0) ? customSnitchJarUrl.substring(lastSlashIndex+1) : "customBrooklynSnitch.jar";
+            String jarDestinationFile = Os.mergePathsUnix(getRunDir(), "lib", customSnitchJarName);
+            InputStream customSnitchJarStream = checkNotNull(resource.getResourceFromUrl(customSnitchJarUrl), "%s could not be loaded", customSnitchJarUrl);
+            try {
+                getMachine().copyTo(customSnitchJarStream, jarDestinationFile);
+            } finally {
+                Streams.closeQuietly(customSnitchJarStream);
+            }
+        }
+    }
+
+    protected void customizeInitialSeeds() {
+        if (entity.getConfig(CassandraNode.INITIAL_SEEDS)==null) {
+            if (isClustered()) {
+                entity.setConfig(CassandraNode.INITIAL_SEEDS,
+                    DependentConfiguration.attributeWhenReady(entity.getParent(), CassandraDatacenter.CURRENT_SEEDS));
+            } else {
+                entity.setConfig(CassandraNode.INITIAL_SEEDS, MutableSet.<Entity>of(entity));
+            }
+        }
+    }
+
+    @Override
+    public boolean isClustered() {
+        return entity.getParent() instanceof CassandraDatacenter;
+    }
+
+    @Override
+    public void launch() {
+        String subnetHostname = Machines.findSubnetOrPublicHostname(entity).get();
+        Set<Entity> seeds = getEntity().getConfig(CassandraNode.INITIAL_SEEDS);
+        List<Entity> ancestors = getCassandraAncestors();
+        log.info("Launching " + entity + ": " +
+                "cluster "+getClusterName()+", " +
+                "hostname (public) " + getEntity().getAttribute(Attributes.HOSTNAME) + ", " +
+                "hostname (subnet) " + subnetHostname + ", " +
+                "seeds "+((CassandraNode)entity).getSeeds()+" (from "+seeds+")");
+
+        boolean isFirst = seeds.iterator().next().equals(entity);
+        if (isClustered() && !isFirst && CassandraDatacenter.WAIT_FOR_FIRST) {
+            // wait for the first node
+            long firstStartTime = Entities.submit(entity, DependentConfiguration.attributeWhenReady(
+                ancestors.get(ancestors.size()-1), CassandraDatacenter.FIRST_NODE_STARTED_TIME_UTC)).getUnchecked();
+            // optionally force a delay before starting subsequent nodes; see comment at CassandraCluster.DELAY_AFTER_FIRST
+            Duration toWait = Duration.millis(firstStartTime + CassandraDatacenter.DELAY_AFTER_FIRST.toMilliseconds() -  System.currentTimeMillis());
+            if (toWait.toMilliseconds()>0) {
+                log.info("Launching " + entity + ": delaying launch of non-first node by "+toWait+" to prevent schema disagreements");
+                Tasks.setBlockingDetails("Pausing to ensure first node has time to start");
+                Time.sleep(toWait);
+                Tasks.resetBlockingDetails();
+            }
+        }
+
+        List<Entity> queuedStart = null;
+        if (CassandraDatacenter.DELAY_BETWEEN_STARTS!=null && !ancestors.isEmpty()) {
+            Entity root = ancestors.get(ancestors.size()-1);
+            // TODO currently we use the class as a semaphore; this is messy, and obviously will not federate;
+            // we should develop a brooklyn framework semaphore (similar to that done on SshMachineLocation)
+            // and use it - note however that the synch block is very short, so this is relatively safe at least
+            synchronized (CassandraNode.class) {
+                queuedStart = root.getAttribute(CassandraDatacenter.QUEUED_START_NODES);
+                if (queuedStart==null) {
+                    queuedStart = new ArrayList<Entity>();
+                    ((EntityLocal)root).setAttribute(CassandraDatacenter.QUEUED_START_NODES, queuedStart);
+                }
+                queuedStart.add(getEntity());
+                ((EntityLocal)root).setAttribute(CassandraDatacenter.QUEUED_START_NODES, queuedStart);
+            }
+            do {
+                // get it again in case it is backed by something external
+                queuedStart = root.getAttribute(CassandraDatacenter.QUEUED_START_NODES);
+                if (queuedStart.get(0).equals(getEntity())) break;
+                synchronized (queuedStart) {
+                    try {
+                        queuedStart.wait(1000);
+                    } catch (InterruptedException e) {
+                        throw Exceptions.propagate(e);
+                    }
+                }
+            } while (true);
+
+            // TODO should look at last start time... but instead we always wait
+            CassandraDatacenter.DELAY_BETWEEN_STARTS.countdownTimer().waitForExpiryUnchecked();
+        }
+
+        try {
+            // Relies on `bin/cassandra -p <pidfile>`, rather than us writing pid file ourselves.
+            newScript(MutableMap.of(USE_PID_FILE, false), LAUNCHING)
+                    .body.append(
+                            // log the date to attempt to debug occasional http://wiki.apache.org/cassandra/FAQ#schema_disagreement
+                            // (can be caused by machines out of synch time-wise; but in our case it seems to be caused by other things!)
+                            "echo date on cassandra server `hostname` when launching is `date`",
+                            launchEssentialCommand(),
+                            "echo after essential command")
+                    .execute();
+            if (!isClustered()) {
+                InputStream creationScript = DatastoreMixins.getDatabaseCreationScript(entity);
+                if (creationScript!=null) {
+                    Tasks.setBlockingDetails("Pausing to ensure Cassandra (singleton) has started before running creation script");
+                    Time.sleep(Duration.seconds(20));
+                    Tasks.resetBlockingDetails();
+                    executeScriptAsync(Streams.readFullyString(creationScript));
+                }
+            }
+            if (isClustered() && isFirst) {
+                for (Entity ancestor: getCassandraAncestors()) {
+                    ((EntityLocal)ancestor).setAttribute(CassandraDatacenter.FIRST_NODE_STARTED_TIME_UTC, System.currentTimeMillis());
+                }
+            }
+        } finally {
+            if (queuedStart!=null) {
+                Entity head = queuedStart.remove(0);
+                checkArgument(head.equals(getEntity()), "first queued node was %s but we are %s", head, getEntity());
+                synchronized (queuedStart) {
+                    queuedStart.notifyAll();
+                }
+            }
+        }
+    }
+
+    /** returns cassandra-related ancestors (datacenter, fabric), with datacenter first and fabric last */
+    protected List<Entity> getCassandraAncestors() {
+        List<Entity> result = new ArrayList<Entity>();
+        Entity ancestor = getEntity().getParent();
+        while (ancestor!=null) {
+            if (ancestor instanceof CassandraDatacenter || ancestor instanceof CassandraFabric)
+                result.add(ancestor);
+            ancestor = ancestor.getParent();
+        }
+        return result;
+    }
+
+    protected String launchEssentialCommand() {
+        if (isV2()) {
+            return String.format("./bin/cassandra -p %s > ./cassandra-console.log 2>&1", getPidFile());
+        } else {
+            // TODO Could probably get rid of the nohup here, as script does equivalent itself
+            // with `exec ... <&- &`
+            return String.format("nohup ./bin/cassandra -p %s > ./cassandra-console.log 2>&1 &", getPidFile());
+        }
+    }
+
+    public String getPidFile() { return Os.mergePathsUnix(getRunDir(), "cassandra.pid"); }
+
+    @Override
+    public boolean isRunning() {
+        return newScript(MutableMap.of(USE_PID_FILE, getPidFile()), CHECK_RUNNING).execute() == 0;
+    }
+
+    @Override
+    public void stop() {
+        newScript(MutableMap.of(USE_PID_FILE, getPidFile()), STOPPING).execute();
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    protected Map<String,String> getCustomJavaSystemProperties() {
+        return MutableMap.<String, String>builder()
+                .putAll(super.getCustomJavaSystemProperties())
+                .put("cassandra.config", getCassandraConfigFileName())
+                .build();
+    }
+
+    @Override
+    public Map<String, String> getShellEnvironment() {
+        return MutableMap.<String, String>builder()
+                .putAll(super.getShellEnvironment())
+                .put("CASSANDRA_HOME", getRunDir())
+                .put("CASSANDRA_CONF", Os.mergePathsUnix(getRunDir(), "conf"))
+                .renameKey("JAVA_OPTS", "JVM_OPTS")
+                .build();
+    }
+
+    @Override
+    public ProcessTaskWrapper<Integer> executeScriptAsync(String commands) {
+        String fileToRun = Os.mergePathsUnix("brooklyn_commands", "cassandra-commands-"+Identifiers.makeRandomId(8));
+        TaskWrapper<Void> task = SshEffectorTasks.put(Os.mergePathsUnix(getRunDir(), fileToRun))
+                .machine(getMachine())
+                .contents(commands)
+                .summary("copying cassandra script to execute "+fileToRun)
+                .newTask();
+        DynamicTasks.queueIfPossible(task).orSubmitAndBlock(getEntity()).andWaitForSuccess();
+        return executeScriptFromInstalledFileAsync(fileToRun);
+    }
+
+    public ProcessTaskWrapper<Integer> executeScriptFromInstalledFileAsync(String fileToRun) {
+        ProcessTaskWrapper<Integer> task = SshEffectorTasks.ssh(
+                        "cd "+getRunDir(),
+                        scriptInvocationCommand(getThriftPort(), fileToRun))
+                .machine(getMachine())
+                .summary("executing cassandra script "+fileToRun)
+                .newTask();
+        DynamicTasks.queueIfPossible(task).orSubmitAndBlock(getEntity());
+        return task;
+    }
+
+    protected String scriptInvocationCommand(Integer optionalThriftPort, String fileToRun) {
+        return "bin/cassandra-cli " +
+                (optionalThriftPort != null ? "--port " + optionalThriftPort : "") +
+                " --file "+fileToRun;
+    }
+
+    @Override
+    public String getResolvedAddress(String hostname) {
+        return resolvedAddressCache.or(BrooklynAccessUtils.resolvedAddressSupplier(getEntity(), getMachine(), hostname));
+    }
+
+}
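
For readers skimming the diff, a minimal sketch of the command string that
scriptInvocationCommand(Integer, String) above produces; the port value and the
staged file name are illustrative assumptions, not taken from the commit:

    Integer thriftPort = 9160;  // assumption: Cassandra's default thrift port
    String fileToRun = "brooklyn_commands/cassandra-commands-abcd1234";  // hypothetical staged file
    String cmd = "bin/cassandra-cli " +
            (thriftPort != null ? "--port " + thriftPort : "") +
            " --file " + fileToRun;
    // cmd is "bin/cassandra-cli --port 9160 --file brooklyn_commands/cassandra-commands-abcd1234",
    // executed from getRunDir() by executeScriptFromInstalledFileAsync once
    // executeScriptAsync has copied the commands file to the machine.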

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/TokenGenerator.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/TokenGenerator.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/TokenGenerator.java
new file mode 100644
index 0000000..6401c03
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/TokenGenerator.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import java.math.BigInteger;
+import java.util.Set;
+
+public interface TokenGenerator {
+
+    BigInteger max();
+    BigInteger min();
+    BigInteger range();
+
+    void setOrigin(BigInteger shift);
+    
+    BigInteger newToken();
+    
+    BigInteger getTokenForReplacementNode(BigInteger oldToken);
+    
+    Set<BigInteger> getTokensForReplacementNode(Set<BigInteger> oldTokens);
+    
+    /**
+     * Indicates that the cluster is growing by the given number of nodes
+     * (or starting with that many), so expect that number of consecutive
+     * calls to {@link #newToken()}.
+     * 
+     * @param numNewNodes the number of nodes being added
+     */
+    void growingCluster(int numNewNodes);
+
+    void shrinkingCluster(Set<BigInteger> nodesToRemove);
+    
+    void refresh(Set<BigInteger> currentNodes);
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/TokenGenerators.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/TokenGenerators.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/TokenGenerators.java
new file mode 100644
index 0000000..b1362d2
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/TokenGenerators.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.io.Serializable;
+import java.math.BigInteger;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import brooklyn.util.collections.MutableList;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
+public class TokenGenerators {
+
+    /**
+     * Sub-classes are recommended to call {@link #checkRangeValid()} at construction time.
+     */
+    public static abstract class AbstractTokenGenerator implements TokenGenerator, Serializable {
+        
+        private static final long serialVersionUID = -1884526356161711176L;
+        
+        public static final BigInteger TWO = BigInteger.valueOf(2);
+        
+        public abstract BigInteger max();
+        public abstract BigInteger min();
+        public abstract BigInteger range();
+
+        private final Set<BigInteger> currentTokens = Sets.newTreeSet();
+        private final List<BigInteger> nextTokens = Lists.newArrayList();
+        private BigInteger origin = BigInteger.ZERO;
+        
+        protected void checkRangeValid() {
+            Preconditions.checkState(range().equals(max().subtract(min()).add(BigInteger.ONE)), 
+                    "min=%s; max=%s; range=%s", min(), max(), range());
+        }
+        
+        @Override
+        public void setOrigin(BigInteger shift) {
+            this.origin = Preconditions.checkNotNull(shift, "shift");
+        }
+        
+        /**
+         * Unless we're explicitly starting a new cluster or resizing by a pre-defined number of nodes,
+         * let Cassandra decide (i.e. return null).
+         */
+        @Override
+        public synchronized BigInteger newToken() {
+            BigInteger result = (nextTokens.isEmpty()) ? null : nextTokens.remove(0);
+            if (result != null) currentTokens.add(result);
+            return result;
+        }
+
+        @Override
+        public synchronized BigInteger getTokenForReplacementNode(BigInteger oldToken) {
+            checkNotNull(oldToken, "oldToken");
+            return normalize(oldToken.subtract(BigInteger.ONE));
+        }
+
+        @Override
+        public synchronized Set<BigInteger> getTokensForReplacementNode(Set<BigInteger> oldTokens) {
+            checkNotNull(oldTokens, "oldTokens");
+            Set<BigInteger> result = Sets.newLinkedHashSet();
+            for (BigInteger oldToken : oldTokens) {
+                result.add(getTokenForReplacementNode(oldToken));
+            }
+            return result;
+        }
+        
+        @Override
+        public synchronized void growingCluster(int numNewNodes) {
+            if (currentTokens.isEmpty() && nextTokens.isEmpty()) {
+                nextTokens.addAll(generateEquidistantTokens(numNewNodes));
+            } else {
+                // simple strategy which iteratively finds best midpoint
+                for (int i=0; i<numNewNodes; i++) {
+                    nextTokens.add(generateBestNextToken());
+                }
+            }
+        }
+
+        @Override
+        public synchronized void shrinkingCluster(Set<BigInteger> nodesToRemove) {
+            currentTokens.removeAll(nodesToRemove);
+        }
+
+        @Override
+        public synchronized void refresh(Set<BigInteger> currentNodes) {
+            currentTokens.clear();
+            currentTokens.addAll(currentNodes);
+        }
+
+        private List<BigInteger> generateEquidistantTokens(int numTokens) {
+            List<BigInteger> result = Lists.newArrayList();
+            for (int i = 0; i < numTokens; i++) {
+                BigInteger token = range().multiply(BigInteger.valueOf(i)).divide(BigInteger.valueOf(numTokens)).add(min());
+                token = normalize(token.add(origin));
+                result.add(token);
+            }
+            return result;
+        }
+        
+        private BigInteger normalize(BigInteger input) {
+            while (input.compareTo(min()) < 0)
+                input = input.add(range());
+            while (input.compareTo(max()) > 0)
+                input = input.subtract(range());
+            return input;
+        }
+        
+        private BigInteger generateBestNextToken() {
+            List<BigInteger> allTokens = MutableList.<BigInteger>of().appendAll(currentTokens).appendAll(nextTokens);
+            Collections.sort(allTokens);
+            Iterator<BigInteger> ti = allTokens.iterator();
+            
+            BigInteger thisValue = ti.next();
+            BigInteger prevValue = allTokens.get(allTokens.size()-1).subtract(range());
+            
+            BigInteger bestNewTokenSoFar = normalize(prevValue.add(thisValue).divide(TWO));
+            BigInteger biggestRangeSizeSoFar = thisValue.subtract(prevValue);
+            
+            while (ti.hasNext()) {
+                prevValue = thisValue;
+                thisValue = ti.next();
+                
+                BigInteger rangeHere = thisValue.subtract(prevValue);
+                if (rangeHere.compareTo(biggestRangeSizeSoFar) > 0) {
+                    bestNewTokenSoFar = prevValue.add(thisValue).divide(TWO);
+                    biggestRangeSizeSoFar = rangeHere;
+                }
+            }
+            return bestNewTokenSoFar;
+        }
+
+    }
+
+    public static class PosNeg63TokenGenerator extends AbstractTokenGenerator {
+        private static final long serialVersionUID = 7327403957176106754L;
+        
+        public static final BigInteger MIN_TOKEN = TWO.pow(63).negate();
+        public static final BigInteger MAX_TOKEN = TWO.pow(63).subtract(BigInteger.ONE);
+        public static final BigInteger RANGE = TWO.pow(64);
+
+        public PosNeg63TokenGenerator() {
+            checkRangeValid();
+        }
+
+        @Override public BigInteger max() { return MAX_TOKEN; }
+        @Override public BigInteger min() { return MIN_TOKEN; }
+        @Override public BigInteger range() { return RANGE; }
+    }
+    
+    /** token generator used by cassandra pre v1.2 */
+    public static class NonNeg127TokenGenerator extends AbstractTokenGenerator {
+        private static final long serialVersionUID = 1357426905711548198L;
+        
+        public static final BigInteger MIN_TOKEN = BigInteger.ZERO;
+        public static final BigInteger MAX_TOKEN = TWO.pow(127).subtract(BigInteger.ONE);
+        public static final BigInteger RANGE = TWO.pow(127);
+
+        public NonNeg127TokenGenerator() {
+            checkRangeValid();
+        }
+        
+        @Override public BigInteger max() { return MAX_TOKEN; }
+        @Override public BigInteger min() { return MIN_TOKEN; }
+        @Override public BigInteger range() { return RANGE; }
+    }
+    
+}
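
As a rough illustration of the allocation strategy implemented above, using the
PosNeg63TokenGenerator range (the variable names are mine, not part of the commit):

    TokenGenerator tg = new TokenGenerators.PosNeg63TokenGenerator();
    tg.growingCluster(2);               // pre-compute two equidistant tokens for a new cluster
    BigInteger t0 = tg.newToken();      // -2^63, the range minimum
    BigInteger t1 = tg.newToken();      // 0, i.e. min + range/2
    tg.growingCluster(1);               // later growth bisects the largest remaining arc
    BigInteger t2 = tg.newToken();      // 2^62, midpoint of the wrap-around arc from 0 back to -2^63
    // With no pre-computed tokens pending, newToken() returns null and Cassandra chooses itself.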

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseCluster.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseCluster.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseCluster.java
new file mode 100644
index 0000000..b009485
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseCluster.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchbase;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.brooklyn.catalog.Catalog;
+import brooklyn.config.ConfigKey;
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.ConfigKeys;
+import brooklyn.entity.group.DynamicCluster;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.Sensors;
+import brooklyn.util.flags.SetFromFlag;
+import brooklyn.util.time.Duration;
+
+import com.google.common.reflect.TypeToken;
+
+@Catalog(name="Couchbase Cluster", description="Couchbase is an open source, distributed (shared-nothing architecture) "
+        + "NoSQL document-oriented database that is optimized for interactive applications.")
+@ImplementedBy(CouchbaseClusterImpl.class)
+public interface CouchbaseCluster extends DynamicCluster {
+
+    AttributeSensor<Integer> ACTUAL_CLUSTER_SIZE = Sensors.newIntegerSensor("coucbase.cluster.actualClusterSize", "returns the actual number of nodes in the cluster");
+
+    @SuppressWarnings("serial")
+    AttributeSensor<Set<Entity>> COUCHBASE_CLUSTER_UP_NODES = Sensors.newSensor(new TypeToken<Set<Entity>>() {
+    }, "couchbase.cluster.clusterEntities", "the set of service up nodes");
+
+    @SuppressWarnings("serial")
+    AttributeSensor<List<String>> COUCHBASE_CLUSTER_BUCKETS = Sensors.newSensor(new TypeToken<List<String>>() {
+    }, "couchbase.cluster.buckets", "Names of all the buckets the couchbase cluster");
+
+    AttributeSensor<Entity> COUCHBASE_PRIMARY_NODE = Sensors.newSensor(Entity.class, "couchbase.cluster.primaryNode", "The primary couchbase node to query and issue add-server and rebalance on");
+
+    AttributeSensor<Boolean> IS_CLUSTER_INITIALIZED = Sensors.newBooleanSensor("couchbase.cluster.isClusterInitialized", "flag to emit if the couchbase cluster was initialized");
+
+    @SetFromFlag("clusterName")
+    ConfigKey<String> CLUSTER_NAME = ConfigKeys.newStringConfigKey("couchbase.cluster.name", "Optional name for this cluster");
+
+    @SetFromFlag("intialQuorumSize")
+    ConfigKey<Integer> INITIAL_QUORUM_SIZE = ConfigKeys.newIntegerConfigKey("couchbase.cluster.intialQuorumSize", "Initial cluster quorum size - number of initial nodes that must have been successfully started to report success (if < 0, then use value of INITIAL_SIZE)",
+            -1);
+
+    @SetFromFlag("delayBeforeAdvertisingCluster")
+    ConfigKey<Duration> DELAY_BEFORE_ADVERTISING_CLUSTER = ConfigKeys.newConfigKey(Duration.class, "couchbase.cluster.delayBeforeAdvertisingCluster", "Delay after cluster is started before checking and advertising its availability", Duration.TEN_SECONDS);
+
+    // TODO not sure if this is needed; previously waited 3m (SERVICE_UP_TIME_OUT) but that seems absurdly long
+    @SetFromFlag("postStartStabilizationDelay")
+    ConfigKey<Duration> NODES_STARTED_STABILIZATION_DELAY = ConfigKeys.newConfigKey(Duration.class, "couchbase.cluster.postStartStabilizationDelay", "Delay after nodes have been started before treating it as a cluster", Duration.TEN_SECONDS);
+    
+    @SetFromFlag("adminUsername")
+    ConfigKey<String> COUCHBASE_ADMIN_USERNAME = CouchbaseNode.COUCHBASE_ADMIN_USERNAME;
+
+    @SetFromFlag("adminPassword")
+    ConfigKey<String> COUCHBASE_ADMIN_PASSWORD = CouchbaseNode.COUCHBASE_ADMIN_PASSWORD;
+
+    @SuppressWarnings("serial")
+    AttributeSensor<List<String>> COUCHBASE_CLUSTER_UP_NODE_ADDRESSES = Sensors.newSensor(new TypeToken<List<String>>() {},
+            "couchbase.cluster.node.addresses", "List of host:port of all active nodes in the cluster (http admin port, and public hostname/IP)");
+    AttributeSensor<String> COUCHBASE_CLUSTER_CONNECTION_URL = Sensors.newStringSensor(
+            "couchbase.cluster.connection.url", "Couchbase-style URL to connect to the cluster (e.g. http://127.0.0.1:8091/ or couchbase://10.0.0.1,10.0.0.2/)");
+    
+    // Interesting stats
+    AttributeSensor<Double> OPS_PER_NODE = Sensors.newDoubleSensor("couchbase.stats.cluster.per.node.ops", 
+            "Average across cluster for pools/nodes/<current node>/interestingStats/ops");
+    AttributeSensor<Double> EP_BG_FETCHED_PER_NODE = Sensors.newDoubleSensor("couchbase.stats.cluster.per.node.ep.bg.fetched", 
+            "Average across cluster for pools/nodes/<current node>/interestingStats/ep_bg_fetched");
+    AttributeSensor<Double> CURR_ITEMS_PER_NODE = Sensors.newDoubleSensor("couchbase.stats.cluster.per.node.curr.items", 
+            "Average across cluster for pools/nodes/<current node>/interestingStats/curr_items");
+    AttributeSensor<Double> VB_REPLICA_CURR_ITEMS_PER_NODE = Sensors.newDoubleSensor("couchbase.stats.cluster.per.node.vb.replica.curr.items", 
+            "Average across cluster for pools/nodes/<current node>/interestingStats/vb_replica_curr_items");
+    AttributeSensor<Double> GET_HITS_PER_NODE = Sensors.newDoubleSensor("couchbase.stats.cluster.per.node.get.hits", 
+            "Average across cluster for pools/nodes/<current node>/interestingStats/get_hits");
+    AttributeSensor<Double> CMD_GET_PER_NODE = Sensors.newDoubleSensor("couchbase.stats.cluster.per.node.cmd.get", 
+            "Average across cluster for pools/nodes/<current node>/interestingStats/cmd_get");
+    AttributeSensor<Double> CURR_ITEMS_TOT_PER_NODE = Sensors.newDoubleSensor("couchbase.stats.cluster.per.node.curr.items.tot", 
+            "Average across cluster for pools/nodes/<current node>/interestingStats/curr_items_tot");
+    // Although these are Double (after aggregation), they need to be coerced to Long for ByteSizeStrings rendering
+    AttributeSensor<Long> COUCH_DOCS_DATA_SIZE_PER_NODE = Sensors.newLongSensor("couchbase.stats.cluster.per.node.couch.docs.data.size", 
+            "Average across cluster for pools/nodes/<current node>/interestingStats/couch_docs_data_size");
+    AttributeSensor<Long> MEM_USED_PER_NODE = Sensors.newLongSensor("couchbase.stats.cluster.per.node.mem.used", 
+            "Average across cluster for pools/nodes/<current node>/interestingStats/mem_used");
+    AttributeSensor<Long> COUCH_VIEWS_ACTUAL_DISK_SIZE_PER_NODE = Sensors.newLongSensor("couchbase.stats.cluster.per.node.couch.views.actual.disk.size", 
+            "Average across cluster for pools/nodes/<current node>/interestingStats/couch_views_actual_disk_size");
+    AttributeSensor<Long> COUCH_DOCS_ACTUAL_DISK_SIZE_PER_NODE = Sensors.newLongSensor("couchbase.stats.cluster.per.node.couch.docs.actual.disk.size", 
+            "Average across cluster for pools/nodes/<current node>/interestingStats/couch_docs_actual_disk_size");
+    AttributeSensor<Long> COUCH_VIEWS_DATA_SIZE_PER_NODE = Sensors.newLongSensor("couchbase.stats.cluster.per.node.couch.views.data.size", 
+            "Average across cluster for pools/nodes/<current node>/interestingStats/couch_views_data_size");
+    
+    AttributeSensor<Boolean> BUCKET_CREATION_IN_PROGRESS = Sensors.newBooleanSensor("couchbase.cluster.bucketCreationInProgress", "Indicates that a bucket is currently being created, and " +
+            "further bucket creation should be deferred");
+
+    /**
+     * createBuckets is a list of all the buckets to be created on the couchbase cluster.
+     * The buckets will be created on the primary node of the cluster.
+     * Each map entry for a bucket should contain the following parameters:
+     * - <"bucket",(String) name of the bucket (default: default)>
+     * - <"bucket-type",(String) name of bucket type (default: couchbase)>
+     * - <"bucket-port",(Integer) the bucket port to connect to (default: 11222)>
+     * - <"bucket-ramsize",(Integer) ram size allowed for bucket (default: 200)>
+     * - <"bucket-replica",(Integer) number of replicas for the bucket (default: 1)>
+     */
+    @SuppressWarnings("serial")
+    @SetFromFlag("createBuckets")
+    ConfigKey<List<Map<String, Object>>> CREATE_BUCKETS = ConfigKeys.newConfigKey(new TypeToken<List<Map<String, Object>>>() {}, 
+            "couchbase.cluster.createBuckets", "a list of all dedicated port buckets to be created on the couchbase cluster");
+    
+    @SuppressWarnings("serial")
+    @SetFromFlag("replication")
+    ConfigKey<List<Map<String,Object>>> REPLICATION = ConfigKeys.newConfigKey(new TypeToken<List<Map<String,Object>>>() {}, 
+            "couchbase.cluster.replicationConfiguration", "List of replication rules to configure, each rule including target (id of another cluster) and mode (unidirectional or bidirectional)");
+
+    int getQuorumSize();
+}
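
A hedged sketch of how the createBuckets key above might be supplied from Java
(assumes brooklyn.entity.proxying.EntitySpec, consistent with the imports in this
file, plus Guava's ImmutableList/ImmutableMap; the bucket values are illustrative):

    EntitySpec<CouchbaseCluster> spec = EntitySpec.create(CouchbaseCluster.class)
            .configure(CouchbaseCluster.CREATE_BUCKETS, ImmutableList.<Map<String, Object>>of(
                    ImmutableMap.<String, Object>of(
                            "bucket", "default",         // bucket name (default: default)
                            "bucket-type", "couchbase",  // bucket type (default: couchbase)
                            "bucket-ramsize", 200,       // RAM size allowed (default: 200)
                            "bucket-replica", 1)));      // replica count (default: 1)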


[09/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/app.config
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/app.config b/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/app.config
deleted file mode 100644
index 7ee8a37..0000000
--- a/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/app.config
+++ /dev/null
@@ -1,353 +0,0 @@
-%% Brooklyn note: file from 1.4.8 Mac install, with erlang section added, and ports templated
-
-%% -*- mode: erlang;erlang-indent-level: 4;indent-tabs-mode: nil -*-
-%% ex: ft=erlang ts=4 sw=4 et
-[
- %% Riak Client APIs config
- {riak_api, [
-            %% pb_backlog is the maximum length to which the queue of pending
-            %% connections may grow. If set, it must be an integer >= 0.
-            %% By default the value is 5. If you anticipate a huge number of
-            %% connections being initialised *simultaneously*, set this number
-            %% higher.
-            %% {pb_backlog, 64},
-             
-            %% pb is a list of IP addresses and TCP ports that the Riak 
-            %% Protocol Buffers interface will bind.
-            {pb, [ {"0.0.0.0", ${entity.riakPbPort?c} } ]}
-            ]},
-
- %% Riak Core config
- {riak_core, [
-              %% Default location of ringstate
-              {ring_state_dir, "./data/ring"},
-
-              %% Default ring creation size.  Make sure it is a power of 2,
-              %% e.g. 16, 32, 64, 128, 256, 512 etc
-              %{ring_creation_size, 64},
-
-              %% http is a list of IP addresses and TCP ports that the Riak
-              %% HTTP interface will bind.
-              {http, [ {"0.0.0.0", ${entity.riakWebPort?c} } ]},
-
-              %% https is a list of IP addresses and TCP ports that the Riak
-              %% HTTPS interface will bind.
-              %{https, [{ "0.0.0.0", ${entity.riakWebPort?c} }]},
-
-              %% Default cert and key locations for https can be overridden
-              %% with the ssl config variable, for example:
-              %{ssl, [
-              %       {certfile, "./etc/cert.pem"},
-              %       {keyfile, "./etc/key.pem"}
-              %      ]},
-
-              %% riak_handoff_port is the TCP port that Riak uses for
-              %% intra-cluster data handoff.
-              {handoff_port, ${entity.handoffListenerPort?c} },
-
-              %% To encrypt riak_core intra-cluster data handoff traffic,
-              %% uncomment the following line and edit its path to an
-              %% appropriate certfile and keyfile.  (This example uses a
-              %% single file with both items concatenated together.)
-              %{handoff_ssl_options, [{certfile, "/tmp/erlserver.pem"}]},
-
-              %% DTrace support
-              %% Do not enable 'dtrace_support' unless your Erlang/OTP
-              %% runtime is compiled to support DTrace.  DTrace is
-              %% available in R15B01 (supported by the Erlang/OTP
-              %% official source package) and in R14B04 via a custom
-              %% source repository & branch.
-              {dtrace_support, false},
-
-              %% Health Checks
-              %% If disabled, health checks registered by an application will
-              %% be ignored. NOTE: this option cannot be changed at runtime.
-              %% To re-enable, the setting must be changed and the node restarted.
-              %% NOTE: As of Riak 1.3.2, health checks are deprecated as they
-              %% may interfere with the new overload protection mechanisms.
-              %% If there is a good reason to re-enable them, you must uncomment
-              %% this line and also add an entry in the riak_kv section:
-              %%          {riak_kv, [ ..., {enable_health_checks, true}, ...]}
-              %% {enable_health_checks, true},
-
-              %% Platform-specific installation paths (substituted by rebar)
-              {platform_bin_dir, "./bin"},
-              {platform_data_dir, "./data"},
-              {platform_etc_dir, "./etc"},
-              {platform_lib_dir, "./lib"},
-              {platform_log_dir, "./log"}
-             ]},
-
- %% Riak KV config
- {riak_kv, [
-            %% Storage_backend specifies the Erlang module defining the storage
-            %% mechanism that will be used on this node.
-            {storage_backend, riak_kv_bitcask_backend},
-
-            %% raw_name is the first part of all URLS used by the Riak raw HTTP
-            %% interface.  See riak_web.erl and raw_http_resource.erl for
-            %% details.
-            %{raw_name, "riak"},
-
-            %% Enable active anti-entropy subsystem + optional debug messages:
-            %%   {anti_entropy, {on|off, []}},
-            %%   {anti_entropy, {on|off, [debug]}},
-            {anti_entropy, {on, []}},
-
-            %% Restrict how fast AAE can build hash trees. Building the tree
-            %% for a given partition requires a full scan over that partition's
-            %% data. Once built, trees stay built until they are expired.
-            %% Config is of the form:
-            %%   {num-builds, per-timespan-in-milliseconds}
-            %% Default is 1 build per hour.
-            {anti_entropy_build_limit, {1, 3600000}},
-
-            %% Determine how often hash trees are expired after being built.
-            %% Periodically expiring a hash tree ensures the on-disk hash tree
-            %% data stays consistent with the actual k/v backend data. It also
-            %% helps Riak identify silent disk failures and bit rot. However,
-            %% expiration is not needed for normal AAE operation and should be
-            %% infrequent for performance reasons. The time is specified in
-            %% milliseconds. The default is 1 week.
-            {anti_entropy_expire, 604800000},
-
-            %% Limit how many AAE exchanges/builds can happen concurrently.
-            {anti_entropy_concurrency, 2},
-
-            %% The tick determines how often the AAE manager looks for work
-            %% to do (building/expiring trees, triggering exchanges, etc).
-            %% The default is every 15 seconds. Lowering this value will
-            %% speed up the rate at which all replicas are synced across the cluster.
-            %% Increasing the value is not recommended.
-            {anti_entropy_tick, 15000},
-
-            %% The directory where AAE hash trees are stored.
-            {anti_entropy_data_dir, "./data/anti_entropy"},
-
-            %% The LevelDB options used by AAE to generate the LevelDB-backed
-            %% on-disk hashtrees.
-            {anti_entropy_leveldb_opts, [{write_buffer_size, 4194304},
-                                         {max_open_files, 20}]},
-
-            %% mapred_name is URL used to submit map/reduce requests to Riak.
-            {mapred_name, "mapred"},
-
-            %% mapred_2i_pipe indicates whether secondary-index
-            %% MapReduce inputs are queued in parallel via their own
-            %% pipe ('true'), or serially via a helper process
-            %% ('false' or undefined).  Set to 'false' or leave
-            %% undefined during a rolling upgrade from 1.0.
-            {mapred_2i_pipe, true},
-
-            %% Each of the following entries control how many Javascript
-            %% virtual machines are available for executing map, reduce,
-            %% pre- and post-commit hook functions.
-            {map_js_vm_count, 8 },
-            {reduce_js_vm_count, 6 },
-            {hook_js_vm_count, 2 },
-
-            %% js_max_vm_mem is the maximum amount of memory, in megabytes,
-            %% allocated to the Javascript VMs. If unset, the default is
-            %% 8MB.
-            {js_max_vm_mem, 8},
-
-            %% js_thread_stack is the maximum amount of thread stack, in megabytes,
-            %% allocated to the Javascript VMs. If unset, the default is 16MB.
-            %% NOTE: This is not the same as the C thread stack.
-            {js_thread_stack, 16},
-
-            %% js_source_dir should point to a directory containing Javascript
-            %% source files which will be loaded by Riak when it initializes
-            %% Javascript VMs.
-            %{js_source_dir, "/tmp/js_source"},
-
-            %% http_url_encoding determines how Riak treats URL encoded
-            %% buckets, keys, and links over the REST API. When set to 'on'
-            %% Riak always decodes encoded values sent as URLs and Headers.
-            %% Otherwise, Riak defaults to compatibility mode where links
-            %% are decoded, but buckets and keys are not. The compatibility
-            %% mode will be removed in a future release.
-            {http_url_encoding, on},
-
-            %% Switch to vnode-based vclocks rather than client ids.  This
-            %% significantly reduces the number of vclock entries.
-            %% Only set true if *all* nodes in the cluster are upgraded to 1.0
-            {vnode_vclocks, true},
-
-            %% This option toggles compatibility of keylisting with 1.0
-            %% and earlier versions.  Once a rolling upgrade to a version
-            %% > 1.0 is completed for a cluster, this should be set to
-            %% true for better control of memory usage during key listing
-            %% operations
-            {listkeys_backpressure, true},
-
-            %% This option specifies how many of each type of fsm may exist
-            %% concurrently.  This is for overload protection and is a new
-            %% mechanism that obsoletes 1.3's health checks. Note that this number
-            %% represents two potential processes, so +P in vm.args should be at 
-            %% least 3X the fsm_limit.
-            {fsm_limit, 50000},
-
-            %% Uncomment to make non-paginated results be sorted the
-            %% same way paginated results are: by term, then key.
-            %% In Riak 1.4.* before 1.4.4, all results were sorted this way
-            %% by default, which can adversely affect performance in some cases.
-            %% Setting this to true emulates that behavior.
-            %% {secondary_index_sort_default, true},
-
-            %% object_format controls which binary representation of a riak_object 
-            %% is stored on disk.
-            %% Current options are: v0, v1.
-            %% v0: Original erlang:term_to_binary format. Higher space overhead.
-            %% v1: New format for more compact storage of small values.
-            {object_format, v1}
-           ]},
-
- %% Riak Search Config
- {riak_search, [
-                %% To enable Search functionality set this 'true'.
-                {enabled, false}
-               ]},
-
- %% Merge Index Config
- {merge_index, [
-                %% The root dir to store search merge_index data
-                {data_root, "./data/merge_index"},
-
-                %% Size, in bytes, of the in-memory buffer.  When this
-                %% threshold has been reached the data is transformed
-                %% into a segment file which resides on disk.
-                {buffer_rollover_size, 1048576},
-
-                %% Overtime the segment files need to be compacted.
-                %% This is the maximum number of segments that will be
-                %% compacted at once.  A lower value will lead to
-                %% quicker but more frequent compactions.
-                {max_compact_segments, 20}
-               ]},
-
- %% Bitcask Config
- {bitcask, [
-             %% Configure how Bitcask writes data to disk.
-             %%   erlang: Erlang's built-in file API
-             %%      nif: Direct calls to the POSIX C API
-             %%
-             %% The NIF mode provides higher throughput for certain
-             %% workloads, but has the potential to negatively impact
-             %% the Erlang VM, leading to higher worst-case latencies
-             %% and possible throughput collapse.
-             {io_mode, erlang},
-
-             {data_root, "./data/bitcask"}
-           ]},
-
- %% eLevelDB Config
- {eleveldb, [
-             {data_root, "./data/leveldb"}
-            ]},
-
- %% Lager Config
- {lager, [
-            %% What handlers to install with what arguments
-            %% The defaults for the logfiles are to rotate the files when
-            %% they reach 10Mb or at midnight, whichever comes first, and keep
-            %% the last 5 rotations. See the lager README for a description of
-            %% the time rotation format:
-            %% https://github.com/basho/lager/blob/master/README.org
-            %%
-            %% If you wish to disable rotation, you can either set the size to 0
-            %% and the rotation time to "", or instead specify a 2-tuple that only
-            %% consists of {Logfile, Level}.
-            %%
-            %% If you wish to have riak log messages to syslog, you can use a handler
-            %% like this:
-            %%   {lager_syslog_backend, ["riak", daemon, info]},
-            %%
-            {handlers, [ 
-                           {lager_file_backend, [ 
-                               {"./log/error.log", error, 10485760, "$D0", 5}, 
-                               {"./log/console.log", info, 10485760, "$D0", 5} 
-                           ]} 
-                       ] },
-
-            %% Whether to write a crash log, and where.
-            %% Commented/omitted/undefined means no crash logger.
-            {crash_log, "./log/crash.log"},
-
-            %% Maximum size in bytes of events in the crash log - defaults to 65536
-            {crash_log_msg_size, 65536},
-
-            %% Maximum size of the crash log in bytes, before its rotated, set
-            %% to 0 to disable rotation - default is 0
-            {crash_log_size, 10485760},
-
-            %% What time to rotate the crash log - default is no time
-            %% rotation. See the lager README for a description of this format:
-            %% https://github.com/basho/lager/blob/master/README.org
-            {crash_log_date, "$D0"},
-
-            %% Number of rotated crash logs to keep, 0 means keep only the
-            %% current one - default is 0
-            {crash_log_count, 5},
-
-            %% Whether to redirect error_logger messages into lager - defaults to true
-            {error_logger_redirect, true},
-
-            %% maximum number of error_logger messages to handle in a second
-            %% lager 2.0.0 shipped with a limit of 50, which is a little low for riak's startup
-            {error_logger_hwm, 100}
-        ]},
-
- %% riak_sysmon config
- {riak_sysmon, [
-         %% To disable forwarding events of a particular type, use a
-         %% limit of 0.
-         {process_limit, 30},
-         {port_limit, 2},
-
-         %% Finding reasonable limits for a given workload is a matter
-         %% of experimentation.
-         %% NOTE: Enabling the 'gc_ms_limit' monitor (by setting non-zero)
-         %%       can cause performance problems on multi-CPU systems.
-         {gc_ms_limit, 0},
-         {heap_word_limit, 40111000},
-
-         %% Configure the following items to 'false' to disable logging
-         %% of that event type.
-         {busy_port, true},
-         {busy_dist_port, true}
-        ]},
-
- %% SASL config
- {sasl, [
-         {sasl_error_logger, false}
-        ]},
-
- %% riak_control config
- {riak_control, [
-                %% Set to false to disable the admin panel.
-                {enabled, true},
-
-                %% Authentication style used for access to the admin
-                %% panel. Valid styles are 'userlist' <TODO>.
-                {auth, userlist},
-
-                %% If auth is set to 'userlist' then this is the
-                %% list of usernames and passwords for access to the
-                %% admin panel.
-                {userlist, [{"user", "pass"}
-                           ]},
-
-                %% The admin panel is broken up into multiple
-                %% components, each of which is enabled or disabled
-                %% by one of these settings.
-                {admin, true}
-                ]},
- 
- %% erlang, constrain port range so we can open the internal firewall ports               
- { kernel, [
-            {inet_dist_listen_min, ${entity.erlangPortRangeStart?c}},
-            {inet_dist_listen_max, ${entity.erlangPortRangeEnd?c}}
-          ]}
-
-].

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak-cluster-with-solr.yaml
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak-cluster-with-solr.yaml b/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak-cluster-with-solr.yaml
deleted file mode 100644
index 93ef146..0000000
--- a/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak-cluster-with-solr.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-name: Cluster Riak & Solr
-location:
-  jclouds:aws-ec2:us-east-1:
-    osFamily: centos
-    osVersionRegex: 6\..*
-services:
-- type: brooklyn.entity.nosql.riak.RiakCluster
-  initialSize: 2
-  memberSpec:
-    $brooklyn:entitySpec:
-      type: brooklyn.entity.nosql.riak.RiakNode
-      searchEnabled: true
-  brooklyn.config:
-    provisioning.properties:
-      minCores: 2
-      minRam: 6gb
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak-mac.conf
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak-mac.conf b/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak-mac.conf
deleted file mode 100644
index d123000..0000000
--- a/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak-mac.conf
+++ /dev/null
@@ -1,494 +0,0 @@
-## Brooklyn note: file from 2.0.1 Mac install, with erlang section added, and ports templated
-
-## Where to emit the default log messages (typically at 'info'
-## severity):
-## off: disabled
-## file: the file specified by log.console.file
-## console: to standard output (seen when using `riak attach-direct`)
-## both: log.console.file and standard out.
-##
-## Default: file
-##
-## Acceptable values:
-##   - one of: off, file, console, both
-log.console = file
-
-## The severity level of the console log, default is 'info'.
-##
-## Default: info
-##
-## Acceptable values:
-##   - one of: debug, info, notice, warning, error, critical, alert, emergency, none
-log.console.level = info
-
-## When 'log.console' is set to 'file' or 'both', the file where
-## console messages will be logged.
-##
-## Default: $(platform_log_dir)/console.log
-##
-## Acceptable values:
-##   - the path to a file
-log.console.file = $(platform_log_dir)/console.log
-
-## The file where error messages will be logged.
-##
-## Default: $(platform_log_dir)/error.log
-##
-## Acceptable values:
-##   - the path to a file
-log.error.file = $(platform_log_dir)/error.log
-
-## When set to 'on', enables log output to syslog.
-##
-## Default: off
-##
-## Acceptable values:
-##   - on or off
-log.syslog = off
-
-## Whether to enable the crash log.
-##
-## Default: on
-##
-## Acceptable values:
-##   - on or off
-log.crash = on
-
-## If the crash log is enabled, the file where its messages will
-## be written.
-##
-## Default: $(platform_log_dir)/crash.log
-##
-## Acceptable values:
-##   - the path to a file
-log.crash.file = $(platform_log_dir)/crash.log
-
-## Maximum size in bytes of individual messages in the crash log
-##
-## Default: 64KB
-##
-## Acceptable values:
-##   - a byte size with units, e.g. 10GB
-log.crash.maximum_message_size = 64KB
-
-## Maximum size of the crash log in bytes, before it is rotated
-##
-## Default: 10MB
-##
-## Acceptable values:
-##   - a byte size with units, e.g. 10GB
-log.crash.size = 10MB
-
-## The schedule on which to rotate the crash log.  For more
-## information see:
-## https://github.com/basho/lager/blob/master/README.md#internal-log-rotation
-##
-## Default: $D0
-##
-## Acceptable values:
-##   - text
-log.crash.rotation = $D0
-
-## The number of rotated crash logs to keep. When set to
-## 'current', only the current open log file is kept.
-##
-## Default: 5
-##
-## Acceptable values:
-##   - an integer
-##   - the text "current"
-log.crash.rotation.keep = 5
-
-## Name of the Erlang node
-##
-## Default: riak@127.0.0.1
-##
-## Acceptable values:
-##   - text
-nodename = riak@${driver.hostname}
-
-## Cookie for distributed node communication.  All nodes in the
-## same cluster should use the same cookie or they will not be able to
-## communicate.
-##
-## Default: riak
-##
-## Acceptable values:
-##   - text
-distributed_cookie = riak
-
-## Sets the number of threads in async thread pool, valid range
-## is 0-1024. If thread support is available, the default is 64.
-## More information at: http://erlang.org/doc/man/erl.html
-##
-## Default: 64
-##
-## Acceptable values:
-##   - an integer
-erlang.async_threads = 64
-
-## The number of concurrent ports/sockets
-## Valid range is 1024-134217727
-##
-## Default: 65536
-##
-## Acceptable values:
-##   - an integer
-erlang.max_ports = 65536
-
-## Set scheduler forced wakeup interval. All run queues will be
-## scanned each Interval milliseconds. While there are sleeping
-## schedulers in the system, one scheduler will be woken for each
-## non-empty run queue found. An Interval of zero disables this
-## feature, which also is the default.
-## This feature is a workaround for long-running native code, and
-## native code that does not bump reductions properly.
-## More information: http://www.erlang.org/doc/man/erl.html#+sfwi
-##
-## Acceptable values:
-##   - an integer
-## erlang.schedulers.force_wakeup_interval = 500
-
-## Enable or disable scheduler compaction of load. By default
-## scheduler compaction of load is enabled. When enabled, load
-## balancing will strive for a load distribution which causes as many
-## scheduler threads as possible to be fully loaded (i.e., not run out
-## of work). This is accomplished by migrating load (e.g. runnable
-## processes) into a smaller set of schedulers when schedulers
-## frequently run out of work. When disabled, the frequency with which
-## schedulers run out of work will not be taken into account by the
-## load balancing logic.
-## More information: http://www.erlang.org/doc/man/erl.html#+scl
-##
-## Acceptable values:
-##   - one of: true, false
-## erlang.schedulers.compaction_of_load = false
-
-## Enable or disable scheduler utilization balancing of load. By
-## default scheduler utilization balancing is disabled and instead
-## scheduler compaction of load is enabled which will strive for a
-## load distribution which causes as many scheduler threads as
-## possible to be fully loaded (i.e., not run out of work). When
-## scheduler utilization balancing is enabled the system will instead
-## try to balance scheduler utilization between schedulers. That is,
-## strive for equal scheduler utilization on all schedulers.
-## More information: http://www.erlang.org/doc/man/erl.html#+sub
-##
-## Acceptable values:
-##   - one of: true, false
-## erlang.schedulers.utilization_balancing = true
-
-## Number of partitions in the cluster (only valid when first
-## creating the cluster). Must be a power of 2, minimum 8 and maximum
-## 1024.
-##
-## Default: 64
-##
-## Acceptable values:
-##   - an integer
-## ring_size = 64
-
-## Number of concurrent node-to-node transfers allowed.
-##
-## Default: 2
-##
-## Acceptable values:
-##   - an integer
-## transfer_limit = 2
-
-## Default cert location for https can be overridden
-## with the ssl config variable, for example:
-##
-## Acceptable values:
-##   - the path to a file
-## ssl.certfile = $(platform_etc_dir)/cert.pem
-
-## Default key location for https can be overridden with the ssl
-## config variable, for example:
-##
-## Acceptable values:
-##   - the path to a file
-## ssl.keyfile = $(platform_etc_dir)/key.pem
-
-## Default signing authority location for https can be overridden
-## with the ssl config variable, for example:
-##
-## Acceptable values:
-##   - the path to a file
-## ssl.cacertfile = $(platform_etc_dir)/cacertfile.pem
-
-## DTrace support Do not enable 'dtrace' unless your Erlang/OTP
-## runtime is compiled to support DTrace.  DTrace is available in
-## R15B01 (supported by the Erlang/OTP official source package) and in
-## R14B04 via a custom source repository & branch.
-##
-## Default: off
-##
-## Acceptable values:
-##   - on or off
-dtrace = off
-
-## Platform-specific installation paths (substituted by rebar)
-##
-## Default: ./bin
-##
-## Acceptable values:
-##   - the path to a directory
-platform_bin_dir = ./bin
-
-##
-## Default: ./data
-##
-## Acceptable values:
-##   - the path to a directory
-platform_data_dir = ./data
-
-##
-## Default: ./etc
-##
-## Acceptable values:
-##   - the path to a directory
-platform_etc_dir = ./etc
-
-##
-## Default: ./lib
-##
-## Acceptable values:
-##   - the path to a directory
-platform_lib_dir = ./lib
-
-##
-## Default: ./log
-##
-## Acceptable values:
-##   - the path to a directory
-platform_log_dir = ./log
-
-## Enable consensus subsystem. Set to 'on' to enable the
-## consensus subsystem used for strongly consistent Riak operations.
-##
-## Default: off
-##
-## Acceptable values:
-##   - on or off
-## strong_consistency = on
-
-## listener.http.<name> is an IP address and TCP port that the Riak
-## HTTP interface will bind.
-##
-## Default: 127.0.0.1:8098
-##
-## Acceptable values:
-##   - an IP/port pair, e.g. 127.0.0.1:10011
-listener.http.internal = 0.0.0.0:${entity.riakWebPort?c}
-
-## listener.protobuf.<name> is an IP address and TCP port that the Riak
-## Protocol Buffers interface will bind.
-##
-## Default: 127.0.0.1:8087
-##
-## Acceptable values:
-##   - an IP/port pair, e.g. 127.0.0.1:10011
-listener.protobuf.internal = 0.0.0.0:${entity.riakPbPort?c}
-
-## The maximum length to which the queue of pending connections
-## may grow. If set, it must be an integer > 0. If you anticipate a
-## huge number of connections being initialized *simultaneously*, set
-## this number higher.
-##
-## Default: 128
-##
-## Acceptable values:
-##   - an integer
-## protobuf.backlog = 128
-
-## listener.https.<name> is an IP address and TCP port that the Riak
-## HTTPS interface will bind.
-##
-## Acceptable values:
-##   - an IP/port pair, e.g. 127.0.0.1:10011
-## listener.https.internal = 127.0.0.1:8098
-
-## How Riak will repair out-of-sync keys. Some features require
-## this to be set to 'active', including search.
-## * active: out-of-sync keys will be repaired in the background
-## * passive: out-of-sync keys are only repaired on read
-## * active-debug: like active, but outputs verbose debugging
-## information
-##
-## Default: active
-##
-## Acceptable values:
-##   - one of: active, passive, active-debug
-anti_entropy = active
-
-## Specifies the storage engine used for Riak's key-value data
-## and secondary indexes (if supported).
-##
-## Default: bitcask
-##
-## Acceptable values:
-##   - one of: bitcask, leveldb, memory, multi
-storage_backend = bitcask
-
-## Controls which binary representation of a riak value is stored
-## on disk.
-## * 0: Original erlang:term_to_binary format. Higher space overhead.
-## * 1: New format for more compact storage of small values.
-##
-## Default: 1
-##
-## Acceptable values:
-##   - the integer 1
-##   - the integer 0
-object.format = 1
-
-## Reading or writing objects bigger than this size will write a
-## warning in the logs.
-##
-## Default: 5MB
-##
-## Acceptable values:
-##   - a byte size with units, e.g. 10GB
-object.size.warning_threshold = 5MB
-
-## Writing an object bigger than this will send a failure to the
-## client.
-##
-## Default: 50MB
-##
-## Acceptable values:
-##   - a byte size with units, e.g. 10GB
-object.size.maximum = 50MB
-
-## Writing an object with more than this number of siblings will
-## generate a warning in the logs.
-##
-## Default: 25
-##
-## Acceptable values:
-##   - an integer
-object.siblings.warning_threshold = 25
-
-## Writing an object with more than this number of siblings will
-## send a failure to the client.
-##
-## Default: 100
-##
-## Acceptable values:
-##   - an integer
-object.siblings.maximum = 100
-
-## A path under which bitcask data files will be stored.
-##
-## Default: $(platform_data_dir)/bitcask
-##
-## Acceptable values:
-##   - the path to a directory
-bitcask.data_root = $(platform_data_dir)/bitcask
-
-## Configure how Bitcask writes data to disk.
-## erlang: Erlang's built-in file API
-## nif: Direct calls to the POSIX C API
-## The NIF mode provides higher throughput for certain
-## workloads, but has the potential to negatively impact
-## the Erlang VM, leading to higher worst-case latencies
-## and possible throughput collapse.
-##
-## Default: erlang
-##
-## Acceptable values:
-##   - one of: erlang, nif
-bitcask.io_mode = erlang
-
-## Set to 'off' to disable the admin panel.
-##
-## Default: off
-##
-## Acceptable values:
-##   - on or off
-riak_control = on
-
-## Authentication mode used for access to the admin panel.
-##
-## Default: off
-##
-## Acceptable values:
-##   - one of: off, userlist
-riak_control.auth.mode = off
-
-## If riak control's authentication mode (riak_control.auth.mode)
-## is set to 'userlist' then this is the list of usernames and
-## passwords for access to the admin panel.
-## To create users with given names, add entries of the format:
-## riak_control.auth.user.USERNAME.password = PASSWORD
-## replacing USERNAME with the desired username and PASSWORD with the
-## desired password for that user.
-##
-## Acceptable values:
-##   - text
-## riak_control.auth.user.admin.password = pass
-
-## This parameter defines the percentage of total server memory
-## to assign to LevelDB. LevelDB will dynamically adjust its internal
-## cache sizes to stay within this size.  The memory size can
-## alternately be assigned as a byte count via leveldb.maximum_memory
-## instead.
-##
-## Default: 70
-##
-## Acceptable values:
-##   - an integer
-leveldb.maximum_memory.percent = 70
-
-## To enable Search set this 'on'.
-##
-## Default: off
-##
-## Acceptable values:
-##   - on or off
-search = off
-
-## How long Riak will wait for Solr to start. The start sequence
-## will be tried twice. If both attempts time out, then the Riak node
-## will be shut down. This may need to be increased as more data is
-## indexed and Solr takes longer to start. Values lower than 1s will
-## be rounded up to the minimum 1s.
-##
-## Default: 30s
-##
-## Acceptable values:
-##   - a time duration with units, e.g. '10s' for 10 seconds
-search.solr.start_timeout = 30s
-
-## The port number which Solr binds to.
-## NOTE: Binds on every interface.
-##
-## Default: 8093
-##
-## Acceptable values:
-##   - an integer
-search.solr.port = ${entity.searchSolrPort?c}
-
-## The port number which Solr JMX binds to.
-## NOTE: Binds on every interface.
-##
-## Default: 8985
-##
-## Acceptable values:
-##   - an integer
-search.solr.jmx_port = ${entity.searchSolrJmxPort?c}
-
-## The options to pass to the Solr JVM.  Non-standard options,
-## i.e. -XX, may not be portable across JVM implementations.
-## E.g. -XX:+UseCompressedStrings
-##
-## Default: -d64 -Xms1g -Xmx1g -XX:+UseStringCache -XX:+UseCompressedOops
-##
-## Acceptable values:
-##   - text
-search.solr.jvm_options = -d64 -Xms1g -Xmx1g -XX:+UseStringCache -XX:+UseCompressedOops
-
-## Erlang: constrain the port range so we can open the internal firewall ports
-erlang.distribution.port_range.minimum = ${entity.erlangPortRangeStart?c}
-erlang.distribution.port_range.maximum = ${entity.erlangPortRangeEnd?c}
\ No newline at end of file

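The port entries above are FreeMarker template expressions rather than literal riak.conf values: Brooklyn substitutes the entity's actual ports at install time, and the `?c` built-in renders the number in "computer" form (plain digits, no locale grouping). A minimal sketch of the rendered result, assuming the documented defaults of 8098 (HTTP) and 8087 (protobuf):

```
listener.http.internal = 0.0.0.0:8098
listener.protobuf.internal = 0.0.0.0:8087
```
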
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak-with-webapp-cluster.yaml
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak-with-webapp-cluster.yaml b/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak-with-webapp-cluster.yaml
deleted file mode 100644
index 6767cb7..0000000
--- a/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak-with-webapp-cluster.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-name: Riak Cluster with Webapp Cluster
-location: jclouds:softlayer:sjc01
-services:
-- type: brooklyn.entity.nosql.riak.RiakCluster
-  initialSize: 2
-  id: cluster
-  brooklyn.config:
-    install.version: 2.0.0
-- type: brooklyn.entity.webapp.ControlledDynamicWebAppCluster
-  name: Web Cluster
-  brooklyn.config:
-    initialSize: 2
-    controlleddynamicwebappcluster.controllerSpec:
-      $brooklyn:entitySpec:
-        type: brooklyn.entity.proxy.nginx.NginxController
-        brooklyn.config:
-          member.sensor.hostname: "host.subnet.hostname"
-    wars.root: "https://s3-eu-west-1.amazonaws.com/brooklyn-clocker/brooklyn-example-hello-world-sql-webapp.war"
-    java.sysprops: 
-      brooklyn.example.riak.nodes: $brooklyn:component("cluster").attributeWhenReady("riak.cluster.nodeList")
-
-# Alternative URL for War file if available on classpath
-# "classpath://brooklyn-example-hello-world-sql-webapp.war"

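The closing comment of this blueprint mentions a classpath alternative for the WAR; a minimal sketch of that variant (only the `wars.root` URL scheme changes, assuming the WAR is bundled on the classpath):

```
    wars.root: "classpath://brooklyn-example-hello-world-sql-webapp.war"
```
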
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak-with-webapp.yaml
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak-with-webapp.yaml b/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak-with-webapp.yaml
deleted file mode 100644
index db2bef1..0000000
--- a/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak-with-webapp.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-name: Riak Cluster with Webapp
-location: aws-ec2:eu-west-1
-services:
-- type: brooklyn.entity.nosql.riak.RiakCluster
-  initialSize: 2
-  id: cluster
-- type: brooklyn.entity.webapp.jboss.JBoss7Server
-  name: Web
-  brooklyn.config:
-    wars.root: "https://s3-eu-west-1.amazonaws.com/brooklyn-clocker/brooklyn-example-hello-world-sql-webapp.war"
-    java.sysprops: 
-      brooklyn.example.riak.nodes: $brooklyn:component("cluster").attributeWhenReady("riak.cluster.nodeList")
-  provisioning.properties:
-    osFamily: centos
-
-# Alternative URL for War file if available on classpath
-# "classpath://brooklyn-example-hello-world-sql-webapp.war"

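In both webapp blueprints the `attributeWhenReady` DSL call does the sequencing: the web tier's configuration blocks until the `cluster` entity publishes its `riak.cluster.nodeList` sensor, so the application never starts with an empty node list. The general shape of the pattern, reusing the sensor and system property names from the blueprint above:

```
java.sysprops:
  brooklyn.example.riak.nodes: $brooklyn:component("cluster").attributeWhenReady("riak.cluster.nodeList")
```
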
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak.conf
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak.conf b/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak.conf
deleted file mode 100644
index 125fa77..0000000
--- a/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak.conf
+++ /dev/null
@@ -1,494 +0,0 @@
-## Brooklyn note: file from 2.0.1 Ubuntu install, with erlang section added, and ports templated
-
-## Where to emit the default log messages (typically at 'info'
-## severity):
-## off: disabled
-## file: the file specified by log.console.file
-## console: to standard output (seen when using `riak attach-direct`)
-## both: log.console.file and standard out.
-## 
-## Default: file
-## 
-## Acceptable values:
-##   - one of: off, file, console, both
-log.console = file
-
-## The severity level of the console log, default is 'info'.
-## 
-## Default: info
-## 
-## Acceptable values:
-##   - one of: debug, info, notice, warning, error, critical, alert, emergency, none
-log.console.level = info
-
-## When 'log.console' is set to 'file' or 'both', the file where
-## console messages will be logged.
-## 
-## Default: $(platform_log_dir)/console.log
-## 
-## Acceptable values:
-##   - the path to a file
-log.console.file = $(platform_log_dir)/console.log
-
-## The file where error messages will be logged.
-## 
-## Default: $(platform_log_dir)/error.log
-## 
-## Acceptable values:
-##   - the path to a file
-log.error.file = $(platform_log_dir)/error.log
-
-## When set to 'on', enables log output to syslog.
-## 
-## Default: off
-## 
-## Acceptable values:
-##   - on or off
-log.syslog = off
-
-## Whether to enable the crash log.
-## 
-## Default: on
-## 
-## Acceptable values:
-##   - on or off
-log.crash = on
-
-## If the crash log is enabled, the file where its messages will
-## be written.
-## 
-## Default: $(platform_log_dir)/crash.log
-## 
-## Acceptable values:
-##   - the path to a file
-log.crash.file = $(platform_log_dir)/crash.log
-
-## Maximum size in bytes of individual messages in the crash log
-## 
-## Default: 64KB
-## 
-## Acceptable values:
-##   - a byte size with units, e.g. 10GB
-log.crash.maximum_message_size = 64KB
-
-## Maximum size of the crash log in bytes, before it is rotated
-## 
-## Default: 10MB
-## 
-## Acceptable values:
-##   - a byte size with units, e.g. 10GB
-log.crash.size = 10MB
-
-## The schedule on which to rotate the crash log.  For more
-## information see:
-## https://github.com/basho/lager/blob/master/README.md#internal-log-rotation
-## 
-## Default: $D0
-## 
-## Acceptable values:
-##   - text
-log.crash.rotation = $D0
-
-## The number of rotated crash logs to keep. When set to
-## 'current', only the current open log file is kept.
-## 
-## Default: 5
-## 
-## Acceptable values:
-##   - an integer
-##   - the text "current"
-log.crash.rotation.keep = 5
-
-## Name of the Erlang node
-## 
-## Default: riak@127.0.0.1
-## 
-## Acceptable values:
-##   - text
-nodename = riak@${driver.subnetHostname}
-
-## Cookie for distributed node communication.  All nodes in the
-## same cluster should use the same cookie or they will not be able to
-## communicate.
-## 
-## Default: riak
-## 
-## Acceptable values:
-##   - text
-distributed_cookie = riak
-
-## Sets the number of threads in async thread pool, valid range
-## is 0-1024. If thread support is available, the default is 64.
-## More information at: http://erlang.org/doc/man/erl.html
-## 
-## Default: 64
-## 
-## Acceptable values:
-##   - an integer
-erlang.async_threads = 64
-
-## The number of concurrent ports/sockets
-## Valid range is 1024-134217727
-## 
-## Default: 65536
-## 
-## Acceptable values:
-##   - an integer
-erlang.max_ports = 65536
-
-## Set scheduler forced wakeup interval. All run queues will be
-## scanned each Interval milliseconds. While there are sleeping
-## schedulers in the system, one scheduler will be woken for each
-## non-empty run queue found. An Interval of zero disables this
-## feature, which also is the default.
-## This feature is a workaround for long-running native code, and
-## native code that does not bump reductions properly.
-## More information: http://www.erlang.org/doc/man/erl.html#+sfwi
-## 
-## Acceptable values:
-##   - an integer
-## erlang.schedulers.force_wakeup_interval = 500
-
-## Enable or disable scheduler compaction of load. By default
-## scheduler compaction of load is enabled. When enabled, load
-## balancing will strive for a load distribution which causes as many
-## scheduler threads as possible to be fully loaded (i.e., not run out
-## of work). This is accomplished by migrating load (e.g. runnable
-## processes) into a smaller set of schedulers when schedulers
-## frequently run out of work. When disabled, the frequency with which
-## schedulers run out of work will not be taken into account by the
-## load balancing logic.
-## More information: http://www.erlang.org/doc/man/erl.html#+scl
-## 
-## Acceptable values:
-##   - one of: true, false
-## erlang.schedulers.compaction_of_load = false
-
-## Enable or disable scheduler utilization balancing of load. By
-## default scheduler utilization balancing is disabled and instead
-## scheduler compaction of load is enabled which will strive for a
-## load distribution which causes as many scheduler threads as
-## possible to be fully loaded (i.e., not run out of work). When
-## scheduler utilization balancing is enabled the system will instead
-## try to balance scheduler utilization between schedulers. That is,
-## strive for equal scheduler utilization on all schedulers.
-## More information: http://www.erlang.org/doc/man/erl.html#+sub
-## 
-## Acceptable values:
-##   - one of: true, false
-## erlang.schedulers.utilization_balancing = true
-
-## Number of partitions in the cluster (only valid when first
-## creating the cluster). Must be a power of 2, minimum 8 and maximum
-## 1024.
-## 
-## Default: 64
-## 
-## Acceptable values:
-##   - an integer
-## ring_size = 64
-
-## Number of concurrent node-to-node transfers allowed.
-## 
-## Default: 2
-## 
-## Acceptable values:
-##   - an integer
-## transfer_limit = 2
-
-## Default cert location for https can be overridden
-## with the ssl config variable, for example:
-## 
-## Acceptable values:
-##   - the path to a file
-## ssl.certfile = $(platform_etc_dir)/cert.pem
-
-## Default key location for https can be overridden with the ssl
-## config variable, for example:
-## 
-## Acceptable values:
-##   - the path to a file
-## ssl.keyfile = $(platform_etc_dir)/key.pem
-
-## Default signing authority location for https can be overridden
-## with the ssl config variable, for example:
-## 
-## Acceptable values:
-##   - the path to a file
-## ssl.cacertfile = $(platform_etc_dir)/cacertfile.pem
-
-## DTrace support. Do not enable 'dtrace' unless your Erlang/OTP
-## runtime is compiled to support DTrace.  DTrace is available in
-## R15B01 (supported by the Erlang/OTP official source package) and in
-## R14B04 via a custom source repository & branch.
-## 
-## Default: off
-## 
-## Acceptable values:
-##   - on or off
-dtrace = off
-
-## Platform-specific installation paths (substituted by rebar)
-## 
-## Default: /usr/sbin
-## 
-## Acceptable values:
-##   - the path to a directory
-platform_bin_dir = /usr/sbin
-
-## 
-## Default: /var/lib/riak
-## 
-## Acceptable values:
-##   - the path to a directory
-platform_data_dir = /var/lib/riak
-
-## 
-## Default: /etc/riak
-## 
-## Acceptable values:
-##   - the path to a directory
-platform_etc_dir = /etc/riak
-
-## 
-## Default: /usr/lib64/riak/lib
-## 
-## Acceptable values:
-##   - the path to a directory
-platform_lib_dir = /usr/lib64/riak/lib
-
-## 
-## Default: /var/log/riak
-## 
-## Acceptable values:
-##   - the path to a directory
-platform_log_dir = /var/log/riak
-
-## Enable consensus subsystem. Set to 'on' to enable the
-## consensus subsystem used for strongly consistent Riak operations.
-## 
-## Default: off
-## 
-## Acceptable values:
-##   - on or off
-## strong_consistency = on
-
-## listener.http.<name> is an IP address and TCP port that the Riak
-## HTTP interface will bind.
-## 
-## Default: 127.0.0.1:8098
-## 
-## Acceptable values:
-##   - an IP/port pair, e.g. 127.0.0.1:10011
-listener.http.internal = 0.0.0.0:${entity.riakWebPort?c}
-
-## listener.protobuf.<name> is an IP address and TCP port that the Riak
-## Protocol Buffers interface will bind.
-## 
-## Default: 127.0.0.1:8087
-## 
-## Acceptable values:
-##   - an IP/port pair, e.g. 127.0.0.1:10011
-listener.protobuf.internal = 0.0.0.0:${entity.riakPbPort?c}
-
-## The maximum length to which the queue of pending connections
-## may grow. If set, it must be an integer > 0. If you anticipate a
-## huge number of connections being initialized *simultaneously*, set
-## this number higher.
-## 
-## Default: 128
-## 
-## Acceptable values:
-##   - an integer
-## protobuf.backlog = 128
-
-## listener.https.<name> is an IP address and TCP port that the Riak
-## HTTPS interface will bind.
-## 
-## Acceptable values:
-##   - an IP/port pair, e.g. 127.0.0.1:10011
-## listener.https.internal = 127.0.0.1:8098
-
-## How Riak will repair out-of-sync keys. Some features require
-## this to be set to 'active', including search.
-## * active: out-of-sync keys will be repaired in the background
-## * passive: out-of-sync keys are only repaired on read
-## * active-debug: like active, but outputs verbose debugging
-## information
-## 
-## Default: active
-## 
-## Acceptable values:
-##   - one of: active, passive, active-debug
-anti_entropy = active
-
-## Specifies the storage engine used for Riak's key-value data
-## and secondary indexes (if supported).
-## 
-## Default: bitcask
-## 
-## Acceptable values:
-##   - one of: bitcask, leveldb, memory, multi
-storage_backend = bitcask
-
-## Controls which binary representation of a riak value is stored
-## on disk.
-## * 0: Original erlang:term_to_binary format. Higher space overhead.
-## * 1: New format for more compact storage of small values.
-## 
-## Default: 1
-## 
-## Acceptable values:
-##   - the integer 1
-##   - the integer 0
-object.format = 1
-
-## Reading or writing objects bigger than this size will write a
-## warning in the logs.
-## 
-## Default: 5MB
-## 
-## Acceptable values:
-##   - a byte size with units, e.g. 10GB
-object.size.warning_threshold = 5MB
-
-## Writing an object bigger than this will send a failure to the
-## client.
-## 
-## Default: 50MB
-## 
-## Acceptable values:
-##   - a byte size with units, e.g. 10GB
-object.size.maximum = 50MB
-
-## Writing an object with more than this number of siblings will
-## generate a warning in the logs.
-## 
-## Default: 25
-## 
-## Acceptable values:
-##   - an integer
-object.siblings.warning_threshold = 25
-
-## Writing an object with more than this number of siblings will
-## send a failure to the client.
-## 
-## Default: 100
-## 
-## Acceptable values:
-##   - an integer
-object.siblings.maximum = 100
-
-## A path under which bitcask data files will be stored.
-## 
-## Default: $(platform_data_dir)/bitcask
-## 
-## Acceptable values:
-##   - the path to a directory
-bitcask.data_root = $(platform_data_dir)/bitcask
-
-## Configure how Bitcask writes data to disk.
-## erlang: Erlang's built-in file API
-## nif: Direct calls to the POSIX C API
-## The NIF mode provides higher throughput for certain
-## workloads, but has the potential to negatively impact
-## the Erlang VM, leading to higher worst-case latencies
-## and possible throughput collapse.
-## 
-## Default: erlang
-## 
-## Acceptable values:
-##   - one of: erlang, nif
-bitcask.io_mode = erlang
-
-## Set to 'off' to disable the admin panel.
-## 
-## Default: off
-## 
-## Acceptable values:
-##   - on or off
-riak_control = on
-
-## Authentication mode used for access to the admin panel.
-## 
-## Default: off
-## 
-## Acceptable values:
-##   - one of: off, userlist
-riak_control.auth.mode = off
-
-## If riak control's authentication mode (riak_control.auth.mode)
-## is set to 'userlist' then this is the list of usernames and
-## passwords for access to the admin panel.
-## To create users with given names, add entries of the format:
-## riak_control.auth.user.USERNAME.password = PASSWORD
-## replacing USERNAME with the desired username and PASSWORD with the
-## desired password for that user.
-## 
-## Acceptable values:
-##   - text
-## riak_control.auth.user.admin.password = pass
-
-## This parameter defines the percentage of total server memory
-## to assign to LevelDB. LevelDB will dynamically adjust its internal
-## cache sizes to stay within this size.  The memory size can
-## alternately be assigned as a byte count via leveldb.maximum_memory
-## instead.
-## 
-## Default: 70
-## 
-## Acceptable values:
-##   - an integer
-leveldb.maximum_memory.percent = 70
-
-## To enable Search set this 'on'.
-## 
-## Default: off
-## 
-## Acceptable values:
-##   - on or off
-search = ${entity.isSearchEnabled()?string('on','off')}
-
-## How long Riak will wait for Solr to start. The start sequence
-## will be tried twice. If both attempts time out, then the Riak node
-## will be shut down. This may need to be increased as more data is
-## indexed and Solr takes longer to start. Values lower than 1s will
-## be rounded up to the minimum 1s.
-## 
-## Default: 30s
-## 
-## Acceptable values:
-##   - a time duration with units, e.g. '10s' for 10 seconds
-search.solr.start_timeout = 30s
-
-## The port number which Solr binds to.
-## NOTE: Binds on every interface.
-## 
-## Default: 8093
-## 
-## Acceptable values:
-##   - an integer
-search.solr.port = ${entity.searchSolrPort?c}
-
-## The port number which Solr JMX binds to.
-## NOTE: Binds on every interface.
-## 
-## Default: 8985
-## 
-## Acceptable values:
-##   - an integer
-search.solr.jmx_port = ${entity.searchSolrJmxPort?c}
-
-## The options to pass to the Solr JVM.  Non-standard options,
-## i.e. -XX, may not be portable across JVM implementations.
-## E.g. -XX:+UseCompressedStrings
-## 
-## Default: -d64 -Xms1g -Xmx1g -XX:+UseStringCache -XX:+UseCompressedOops
-## 
-## Acceptable values:
-##   - text
-search.solr.jvm_options = -d64 -Xms1g -Xmx1g -XX:+UseStringCache -XX:+UseCompressedOops
-
-## Erlang: constrain the port range so we can open the internal firewall ports
-erlang.distribution.port_range.minimum = ${entity.erlangPortRangeStart?c}
-erlang.distribution.port_range.maximum = ${entity.erlangPortRangeEnd?c}

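Unlike the packaged-default copy above (where `search = off` is hard-coded), this template drives the search toggle from the entity: FreeMarker's `?string('on','off')` built-in emits the first value when the boolean is true and the second when it is false. A sketch of the rendered line, assuming `isSearchEnabled()` returns true:

```
search = on
```
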
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak.md
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak.md b/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak.md
deleted file mode 100644
index 8def551..0000000
--- a/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak.md
+++ /dev/null
@@ -1,67 +0,0 @@
-# Riak Examples
-
-Here is a selection of examples showing how to deploy Riak.
-
-
-### A Single-Node Deployment
-
-```
-location: YOUR_CLOUD
-services:
-- type: brooklyn.entity.nosql.riak.RiakNode
-```
-
-
-### A Cluster
-
-```
-services:
-- type: brooklyn.entity.nosql.riak.RiakCluster
-  location: YOUR_CLOUD
-  initialSize: 5
-```
-
-
-### A Cluster at a Specific Version with a Web App
-
-```
-services:
-- type: brooklyn.entity.nosql.riak.RiakCluster
-  id: cluster
-  brooklyn.config:
-    initialSize: 2
-    install.version: 2.0.0
-- type: brooklyn.entity.webapp.ControlledDynamicWebAppCluster
-  brooklyn.config:
-    initialSize: 2
-    wars.root: https://s3-eu-west-1.amazonaws.com/brooklyn-clocker/brooklyn-example-hello-world-sql-webapp.war
-    java.sysprops: 
-      brooklyn.example.riak.nodes: $brooklyn:component("cluster").attributeWhenReady("riak.cluster.nodeList")
-```
-
-----
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing,
-software distributed under the License is distributed on an
-"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-KIND, either express or implied.  See the License for the
-specific language governing permissions and limitations
-under the License.

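The `YOUR_CLOUD` placeholder in these examples accepts the same location strings the deleted blueprints use, e.g. `aws-ec2:eu-west-1` or `jclouds:softlayer:sjc01`. A minimal sketch of the cluster example with a concrete location substituted:

```
location: aws-ec2:eu-west-1
services:
- type: brooklyn.entity.nosql.riak.RiakCluster
  initialSize: 5
```
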
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak.png
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak.png b/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak.png
deleted file mode 100644
index a230b04..0000000
Binary files a/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/riak.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/vm.args
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/vm.args b/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/vm.args
deleted file mode 100644
index be58d78..0000000
--- a/software/nosql/src/main/resources/brooklyn/entity/nosql/riak/vm.args
+++ /dev/null
@@ -1,64 +0,0 @@
-##### Brooklyn note: File from OSX distribution of Riak 1.4.8
-
-## Name of the riak node
--name riak@${driver.subnetHostname}
-
-## Cookie for distributed erlang.  All nodes in the same cluster
-## should use the same cookie or they will not be able to communicate.
--setcookie riak
-
-## Heartbeat management; auto-restarts VM if it dies or becomes unresponsive
-## (Disabled by default..use with caution!)
-##-heart
-
-## Enable kernel poll and a few async threads
-+K true
-+A 64
-
-## Treat error_logger warnings as warnings
-+W w
-
-## Increase number of concurrent ports/sockets
--env ERL_MAX_PORTS 64000
-
-## Tweak GC to run more often 
--env ERL_FULLSWEEP_AFTER 0
-
-## Set the location of crash dumps
--env ERL_CRASH_DUMP ./log/erl_crash.dump
-
-## Raise the ETS table limit
--env ERL_MAX_ETS_TABLES 256000
-
-## Force the erlang VM to use SMP
--smp enable
-
-## For nodes with many busy_dist_port events, Basho recommends
-## raising the sender-side network distribution buffer size.
-## 32MB may not be sufficient for some workloads and is a suggested
-## starting point.
-## The Erlang/OTP default is 1024 (1 megabyte).
-## See: http://www.erlang.org/doc/man/erl.html#%2bzdbbl
-##+zdbbl 32768
-
-## Raise the default erlang process limit 
-+P 256000
-
-## Erlang VM scheduler tuning.
-## Prerequisite: a patched VM from Basho, or a VM compiled separately
-## with this patch applied:
-##     https://gist.github.com/evanmcc/a599f4c6374338ed672e
-##+sfwi 500
-
-## Begin SSL distribution items, DO NOT DELETE OR EDIT THIS COMMENT
-
-## To enable SSL encryption of the Erlang intra-cluster communication,
-## un-comment the three lines below and make certain that the paths
-## point to correct PEM data files.  See docs TODO for details.
-
-## -proto_dist inet_ssl
-## -ssl_dist_opt client_certfile "${driver.riakEtcDir}/erlclient.pem"
-## -ssl_dist_opt server_certfile "${driver.riakEtcDir}/erlserver.pem"
-
-## End SSL distribution items, DO NOT DELETE OR EDIT THIS COMMENT
-

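The commented `+zdbbl` suggestion is specified in kilobytes: 32768 KB is the 32 MB starting point the comment describes, versus the Erlang/OTP default of 1024 KB (1 MB). Enabling it is a one-line uncomment:

```
+zdbbl 32768
```
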
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/solr/solr.xml
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/solr/solr.xml b/software/nosql/src/main/resources/brooklyn/entity/nosql/solr/solr.xml
deleted file mode 100644
index 6e12b5c..0000000
--- a/software/nosql/src/main/resources/brooklyn/entity/nosql/solr/solr.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-[#ftl]
-<?xml version="1.0" encoding="UTF-8"?>
-<solr>
-  <int name="coreLoadThreads">4</int>
-
-  <solrcloud>
-    <str name="host">${driver.hostname}</str>
-    <int name="hostPort">${entity.solrPort?c}</int>
-    <str name="hostContext">solr</str>
-    <int name="zkClientTimeout">15000</int>
-    <bool name="genericCoreNodeNames">true</bool>
-  </solrcloud>
-
-  <shardHandlerFactory name="shardHandlerFactory"
-    class="HttpShardHandlerFactory">
-    <int name="socketTimeout">0</int>
-    <int name="connTimeout">0</int>
-  </shardHandlerFactory>
-</solr>

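This solr.xml is likewise a FreeMarker template (note the `[#ftl]` header): `driver.hostname` and `entity.solrPort` are substituted when the node is installed. A minimal sketch of a blueprint that would exercise it, assuming the Solr entity from the same (pre-rename) package:

```
location: YOUR_CLOUD
services:
- type: brooklyn.entity.nosql.solr.SolrServer
```
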
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-1.2.yaml
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-1.2.yaml b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-1.2.yaml
new file mode 100644
index 0000000..045bb45
--- /dev/null
+++ b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-1.2.yaml
@@ -0,0 +1,644 @@
+[#ftl]
+#
+# Cassandra storage config YAML 
+
+# NOTE:
+#   See http://wiki.apache.org/cassandra/StorageConfiguration for
+#   full explanations of configuration directives
+# /NOTE
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+cluster_name: '${entity.clusterName}'
+
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+#
+# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
+# and will use the initial_token as described below.
+#
+# Specifying initial_token will override this setting.
+#
+# If you already have a cluster with 1 token per node, and wish to migrate to 
+# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
+num_tokens: ${entity.numTokensPerNode?c}
+
+# If you haven't specified num_tokens, or have set it to the default of 1 then
+# you should always specify InitialToken when setting up a production
+# cluster for the first time, and often when adding capacity later.
+# The principle is that each node should be given an equal slice of
+# the token ring; see http://wiki.apache.org/cassandra/Operations
+# for more details.
+#
+# If blank, Cassandra will request a token bisecting the range of
+# the heaviest-loaded existing node.  If there is no load information
+# available, such as is the case with a new cluster, it will pick
+# a random token, which will lead to hot spots.
+initial_token: ${entity.tokensAsString}
+
+# See http://wiki.apache.org/cassandra/HintedHandoff
+hinted_handoff_enabled: true
+# this defines the maximum amount of time a dead host will have hints
+# generated.  After it has been dead this long, hints will be dropped.
+max_hint_window_in_ms: 10800000 # 3 hours
+# throttle in KB per second, per delivery thread
+hinted_handoff_throttle_in_kb: 1024
+# Number of threads with which to deliver hints;
+# Consider increasing this number when you have multi-dc deployments, since
+# cross-dc handoff tends to be slower
+max_hints_delivery_threads: 2
+
+# The following setting populates the page cache on memtable flush and compaction
+# WARNING: Enable this setting only when the whole node's data fits in memory.
+# Defaults to: false
+# populate_io_cache_on_flush: false
+
+# authentication backend, implementing IAuthenticator; used to identify users
+authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
+
+# authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+authorizer: org.apache.cassandra.auth.AllowAllAuthorizer
+
+# The partitioner is responsible for distributing rows (by key) across
+# nodes in the cluster.  Any IPartitioner may be used, including your
+# own as long as it is on the classpath.  Out of the box, Cassandra
+# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner
+# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}.
+# 
+# - RandomPartitioner distributes rows across the cluster evenly by md5.
+#   This is the default prior to 1.2 and is retained for compatibility.
+# - Murmur3Partitioner is similar to RandomPartitioner but uses Murmur3_128
+#   Hash Function instead of md5.  When in doubt, this is the best option.
+# - ByteOrderedPartitioner orders rows lexically by key bytes.  BOP allows
+#   scanning rows in key order, but the ordering can generate hot spots
+#   for sequential insertion workloads.
+# - OrderPreservingPartitioner is an obsolete form of BOP that stores
+#   keys in a less-efficient format and only works with keys that are
+#   UTF8-encoded Strings.
+# - CollatingOPP collates according to EN,US rules rather than lexical byte
+#   ordering.  Use this as an example if you need custom collation.
+#
+# See http://wiki.apache.org/cassandra/Operations for more on
+# partitioners and token selection.
+partitioner: org.apache.cassandra.dht.Murmur3Partitioner
+
+# directories where Cassandra should store data on disk.
+data_file_directories:
+    - ${driver.runDir}/data
+
+# commit log
+commitlog_directory: ${driver.runDir}/commitlog
+
+# policy for data disk failures:
+# stop: shut down gossip and Thrift, leaving the node effectively dead, but
+#       still inspectable via JMX.
+# best_effort: stop using the failed disk and respond to requests based on
+#              remaining available sstables.  This means you WILL see obsolete
+#              data at CL.ONE!
+# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
+disk_failure_policy: stop
+
+# Maximum size of the key cache in memory.
+#
+# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
+# minimum, sometimes more. The key cache is fairly tiny for the amount of
+# time it saves, so it's worthwhile to use it at large numbers.
+# The row cache saves even more time, but must store the whole values of
+# its rows, so it is extremely space-intensive. It's best to only use the
+# row cache if you have hot rows or static rows.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
+key_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the key cache. Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 14400 or 4 hours.
+key_cache_save_period: 14400
+
+# Number of keys from the key cache to save
+# Disabled by default, meaning all keys are going to be saved
+# key_cache_keys_to_save: 100
+
+# Maximum size of the row cache in memory.
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is 0, to disable row caching.
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should
+# save the row cache. Caches are saved to saved_caches_directory as specified
+# in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save
+# Disabled by default, meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# The provider for the row cache to use.
+#
+# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider
+#
+# SerializingCacheProvider serialises the contents of the row and stores
+# it in native memory, i.e., off the JVM Heap. Serialized rows take
+# significantly less memory than "live" rows in the JVM, so you can cache
+# more rows in a given memory footprint.  And storing the cache off-heap
+# means you can use smaller heap sizes, reducing the impact of GC pauses.
+#
+# It is also valid to specify the fully-qualified class name to a class
+# that implements org.apache.cassandra.cache.IRowCacheProvider.
+#
+# Defaults to SerializingCacheProvider
+row_cache_provider: SerializingCacheProvider
+
+# saved caches
+saved_caches_directory: ${driver.runDir}/saved_caches
+
+# commitlog_sync may be either "periodic" or "batch." 
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk.  It will wait up to
+# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
+# performing the sync.
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 50
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments.  A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been 
+# flushed to sstables.  
+#
+# The default size is 32, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+commitlog_segment_size_in_mb: 32
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map<String, String> of parameters will do.
+seed_provider:
+    # Addresses of hosts that are deemed contact points. 
+    # Cassandra nodes use this list of hosts to find each other and learn
+    # the topology of the ring.  You must change this if you are running
+    # multiple nodes!
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          # seeds is actually a comma-delimited list of addresses.
+          # Ex: "<ip1>,<ip2>,<ip3>"
+          - seeds: "${entity.seeds}"
+
+# emergency pressure valve: each time heap usage after a full (CMS)
+# garbage collection is above this fraction of the max, Cassandra will
+# flush the largest memtables.  
+#
+# Set to 1.0 to disable.  Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+#
+# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY:
+# it is most effective under light to moderate load, or read-heavy
+# workloads; under truly massive write load, it will often be too
+# little, too late.
+flush_largest_memtables_at: 0.75
+
+# emergency pressure valve #2: the first time heap usage after a full
+# (CMS) garbage collection is above this fraction of the max,
+# Cassandra will reduce cache maximum _capacity_ to the given fraction
+# of the current _size_.  Should usually be set substantially above
+# flush_largest_memtables_at, since that will have less long-term
+# impact on the system.  
+# 
+# Set to 1.0 to disable.  Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+reduce_cache_sizes_at: 0.85
+reduce_cache_capacity_to: 0.6
+
+# For workloads with more data than can fit in memory, Cassandra's
+# bottleneck will be reads that need to fetch data from
+# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
+# order to allow the operations to enqueue low enough in the stack
+# that the OS and drives can reorder them.
+#
+# On the other hand, since writes are almost never IO bound, the ideal
+# number of "concurrent_writes" is dependent on the number of cores in
+# your system; (8 * number_of_cores) is a good rule of thumb.
+concurrent_reads: 32
+concurrent_writes: 32
+
+# Total memory to use for memtables.  Cassandra will flush the largest
+# memtable when this much memory is used.
+# If omitted, Cassandra will set it to 1/3 of the heap.
+# memtable_total_space_in_mb: 2048
+
+# Total space to use for commitlogs.  Since commitlog segments are
+# mmapped, and hence use up address space, the default size is 32
+# on 32-bit JVMs, and 1024 on 64-bit JVMs.
+#
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Cassandra will flush every dirty CF in the oldest
+# segment and remove it.  So a small total commitlog space will tend
+# to cause more flush activity on less-active columnfamilies.
+# commitlog_total_space_in_mb: 4096
+
+# This sets the amount of memtable flush writer threads.  These will
+# be blocked by disk io, and each one will hold a memtable in memory
+# while blocked. If you have a large heap and many data directories,
+# you can increase this value for better flush performance.
+# By default this will be set to the amount of data directories defined.
+#memtable_flush_writers: 1
+
+# the number of full memtables to allow pending flush, that is,
+# waiting for a writer thread.  At a minimum, this should be set to
+# the maximum number of secondary indexes created on a single CF.
+memtable_flush_queue_size: 4
+
+# Whether to, when doing sequential writing, fsync() at intervals in
+# order to force the operating system to flush the dirty
+# buffers. Enable this to avoid sudden dirty buffer flushing from
+# impacting read latencies. Almost always a good idea on SSD:s; not
+# necessarily on platters.
+trickle_fsync: false
+trickle_fsync_interval_in_kb: 10240
+
+# TCP port, for commands and data
+storage_port: ${entity.gossipPort?c}
+
+# SSL port, for encrypted communication.  Unused unless enabled in
+# encryption_options
+ssl_storage_port: ${entity.sslGossipPort?c}
+
+# Address to bind to and tell other Cassandra nodes to connect to. You
+# _must_ change this if you want multiple nodes to be able to
+# communicate!
+# 
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing *if* the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting this to 0.0.0.0 is always wrong.
+listen_address: ${entity.listenAddress}
+
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+broadcast_address: ${entity.broadcastAddress}
+
+# Whether to start the native transport server.
+# Currently, only the thrift server is started by default because the native
+# transport is considered beta.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+start_native_transport: true
+# port for the CQL native transport to listen for clients on
+native_transport_port: ${entity.nativeTransportPort?c}
+# The minimum and maximum threads for handling requests when the native
+# transport is used. The meaning of these is similar to that of
+# rpc_min_threads and rpc_max_threads, though the defaults differ slightly and
+# are the ones below:
+#native_transport_min_threads: 16
+#native_transport_max_threads: 128
+
+
+# Whether to start the thrift rpc server.
+start_rpc: true
+# The address to bind the Thrift RPC service to -- clients connect
+# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
+# you want Thrift to listen on all interfaces.
+# 
+# Leaving this blank has the same effect it does for ListenAddress,
+# (i.e. it will be based on the configured hostname of the node).
+rpc_address: ${entity.rpcAddress}
+# port for Thrift to listen for clients on
+rpc_port: ${entity.thriftPort?c}
+
+# enable or disable keepalive on rpc connections
+rpc_keepalive: true
+
+# Cassandra provides three out-of-the-box options for the RPC Server:
+#
+# sync  -> One thread per thrift connection. For a very large number of clients, memory
+#          will be your limiting factor. On a 64 bit JVM, 128KB is the minimum stack size
+#          per thread, and that will correspond to your use of virtual memory (but physical memory
+#          may be limited depending on use of stack space).
+#
+# hsha  -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
+#          asynchronously using a small number of threads that does not vary with the amount
+#          of thrift clients (and thus scales well to many clients). The rpc requests are still
+#          synchronous (one thread per active request).
+#
+# The default is sync because on Windows hsha is about 30% slower.  On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+#
+# Alternatively, you can provide your own RPC server by providing the fully-qualified class name
+# of an o.a.c.t.TServerFactory that can create an instance of it.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max_thread to set request pool size limits.
+#
+# Regardless of your choice of RPC server (see above), the number of maximum requests in the
+# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
+# RPC server, it also dictates the number of clients that can be connected at all).
+#
+# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
+# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
+# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum field length).
+thrift_framed_transport_size_in_mb: 15
+
+# The max length of a thrift message, including all fields and
+# internal thrift overhead.
+thrift_max_message_length_in_mb: 16
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# Keyspace data.  Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction.  Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you.  Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true 
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+auto_snapshot: true
+
+# Add column indexes to a row after its contents reach this size.
+# Increase if your column values are large, or if you have a very large
+# number of columns.  The competing causes are, Cassandra has to
+# deserialize this much of the row to read a single column, so you want
+# it to be small - at least if you do many partial-row reads - but all
+# the index data is read for each access, so you don't want to generate
+# that wastefully either.
+column_index_size_in_kb: 64
+
+# Size limit for rows being compacted in memory.  Larger rows will spill
+# over to disk and use a slower two-pass compaction process.  A message
+# will be logged specifying the row key.
+in_memory_compaction_limit_in_mb: 64
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair.  Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long-running compaction. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# concurrent_compactors defaults to the number of cores.
+# Uncomment to make compaction mono-threaded, the pre-0.8 default.
+#concurrent_compactors: 1
+
+# Multi-threaded compaction. When enabled, each compaction will use
+# up to one thread per core, plus one thread per sstable being merged.
+# This is usually only useful for SSD-based hardware: otherwise, 
+# your concern is usually to get compaction to do LESS i/o (see:
+# compaction_throughput_mb_per_sec), not more.
+multithreaded_compaction: false
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this accounts for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: 16
+
+# Track cached row keys during compaction, and re-cache their new
+# positions in the compacted sstable.  Disable if you use really large
+# key caches.
+compaction_preheat_key_cache: true
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 400 Mbps or 50 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 400
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 10000
+# How long the coordinator should wait for seq or index scans to complete
+range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 10000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because unless auto_snapshot is disabled
+# we need to flush first so we can snapshot before removing the data.)
+truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+request_timeout_in_ms: 10000
+
+# Enable operation timeout information exchange between nodes to accurately
+# measure request timeouts. If disabled, Cassandra will assume the request
+# was forwarded to the replica instantly by the coordinator.
+#
+# Warning: before enabling this property make sure ntp is installed
+# and the times are synchronized between the nodes.
+cross_node_timeout: false
+
+# Enable socket timeout for streaming operation.
+# When a timeout occurs during streaming, streaming is retried from the start
+# of the current file. This *can* involve re-streaming an important amount of
+# data, so you should avoid setting the value too low.
+# Default value is 0, which never times out streams.
+# streaming_socket_timeout_in_ms: 0
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch.  The snitch has two functions:
+# - it teaches Cassandra enough about your network topology to route
+#   requests efficiently
+# - it allows Cassandra to spread replicas around your cluster to avoid
+#   correlated failures. It does this by grouping machines into
+#   "datacenters" and "racks."  Cassandra will do its best not to have
+#   more than one replica on the same "rack" (which may not actually
+#   be a physical location)
+#
+# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
+# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
+# ARE PLACED.
+#
+# Out of the box, Cassandra provides
+#  - SimpleSnitch:
+#    Treats Strategy order as proximity. This improves cache locality
+#    when disabling read repair, which can further improve throughput.
+#    Only appropriate for single-datacenter deployments.
+#  - PropertyFileSnitch:
+#    Proximity is determined by rack and data center, which are
+#    explicitly configured in cassandra-topology.properties.
+#  - GossipingPropertyFileSnitch
+#    The rack and datacenter for the local node are defined in
+#    cassandra-rackdc.properties and propagated to other nodes via gossip.  If
+#    cassandra-topology.properties exists, it is used as a fallback, allowing
+#    migration from the PropertyFileSnitch.
+#  - RackInferringSnitch:
+#    Proximity is determined by rack and data center, which are
+#    assumed to correspond to the 3rd and 2nd octet of each node's
+#    IP address, respectively.  Unless this happens to match your
+#    deployment conventions (as it did Facebook's), this is best used
+#    as an example of writing a custom Snitch class.
+#  - Ec2Snitch:
+#    Appropriate for EC2 deployments in a single Region.  Loads Region
+#    and Availability Zone information from the EC2 API. The Region is
+#    treated as the Datacenter, and the Availability Zone as the rack.
+#    Only private IPs are used, so this will not work across multiple
+#    Regions.
+#  - Ec2MultiRegionSnitch:
+#    Uses public IPs as broadcast_address to allow cross-region
+#    connectivity.  (Thus, you should set seed addresses to the public
+#    IP as well.) You will need to open the storage_port or
+#    ssl_storage_port on the public IP firewall.  (For intra-Region
+#    traffic, Cassandra will switch to the private IP after
+#    establishing a connection.)
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: ${driver.endpointSnitchName}
+
+# controls how often to perform the more expensive part of host score
+# calculation
+dynamic_snitch_update_interval_in_ms: 100 
+# controls how often to reset all host scores, allowing a bad host to
+# possibly recover
+dynamic_snitch_reset_interval_in_ms: 600000
+# if set greater than zero and read_repair_chance is < 1.0, this will allow
+# 'pinning' of replicas to hosts in order to increase cache capacity.
+# The badness threshold will control how much worse the pinned host has to be
+# before the dynamic snitch will prefer other replicas over it.  This is
+# expressed as a double which represents a percentage.  Thus, a value of
+# 0.2 means Cassandra would continue to prefer the static snitch values
+# until the pinned host was 20% worse than the fastest.
+dynamic_snitch_badness_threshold: 0.1
+
+# request_scheduler -- Set this to a class that implements
+# RequestScheduler, which will schedule incoming client requests
+# according to the specific policy. This is useful for multi-tenancy
+# with a single Cassandra cluster.
+# NOTE: This is specifically for requests from the client and does
+# not affect inter node communication.
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
+# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
+# client requests to a node with a separate queue for each
+# request_scheduler_id. The scheduler is further customized by
+# request_scheduler_options as described below.
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
+
+# Scheduler Options vary based on the type of scheduler
+# NoScheduler - Has no options
+# RoundRobin
+#  - throttle_limit -- The throttle_limit is the number of in-flight
+#                      requests per client.  Requests beyond 
+#                      that limit are queued up until
+#                      running requests can complete.
+#                      The value of 80 here is twice the number of
+#                      concurrent_reads + concurrent_writes.
+#  - default_weight -- default_weight is optional and allows for
+#                      overriding the default which is 1.
+#  - weights -- Weights are optional and will default to 1 or the
+#               overridden default_weight. The weight translates into how
+#               many requests are handled during each turn of the
+#               RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+#    throttle_limit: 80
+#    default_weight: 5
+#    weights:
+#      Keyspace1: 1
+#      Keyspace2: 5
+
+# request_scheduler_id -- An identifier based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# index_interval controls the sampling of entries from the primary
+# row index in terms of space versus time.  The larger the interval,
+# the smaller and less effective the sampling will be.  In technical
+# terms, the interval corresponds to the number of index entries that
+# are skipped between taking each sample.  All the sampled entries
+# must fit in memory.  Generally, a value between 128 and 512 here
+# coupled with a large key cache size on CFs results in the best trade
+# offs.  This value is not often changed; however, if you have many
+# very small rows (many to an OS page), then increasing this will
+# often lower memory usage without an impact on performance.
+index_interval: 128
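+# (Worked example, for illustration only: with roughly 10^9 index entries on
+# a node and the default interval of 128, about 10^9 / 128, i.e. ~7.8 million,
+# sampled entries must be held in memory.)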
+
+# Enable or disable inter-node encryption
+# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
+# users generate their own keys) and TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
+# suite for authentication, key exchange and encryption of the actual data transfers.
+# NOTE: No custom encryption options are enabled at the moment
+# The available internode options are: all, none, dc, rack
+#
+# If set to dc, Cassandra will encrypt the traffic between the DCs.
+# If set to rack, Cassandra will encrypt the traffic between the racks.
+#
+# The passwords used in these options must match the passwords used when generating
+# the keystore and truststore.  For instructions on generating these files, see:
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
+#
+server_encryption_options:
+    internode_encryption: none
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    truststore: conf/.truststore
+    truststore_password: cassandra
+    # More advanced defaults below:
+    # protocol: TLS
+    # algorithm: SunX509
+    # store_type: JKS
+    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
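+    # As an illustrative sketch only (the alias, dname and file names below
+    # are hypothetical), a per-node keystore and truststore matching the
+    # passwords above could be generated with the JDK's keytool:
+    #   keytool -genkeypair -keyalg RSA -alias node1 -dname "CN=node1" \
+    #     -keystore conf/.keystore -storepass cassandra
+    #   keytool -exportcert -alias node1 -file node1.cer \
+    #     -keystore conf/.keystore -storepass cassandra
+    #   keytool -importcert -alias node1 -file node1.cer -noprompt \
+    #     -keystore conf/.truststore -storepass cassandra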
+
+# Enable or disable client/server encryption.
+client_encryption_options:
+    enabled: false
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    # More advanced defaults below:
+    # protocol: TLS
+    # algorithm: SunX509
+    # store_type: JKS
+    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# can be:  all  - all traffic is compressed
+#          dc   - traffic between different datacenters is compressed
+#          none - nothing is compressed.
+internode_compression: all



[24/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraDatacenterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraDatacenterImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraDatacenterImpl.java
deleted file mode 100644
index 603fb6d..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraDatacenterImpl.java
+++ /dev/null
@@ -1,625 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import java.math.BigDecimal;
-import java.math.BigInteger;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import javax.annotation.Nullable;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.enricher.Enrichers;
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.Attributes;
-import brooklyn.entity.basic.DynamicGroup;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.basic.EntityPredicates;
-import brooklyn.entity.basic.Lifecycle;
-import brooklyn.entity.basic.ServiceStateLogic.ServiceNotUpLogic;
-import brooklyn.entity.effector.EffectorBody;
-import brooklyn.entity.group.AbstractMembershipTrackingPolicy;
-import brooklyn.entity.group.DynamicClusterImpl;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.SensorEvent;
-import brooklyn.event.SensorEventListener;
-import brooklyn.location.Location;
-import brooklyn.location.basic.Machines;
-import brooklyn.policy.PolicySpec;
-import brooklyn.util.ResourceUtils;
-import brooklyn.util.collections.MutableList;
-import brooklyn.util.collections.MutableMap;
-import brooklyn.util.collections.MutableSet;
-import brooklyn.util.config.ConfigBag;
-import brooklyn.util.text.Strings;
-import brooklyn.util.time.Time;
-
-import com.google.common.base.Objects;
-import com.google.common.base.Optional;
-import com.google.common.base.Supplier;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.LinkedHashMultimap;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Multimap;
-import com.google.common.collect.Sets;
-import com.google.common.net.HostAndPort;
-
-/**
- * Implementation of {@link CassandraDatacenter}.
- * <p>
- * Several subtleties to note:
- * - a node may take some time after it is running and serving JMX to actually be contactable on its thrift port
- *   (so we wait for thrift port to be contactable)
- * - sometimes new nodes take a while to peer, and/or take a while to get a consistent schema
- *   (each up to 1m; often very close to the 1m) 
- */
-public class CassandraDatacenterImpl extends DynamicClusterImpl implements CassandraDatacenter {
-
-    /*
-     * TODO Seed management is hard!
-     *  - The ServiceRestarter is not doing customize(), so is not refreshing the seeds in cassandra.yaml.
-     *    If we have two nodes that were seeds for each other and they both restart at the same time, we'll have a split brain.
-     */
-    
-    private static final Logger log = LoggerFactory.getLogger(CassandraDatacenterImpl.class);
-
-    // Mutex for synchronizing during re-size operations
-    private final Object mutex = new Object[0];
-
-    private final Supplier<Set<Entity>> defaultSeedSupplier = new Supplier<Set<Entity>>() {
-        // Mutex for (re)calculating our seeds
-        // TODO is this very dangerous?! Calling out to SeedTracker, which calls out to alien getAttribute()/getConfig(). But I think that's ok.
-        // TODO might not need mutex? previous race was being caused by something else, other than concurrent calls!
-        private final Object seedMutex = new Object();
-        
-        @Override
-        public Set<Entity> get() {
-            synchronized (seedMutex) {
-                boolean hasPublishedSeeds = Boolean.TRUE.equals(getAttribute(HAS_PUBLISHED_SEEDS));
-                int quorumSize = getSeedQuorumSize();
-                Set<Entity> potentialSeeds = gatherPotentialSeeds();
-                Set<Entity> potentialRunningSeeds = gatherPotentialRunningSeeds();
-                boolean stillWaitingForQuorum = (!hasPublishedSeeds) && (potentialSeeds.size() < quorumSize);
-                
-                if (stillWaitingForQuorum) {
-                    if (log.isDebugEnabled()) log.debug("Not refreshed seeds of cluster {}, because still waiting for quorum (need {}; have {} potentials)", new Object[] {CassandraDatacenterImpl.class, quorumSize, potentialSeeds.size()});
-                    return ImmutableSet.of();
-                } else if (hasPublishedSeeds) {
-                    Set<Entity> currentSeeds = getAttribute(CURRENT_SEEDS);
-                    if (getAttribute(SERVICE_STATE_ACTUAL) == Lifecycle.STARTING) {
-                        if (Sets.intersection(currentSeeds, potentialSeeds).isEmpty()) {
-                            log.warn("Cluster {} lost all its seeds while starting! Subsequent failure likely, but changing seeds during startup would risk split-brain: seeds={}", new Object[] {CassandraDatacenterImpl.this, currentSeeds});
-                        }
-                        return currentSeeds;
-                    } else if (potentialRunningSeeds.isEmpty()) {
-                        // TODO Could be race where nodes have only just returned from start() and are about to 
-                        // transition to serviceUp; so don't just abandon all our seeds!
-                        log.warn("Cluster {} has no running seeds (yet?); leaving seeds as-is; but risks split-brain if these seeds come back up!", new Object[] {CassandraDatacenterImpl.this});
-                        return currentSeeds;
-                    } else {
-                        Set<Entity> result = trim(quorumSize, potentialRunningSeeds);
-                        log.debug("Cluster {} updating seeds: chosen={}; potentialRunning={}", new Object[] {CassandraDatacenterImpl.this, result, potentialRunningSeeds});
-                        return result;
-                    }
-                } else {
-                    Set<Entity> result = trim(quorumSize, potentialSeeds);
-                    if (log.isDebugEnabled()) log.debug("Cluster {} has reached seed quorum: seeds={}", new Object[] {CassandraDatacenterImpl.this, result});
-                    return result;
-                }
-            }
-        }
-        private Set<Entity> trim(int num, Set<Entity> contenders) {
-            // Prefer existing seeds wherever possible; otherwise accept any other contenders
-            Set<Entity> currentSeeds = (getAttribute(CURRENT_SEEDS) != null) ? getAttribute(CURRENT_SEEDS) : ImmutableSet.<Entity>of();
-            Set<Entity> result = Sets.newLinkedHashSet();
-            result.addAll(Sets.intersection(currentSeeds, contenders));
-            result.addAll(contenders);
-            return ImmutableSet.copyOf(Iterables.limit(result, num));
-        }
-    };
-    
-    protected SeedTracker seedTracker = new SeedTracker();
-    protected TokenGenerator tokenGenerator = null;
-
-    public CassandraDatacenterImpl() {
-    }
-
-    @Override
-    public void init() {
-        super.init();
-
-        /*
-         * subscribe to hostname, and keep an accurate set of current seeds in a sensor;
-         * then at nodes we set the initial seeds to be the current seeds when ready (non-empty)
-         */
-        subscribeToMembers(this, Attributes.HOSTNAME, new SensorEventListener<String>() {
-            @Override
-            public void onEvent(SensorEvent<String> event) {
-                seedTracker.onHostnameChanged(event.getSource(), event.getValue());
-            }
-        });
-        subscribe(this, DynamicGroup.MEMBER_REMOVED, new SensorEventListener<Entity>() {
-            @Override public void onEvent(SensorEvent<Entity> event) {
-                seedTracker.onMemberRemoved(event.getValue());
-            }
-        });
-        subscribeToMembers(this, Attributes.SERVICE_UP, new SensorEventListener<Boolean>() {
-            @Override
-            public void onEvent(SensorEvent<Boolean> event) {
-                seedTracker.onServiceUpChanged(event.getSource(), event.getValue());
-            }
-        });
-        subscribeToMembers(this, Attributes.SERVICE_STATE_ACTUAL, new SensorEventListener<Lifecycle>() {
-            @Override
-            public void onEvent(SensorEvent<Lifecycle> event) {
-                // trigger a recomputation also when lifecycle state changes, 
-                // because it might not have ruled a seed as inviable when service up went true 
-                // because service state was not yet running
-                seedTracker.onServiceUpChanged(event.getSource(), Lifecycle.RUNNING==event.getValue());
-            }
-        });
-        
-        // Track the datacenters for this cluster
-        subscribeToMembers(this, CassandraNode.DATACENTER_NAME, new SensorEventListener<String>() {
-            @Override
-            public void onEvent(SensorEvent<String> event) {
-                Entity member = event.getSource();
-                String dcName = event.getValue();
-                if (dcName != null) {
-                    Multimap<String, Entity> datacenterUsage = getAttribute(DATACENTER_USAGE);
-                    Multimap<String, Entity> mutableDatacenterUsage = (datacenterUsage == null) ? LinkedHashMultimap.<String, Entity>create() : LinkedHashMultimap.create(datacenterUsage);
-                    Optional<String> oldDcName = getKeyOfVal(mutableDatacenterUsage, member);
-                    if (!(oldDcName.isPresent() && dcName.equals(oldDcName.get()))) {
-                        mutableDatacenterUsage.values().remove(member);
-                        mutableDatacenterUsage.put(dcName, member);
-                        setAttribute(DATACENTER_USAGE, mutableDatacenterUsage);
-                        setAttribute(DATACENTERS, Sets.newLinkedHashSet(mutableDatacenterUsage.keySet()));
-                    }
-                }
-            }
-            private <K,V> Optional<K> getKeyOfVal(Multimap<K,V> map, V val) {
-                for (Map.Entry<K,V> entry : map.entries()) {
-                    if (Objects.equal(val, entry.getValue())) {
-                        return Optional.of(entry.getKey());
-                    }
-                }
-                return Optional.absent();
-            }
-        });
-        subscribe(this, DynamicGroup.MEMBER_REMOVED, new SensorEventListener<Entity>() {
-            @Override public void onEvent(SensorEvent<Entity> event) {
-                Entity entity = event.getSource();
-                Multimap<String, Entity> datacenterUsage = getAttribute(DATACENTER_USAGE);
-                if (datacenterUsage != null && datacenterUsage.containsValue(entity)) {
-                    Multimap<String, Entity> mutableDatacenterUsage = LinkedHashMultimap.create(datacenterUsage);
-                    mutableDatacenterUsage.values().remove(entity);
-                    setAttribute(DATACENTER_USAGE, mutableDatacenterUsage);
-                    setAttribute(DATACENTERS, Sets.newLinkedHashSet(mutableDatacenterUsage.keySet()));
-                }
-            }
-        });
-        
-        getMutableEntityType().addEffector(EXECUTE_SCRIPT, new EffectorBody<String>() {
-            @Override
-            public String call(ConfigBag parameters) {
-                return executeScript((String)parameters.getStringKey("commands"));
-            }
-        });
-    }
-    
-    protected Supplier<Set<Entity>> getSeedSupplier() {
-        Supplier<Set<Entity>> seedSupplier = getConfig(SEED_SUPPLIER);
-        return (seedSupplier == null) ? defaultSeedSupplier : seedSupplier;
-    }
-    
-    protected boolean useVnodes() {
-        return Boolean.TRUE.equals(getConfig(USE_VNODES));
-    }
-    
-    protected synchronized TokenGenerator getTokenGenerator() {
-        if (tokenGenerator!=null) 
-            return tokenGenerator;
-        
-        try {
-            tokenGenerator = getConfig(TOKEN_GENERATOR_CLASS).newInstance();
-            
-            BigInteger shift = getConfig(TOKEN_SHIFT);
-            if (shift==null) 
-                shift = BigDecimal.valueOf(Math.random()).multiply(
-                    new BigDecimal(tokenGenerator.range())).toBigInteger();
-            tokenGenerator.setOrigin(shift);
-            
-            return tokenGenerator;
-        } catch (Exception e) {
-            throw Throwables.propagate(e);
-        }        
-    }
-    
-    protected int getSeedQuorumSize() {
-        Integer quorumSize = getConfig(INITIAL_QUORUM_SIZE);
-        if (quorumSize!=null && quorumSize>0)
-            return quorumSize;
-        // default 2 is recommended, unless initial size is smaller
-        return Math.min(Math.max(getConfig(INITIAL_SIZE), 1), DEFAULT_SEED_QUORUM);
-    }
-
-    @Override
-    public Set<Entity> gatherPotentialSeeds() {
-        return seedTracker.gatherPotentialSeeds();
-    }
-
-    @Override
-    public Set<Entity> gatherPotentialRunningSeeds() {
-        return seedTracker.gatherPotentialRunningSeeds();
-    }
-
-    /**
-     * Sets the default {@link #MEMBER_SPEC} to describe the Cassandra nodes.
-     */
-    @Override
-    protected EntitySpec<?> getMemberSpec() {
-        return getConfig(MEMBER_SPEC, EntitySpec.create(CassandraNode.class));
-    }
-
-    @Override
-    public String getClusterName() {
-        return getAttribute(CLUSTER_NAME);
-    }
-
-    @Override
-    public Collection<Entity> grow(int delta) {
-        if (useVnodes()) {
-            // nothing to do for token generator
-        } else {
-            if (getCurrentSize() == 0) {
-                getTokenGenerator().growingCluster(delta);
-            }
-        }
-        return super.grow(delta);
-    }
-    
-    @SuppressWarnings("deprecation")
-    @Override
-    protected Entity createNode(@Nullable Location loc, Map<?,?> flags) {
-        Map<Object, Object> allflags = MutableMap.copyOf(flags);
-        
-        if ((flags.containsKey(CassandraNode.TOKEN) || flags.containsKey("token")) || (flags.containsKey(CassandraNode.TOKENS) || flags.containsKey("tokens"))) {
-            // leave token config as-is
-        } else if (!useVnodes()) {
-            BigInteger token = getTokenGenerator().newToken();
-            allflags.put(CassandraNode.TOKEN, token);
-        }
-
-        if ((flags.containsKey(CassandraNode.NUM_TOKENS_PER_NODE) || flags.containsKey("numTokensPerNode"))) {
-            // leave num_tokens as-is
-        } else if (useVnodes()) {
-            Integer numTokensPerNode = getConfig(NUM_TOKENS_PER_NODE);
-            allflags.put(CassandraNode.NUM_TOKENS_PER_NODE, numTokensPerNode);
-        } else {
-            allflags.put(CassandraNode.NUM_TOKENS_PER_NODE, 1);
-        }
-        
-        return super.createNode(loc, allflags);
-    }
-
-    @Override
-    protected Entity replaceMember(Entity member, Location memberLoc, Map<?, ?> extraFlags) {
-        Set<BigInteger> oldTokens = ((CassandraNode) member).getTokens();
-        Set<BigInteger> newTokens = (oldTokens != null && oldTokens.size() > 0) ? getTokenGenerator().getTokensForReplacementNode(oldTokens) : null;
-        return super.replaceMember(member, memberLoc,  MutableMap.copyOf(extraFlags).add(CassandraNode.TOKENS, newTokens));
-    }
-
-    @Override
-    public void start(Collection<? extends Location> locations) {
-        Machines.warnIfLocalhost(locations, "CassandraCluster does not support multiple nodes on localhost, " +
-                "due to assumptions Cassandra makes about the same port numbers being used across the cluster.");
-
-        // force this to be set - even if it is using the default
-        setAttribute(CLUSTER_NAME, getConfig(CLUSTER_NAME));
-        
-        super.start(locations);
-
-        connectSensors();
-
-        // TODO wait until all nodes which we think are up are consistent 
-        // i.e. all known nodes use the same schema, as reported by
-        // SshEffectorTasks.ssh("echo \"describe cluster;\" | /bin/cassandra-cli");
-        // once we've done that we can revert to using 2 seed nodes.
-        // see CassandraCluster.DEFAULT_SEED_QUORUM
-        // (also ensure the cluster is ready if we are about to run a creation script)
-        Time.sleep(getConfig(DELAY_BEFORE_ADVERTISING_CLUSTER));
-
-        String scriptUrl = getConfig(CassandraNode.CREATION_SCRIPT_URL);
-        if (Strings.isNonEmpty(scriptUrl)) {
-            executeScript(new ResourceUtils(this).getResourceAsString(scriptUrl));
-        }
-
-        update();
-    }
-
-    protected void connectSensors() {
-        connectEnrichers();
-        
-        addPolicy(PolicySpec.create(MemberTrackingPolicy.class)
-                .displayName("Cassandra Cluster Tracker")
-                .configure("sensorsToTrack", ImmutableSet.of(Attributes.SERVICE_UP, Attributes.HOSTNAME, CassandraNode.THRIFT_PORT))
-                .configure("group", this));
-    }
-
-    public static class MemberTrackingPolicy extends AbstractMembershipTrackingPolicy {
-        @Override
-        protected void onEntityChange(Entity member) {
-            if (log.isDebugEnabled()) log.debug("Node {} updated in Cluster {}", member, this);
-            ((CassandraDatacenterImpl)entity).update();
-        }
-        @Override
-        protected void onEntityAdded(Entity member) {
-            if (log.isDebugEnabled()) log.debug("Node {} added to Cluster {}", member, this);
-            ((CassandraDatacenterImpl)entity).update();
-        }
-        @Override
-        protected void onEntityRemoved(Entity member) {
-            if (log.isDebugEnabled()) log.debug("Node {} removed from Cluster {}", member, this);
-            ((CassandraDatacenterImpl)entity).update();
-        }
-    };
-
-    @SuppressWarnings("unchecked")
-    protected void connectEnrichers() {
-        List<? extends List<? extends AttributeSensor<? extends Number>>> summingEnricherSetup = ImmutableList.of(
-                ImmutableList.of(CassandraNode.READ_ACTIVE, READ_ACTIVE),
-                ImmutableList.of(CassandraNode.READ_PENDING, READ_PENDING),
-                ImmutableList.of(CassandraNode.WRITE_ACTIVE, WRITE_ACTIVE),
-                ImmutableList.of(CassandraNode.WRITE_PENDING, WRITE_PENDING)
-        );
-        
-        List<? extends List<? extends AttributeSensor<? extends Number>>> averagingEnricherSetup = ImmutableList.of(
-                ImmutableList.of(CassandraNode.READS_PER_SECOND_LAST, READS_PER_SECOND_LAST_PER_NODE),
-                ImmutableList.of(CassandraNode.WRITES_PER_SECOND_LAST, WRITES_PER_SECOND_LAST_PER_NODE),
-                ImmutableList.of(CassandraNode.WRITES_PER_SECOND_IN_WINDOW, WRITES_PER_SECOND_IN_WINDOW_PER_NODE),
-                ImmutableList.of(CassandraNode.READS_PER_SECOND_IN_WINDOW, READS_PER_SECOND_IN_WINDOW_PER_NODE),
-                ImmutableList.of(CassandraNode.THRIFT_PORT_LATENCY, THRIFT_PORT_LATENCY_PER_NODE),
-                ImmutableList.of(CassandraNode.THRIFT_PORT_LATENCY_IN_WINDOW, THRIFT_PORT_LATENCY_IN_WINDOW_PER_NODE),
-                ImmutableList.of(CassandraNode.PROCESS_CPU_TIME_FRACTION_LAST, PROCESS_CPU_TIME_FRACTION_LAST_PER_NODE),
-                ImmutableList.of(CassandraNode.PROCESS_CPU_TIME_FRACTION_IN_WINDOW, PROCESS_CPU_TIME_FRACTION_IN_WINDOW_PER_NODE)
-        );
-        
-        for (List<? extends AttributeSensor<? extends Number>> es : summingEnricherSetup) {
-            AttributeSensor<? extends Number> t = es.get(0);
-            AttributeSensor<? extends Number> total = es.get(1);
-            addEnricher(Enrichers.builder()
-                    .aggregating(t)
-                    .publishing(total)
-                    .fromMembers()
-                    .computingSum()
-                    .defaultValueForUnreportedSensors(null)
-                    .valueToReportIfNoSensors(null)
-                    .build());
-        }
-        
-        for (List<? extends AttributeSensor<? extends Number>> es : averagingEnricherSetup) {
-            AttributeSensor<Number> t = (AttributeSensor<Number>) es.get(0);
-            AttributeSensor<Double> average = (AttributeSensor<Double>) es.get(1);
-            addEnricher(Enrichers.builder()
-                    .aggregating(t)
-                    .publishing(average)
-                    .fromMembers()
-                    .computingAverage()
-                    .defaultValueForUnreportedSensors(null)
-                    .valueToReportIfNoSensors(null)
-                    .build());
-
-        }
-    }
-
-    @Override
-    public void stop() {
-        disconnectSensors();
-        
-        super.stop();
-    }
-    
-    protected void disconnectSensors() {
-    }
-
-    @Override
-    public void update() {
-        synchronized (mutex) {
-            // Update our seeds, as necessary
-            seedTracker.refreshSeeds();
-            
-            // Choose the first available cluster member to set host and port (and compute one-up)
-            Optional<Entity> upNode = Iterables.tryFind(getMembers(), EntityPredicates.attributeEqualTo(SERVICE_UP, Boolean.TRUE));
-
-            if (upNode.isPresent()) {
-                setAttribute(HOSTNAME, upNode.get().getAttribute(Attributes.HOSTNAME));
-                setAttribute(THRIFT_PORT, upNode.get().getAttribute(CassandraNode.THRIFT_PORT));
-
-                List<String> currentNodes = getAttribute(CASSANDRA_CLUSTER_NODES);
-                Set<String> oldNodes = (currentNodes != null) ? ImmutableSet.copyOf(currentNodes) : ImmutableSet.<String>of();
-                Set<String> newNodes = MutableSet.<String>of();
-                for (Entity member : getMembers()) {
-                    if (member instanceof CassandraNode && Boolean.TRUE.equals(member.getAttribute(SERVICE_UP))) {
-                        String hostname = member.getAttribute(Attributes.HOSTNAME);
-                        Integer thriftPort = member.getAttribute(CassandraNode.THRIFT_PORT);
-                        if (hostname != null && thriftPort != null) {
-                            newNodes.add(HostAndPort.fromParts(hostname, thriftPort).toString());
-                        }
-                    }
-                }
-                if (Sets.symmetricDifference(oldNodes, newNodes).size() > 0) {
-                    setAttribute(CASSANDRA_CLUSTER_NODES, MutableList.copyOf(newNodes));
-                }
-            } else {
-                setAttribute(HOSTNAME, null);
-                setAttribute(THRIFT_PORT, null);
-                setAttribute(CASSANDRA_CLUSTER_NODES, Collections.<String>emptyList());
-            }
-
-            ServiceNotUpLogic.updateNotUpIndicatorRequiringNonEmptyList(this, CASSANDRA_CLUSTER_NODES);
-        }
-    }
-    
-    /**
-     * For tracking our seeds. This gets fiddly! High-level logic is:
-     * <ul>
- *   <li>If we have never reached quorum (i.e. have never published seeds), then continue to wait for quorum,
- *       because entity-startup may be blocking for this. This is handled by the seedSupplier.
- *   <li>If we previously reached quorum (i.e. have previously published seeds), then always update;
- *       we never want stale/dead entities listed in our seeds.
- *   <li>If an existing seed looks unhealthy, then replace it.
- *   <li>If a new potential seed becomes available (and we're in need of more), then add it.
- * </ul>
- * 
- * Also note that {@link CassandraFabric} can take over, because it knows about multiple sub-clusters!
-     * It will provide a different {@link CassandraDatacenter#SEED_SUPPLIER}. Each time we think that our seeds
-     * need to change, we call that. The fabric will call into {@link CassandraDatacenterImpl#gatherPotentialSeeds()}
-     * to find out what's available.
-     * 
-     * @author aled
-     */
-    protected class SeedTracker {
-        private final Map<Entity, Boolean> memberUpness = Maps.newLinkedHashMap();
-        
-        public void onMemberRemoved(Entity member) {
-            Set<Entity> seeds = getSeeds();
-            boolean maybeRemove = seeds.contains(member);
-            memberUpness.remove(member);
-            
-            if (maybeRemove) {
-                refreshSeeds();
-            } else {
-                if (log.isTraceEnabled()) log.trace("Seeds considered stable for cluster {} (node {} removed)", new Object[] {CassandraDatacenterImpl.this, member});
-                return;
-            }
-        }
-        public void onHostnameChanged(Entity member, String hostname) {
-            Set<Entity> seeds = getSeeds();
-            int quorum = getSeedQuorumSize();
-            boolean isViable = isViableSeed(member);
-            boolean maybeAdd = isViable && seeds.size() < quorum;
-            boolean maybeRemove = seeds.contains(member) && !isViable;
-            
-            if (maybeAdd || maybeRemove) {
-                refreshSeeds();
-            } else {
-                if (log.isTraceEnabled()) log.trace("Seeds considered stable for cluster {} (node {} changed hostname {})", new Object[] {CassandraDatacenterImpl.this, member, hostname});
-                return;
-            }
-        }
-        public void onServiceUpChanged(Entity member, Boolean serviceUp) {
-            Boolean oldVal = memberUpness.put(member, serviceUp);
-            if (Objects.equal(oldVal, serviceUp)) {
-                if (log.isTraceEnabled()) log.trace("Ignoring duplicate service-up in "+CassandraDatacenterImpl.this+" for "+member+", "+serviceUp);
-            }
-            Set<Entity> seeds = getSeeds();
-            int quorum = getSeedQuorumSize();
-            boolean isViable = isViableSeed(member);
-            boolean maybeAdd = isViable && seeds.size() < quorum;
-            boolean maybeRemove = seeds.contains(member) && !isViable;
-            
-            if (log.isDebugEnabled())
-                log.debug("Considering refresh of seeds for "+CassandraDatacenterImpl.this+" because "+member+" is now "+serviceUp+" ("+isViable+" / "+maybeAdd+" / "+maybeRemove+")");
-            if (maybeAdd || maybeRemove) {
-                refreshSeeds();
-            } else {
-                if (log.isTraceEnabled()) log.trace("Seeds considered stable for cluster {} (node {} changed serviceUp {})", new Object[] {CassandraDatacenterImpl.this, member, serviceUp});
-                return;
-            }
-        }
-        protected Set<Entity> getSeeds() {
-            Set<Entity> result = getAttribute(CURRENT_SEEDS);
-            return (result == null) ? ImmutableSet.<Entity>of() : result;
-        }
-        public void refreshSeeds() {
-            Set<Entity> oldseeds = getAttribute(CURRENT_SEEDS);
-            Set<Entity> newseeds = getSeedSupplier().get();
-            if (Objects.equal(oldseeds, newseeds)) {
-                if (log.isTraceEnabled()) log.debug("Seed refresh no-op for cluster {}: still={}", new Object[] {CassandraDatacenterImpl.this, oldseeds});
-            } else {
-                if (log.isDebugEnabled()) log.debug("Refreshing seeds of cluster {}: now={}; old={}", new Object[] {this, newseeds, oldseeds});
-                setAttribute(CURRENT_SEEDS, newseeds);
-                if (newseeds != null && newseeds.size() > 0) {
-                    setAttribute(HAS_PUBLISHED_SEEDS, true);
-                }
-            }
-        }
-        public Set<Entity> gatherPotentialSeeds() {
-            Set<Entity> result = Sets.newLinkedHashSet();
-            for (Entity member : getMembers()) {
-                if (isViableSeed(member)) {
-                    result.add(member);
-                }
-            }
-            if (log.isTraceEnabled()) log.trace("Viable seeds in Cluster {}: {}", new Object[] {result});
-            return result;
-        }
-        public Set<Entity> gatherPotentialRunningSeeds() {
-            Set<Entity> result = Sets.newLinkedHashSet();
-            for (Entity member : getMembers()) {
-                if (isRunningSeed(member)) {
-                    result.add(member);
-                }
-            }
-            if (log.isTraceEnabled()) log.trace("Viable running seeds in Cluster {}: {}", new Object[] {result});
-            return result;
-        }
-        public boolean isViableSeed(Entity member) {
-            // TODO would be good to reuse the better logic in ServiceFailureDetector
-            // (e.g. if that didn't just emit a notification but set a sensor as well?)
-            boolean managed = Entities.isManaged(member);
-            String hostname = member.getAttribute(Attributes.HOSTNAME);
-            boolean serviceUp = Boolean.TRUE.equals(member.getAttribute(Attributes.SERVICE_UP));
-            Lifecycle serviceState = member.getAttribute(Attributes.SERVICE_STATE_ACTUAL);
-            boolean hasFailed = !managed || (serviceState == Lifecycle.ON_FIRE) || (serviceState == Lifecycle.RUNNING && !serviceUp) || (serviceState == Lifecycle.STOPPED);
-            boolean result = (hostname != null && !hasFailed);
-            if (log.isTraceEnabled()) log.trace("Node {} in Cluster {}: viableSeed={}; hostname={}; serviceUp={}; serviceState={}; hasFailed={}", new Object[] {member, this, result, hostname, serviceUp, serviceState, hasFailed});
-            return result;
-        }
-        public boolean isRunningSeed(Entity member) {
-            boolean viableSeed = isViableSeed(member);
-            boolean serviceUp = Boolean.TRUE.equals(member.getAttribute(Attributes.SERVICE_UP));
-            Lifecycle serviceState = member.getAttribute(Attributes.SERVICE_STATE_ACTUAL);
-            boolean result = viableSeed && serviceUp && serviceState == Lifecycle.RUNNING;
-            if (log.isTraceEnabled()) log.trace("Node {} in Cluster {}: runningSeed={}; viableSeed={}; serviceUp={}; serviceState={}", new Object[] {member, this, result, viableSeed, serviceUp, serviceState});
-            return result;
-        }
-    }
-    
-    @Override
-    public String executeScript(String commands) {
-        Entity someChild = Iterables.getFirst(getMembers(), null);
-        if (someChild==null)
-            throw new IllegalStateException("No Cassandra nodes available");
-        // FIXME cross-entity method-style calls such as below do not set up a queueing context (DynamicSequentialTask)
-//        return ((CassandraNode)someChild).executeScript(commands);
-        return Entities.invokeEffector(this, someChild, CassandraNode.EXECUTE_SCRIPT, MutableMap.of("commands", commands)).getUnchecked();
-    }
-    
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraFabric.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraFabric.java b/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraFabric.java
deleted file mode 100644
index 8dc0f28..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraFabric.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import java.util.Set;
-
-import org.apache.brooklyn.catalog.Catalog;
-import brooklyn.config.ConfigKey;
-import brooklyn.entity.Entity;
-import brooklyn.entity.annotation.Effector;
-import brooklyn.entity.basic.ConfigKeys;
-import brooklyn.entity.basic.MethodEffector;
-import brooklyn.entity.group.DynamicFabric;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.event.AttributeSensor;
-import brooklyn.location.Location;
-
-import com.google.common.base.Function;
-import com.google.common.collect.Multimap;
-import com.google.common.reflect.TypeToken;
-
-/**
- * A fabric of {@link CassandraNode}s, which forms a cluster spanning multiple locations.
- * <p>
- * Each {@link CassandraDatacenter} child instance is actually just a part of the whole cluster. It consists of the
- * nodes in that single location (which normally corresponds to a "datacenter" in Cassandra terminology).
- */
-@Catalog(name="Apache Cassandra Database Fabric", description="Cassandra is a highly scalable, eventually " +
-        "consistent, distributed, structured key-value store which provides a ColumnFamily-based data model " +
-        "richer than typical key/value systems", iconUrl="classpath:///cassandra-logo.jpeg")
-@ImplementedBy(CassandraFabricImpl.class)
-public interface CassandraFabric extends DynamicFabric {
-
-    ConfigKey<Integer> INITIAL_QUORUM_SIZE = ConfigKeys.newIntegerConfigKey(
-            "fabric.initial.quorumSize",
-            "Initial fabric quorum size - number of initial nodes that must have been successfully started " +
-            "to report success (if less than 0, then use a value based on INITIAL_SIZE of clusters)",
-            -1);
-    
-    @SuppressWarnings("serial")
-    ConfigKey<Function<Location, String>> DATA_CENTER_NAMER = ConfigKeys.newConfigKey(new TypeToken<Function<Location, String>>(){}, 
-            "cassandra.fabric.datacenter.namer",
-            "Function used to provide the cassandra.replication.datacenterName for a given location");
-
-    int DEFAULT_SEED_QUORUM = 5;
-    
-    AttributeSensor<Multimap<String,Entity>> DATACENTER_USAGE = CassandraDatacenter.DATACENTER_USAGE;
-
-    AttributeSensor<Set<String>> DATACENTERS = CassandraDatacenter.DATACENTERS;
-
-    AttributeSensor<Set<Entity>> CURRENT_SEEDS = CassandraDatacenter.CURRENT_SEEDS;
-
-    AttributeSensor<Boolean> HAS_PUBLISHED_SEEDS = CassandraDatacenter.HAS_PUBLISHED_SEEDS;
-
-    AttributeSensor<String> HOSTNAME = CassandraDatacenter.HOSTNAME;
-
-    AttributeSensor<Integer> THRIFT_PORT = CassandraDatacenter.THRIFT_PORT;
-
-    MethodEffector<Void> UPDATE = new MethodEffector<Void>(CassandraFabric.class, "update");
-
-    @Effector(description="Updates the cluster members")
-    void update();
-    
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraFabricImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraFabricImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraFabricImpl.java
deleted file mode 100644
index 2f874e6..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraFabricImpl.java
+++ /dev/null
@@ -1,395 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import java.util.Collection;
-import java.util.Map;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.Attributes;
-import brooklyn.entity.basic.DynamicGroup;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.basic.EntityInternal;
-import brooklyn.entity.basic.EntityPredicates;
-import brooklyn.entity.basic.Lifecycle;
-import brooklyn.entity.group.AbstractMembershipTrackingPolicy;
-import brooklyn.entity.group.DynamicFabricImpl;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.event.SensorEvent;
-import brooklyn.event.SensorEventListener;
-import brooklyn.location.Location;
-import brooklyn.policy.PolicySpec;
-import brooklyn.util.collections.CollectionFunctionals;
-import brooklyn.util.collections.MutableMap;
-import brooklyn.util.collections.MutableSet;
-import brooklyn.util.time.Time;
-
-import com.google.common.base.Function;
-import com.google.common.base.Objects;
-import com.google.common.base.Optional;
-import com.google.common.base.Supplier;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.LinkedHashMultimap;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Multimap;
-import com.google.common.collect.Sets;
-
-/**
- * Implementation of {@link CassandraFabric}.
- * <p>
- * Several subtleties to note:
- * - a node may take some time after it is running and serving JMX to actually be contactable on its thrift port
- *   (so we wait for thrift port to be contactable)
- * - sometimes new nodes take a while to peer, and/or take a while to get a consistent schema
- *   (each up to 1m; often very close to the 1m) 
- */
-public class CassandraFabricImpl extends DynamicFabricImpl implements CassandraFabric {
-
-    private static final Logger log = LoggerFactory.getLogger(CassandraFabricImpl.class);
-
-    // Mutex for synchronizing during re-size operations
-    private final Object mutex = new Object[0];
-
-    private final Supplier<Set<Entity>> defaultSeedSupplier = new Supplier<Set<Entity>>() {
-        @Override public Set<Entity> get() {
-            // TODO Remove duplication from CassandraClusterImpl.defaultSeedSupplier
-            Set<Entity> seeds = getAttribute(CURRENT_SEEDS);
-            boolean hasPublishedSeeds = Boolean.TRUE.equals(getAttribute(HAS_PUBLISHED_SEEDS));
-            int quorumSize = getSeedQuorumSize();
-            
-            // update seeds if we're not quorate; note this may not work for dynamically adding new datacenters
-            // as we do not take a new seed from the new datacenter
-            if (seeds == null || seeds.size() < quorumSize || containsDownEntity(seeds)) {
-                Set<Entity> newseeds;
-                Map<CassandraDatacenter,Set<Entity>> potentialSeeds = MutableMap.of();
-                int potentialSeedCount = 0;
-                for (CassandraDatacenter member : Iterables.filter(getMembers(), CassandraDatacenter.class)) {
-                    Set<Entity> dcPotentialSeeds = member.gatherPotentialSeeds();
-                    potentialSeeds.put(member, dcPotentialSeeds);
-                    potentialSeedCount += dcPotentialSeeds.size();
-                }
-                
-                if (hasPublishedSeeds) {
-                    Set<Entity> currentSeeds = getAttribute(CURRENT_SEEDS);
-                    Lifecycle serviceState = getAttribute(SERVICE_STATE_ACTUAL);
-                    if (serviceState == Lifecycle.STARTING) {
-                        if (Sets.intersection(currentSeeds, ImmutableSet.copyOf(Iterables.concat(potentialSeeds.values()))).isEmpty()) {
-                            log.warn("Fabric {} lost all its seeds while starting! Subsequent failure likely, but changing seeds during startup would risk split-brain: seeds={}", new Object[] {CassandraFabricImpl.this, currentSeeds});
-                        }
-                        newseeds = currentSeeds;
-                    } else if (serviceState == Lifecycle.STOPPING || serviceState == Lifecycle.STOPPED) {
-                        if (log.isTraceEnabled()) log.trace("Fabric {} ignoring any potential seed-changes, because {}: seeds={}", new Object[] {CassandraFabricImpl.this, serviceState, currentSeeds});
-                        newseeds = currentSeeds;
-                    } else if (potentialSeedCount == 0) {
-                        // TODO Could be race where nodes have only just returned from start() and are about to 
-                        // transition to serviceUp; so don't just abandon all our seeds!
-                        log.warn("Fabric {} has no seeds (after startup); leaving seeds as-is; but risks split-brain if these seeds come back up!", new Object[] {CassandraFabricImpl.this});
-                        newseeds = currentSeeds;
-                    } else if (!allNonEmpty(potentialSeeds.values())) {
-                        log.warn("Fabric {} has datacenter with no seeds (after startup); leaving seeds as-is; but risks split-brain if these seeds come back up!", new Object[] {CassandraFabricImpl.this});
-                        newseeds = currentSeeds;
-                    } else {
-                        Set<Entity> result = selectSeeds(quorumSize, potentialSeeds);
-                        if (log.isDebugEnabled() && !Objects.equal(seeds, result)) {
-                            log.debug("Fabric {} updating seeds: chosen={}; potential={}", new Object[] {CassandraFabricImpl.this, result, potentialSeeds});
-                        }
-                        newseeds = result;
-                    }
-                } else if (potentialSeedCount < quorumSize) {
-                    if (log.isDebugEnabled()) log.debug("Not setting seeds of fabric {} yet, because still waiting for quorum (need {}; have {} potentials from {} members)", new Object[] {CassandraFabricImpl.this, quorumSize, potentialSeedCount, getMembers()});
-                    newseeds = ImmutableSet.of();
-                } else if (!allNonEmpty(potentialSeeds.values())) {
-                    if (log.isDebugEnabled()) {
-                        Map<CassandraDatacenter, Integer> datacenterCounts = Maps.transformValues(potentialSeeds, CollectionFunctionals.sizeFunction());
-                        log.debug("Not setting seeds of fabric {} yet, because not all datacenters have seeds (sizes are {})", new Object[] {CassandraFabricImpl.this, datacenterCounts});
-                    }
-                    newseeds = ImmutableSet.of();
-                } else {
-                    // yay, we're quorate
-                    Set<Entity> result = selectSeeds(quorumSize, potentialSeeds);
-                    log.info("Fabric {} has reached seed quorum: seeds={}", new Object[] {CassandraFabricImpl.this, result});
-                    newseeds = result;
-                }
-                
-                if (!Objects.equal(seeds, newseeds)) {
-                    setAttribute(CURRENT_SEEDS, newseeds);
-                    
-                    if (newseeds != null && newseeds.size() > 0) {
-                        setAttribute(HAS_PUBLISHED_SEEDS, true);
-                        
-                        // Need to tell every datacenter that seeds are ready.
-                        // Otherwise a datacenter might get no more changes (e.g. to nodes' hostnames etc), 
-                        // and not call seedSupplier.get() again.
-                        for (CassandraDatacenter member : Iterables.filter(getMembers(), CassandraDatacenter.class)) {
-                            member.update();
-                        }
-                    }
-                    return newseeds;
-                } else {
-                    return seeds;
-                }
-            } else {
-                if (log.isTraceEnabled()) log.trace("Not refresheed seeds of fabric {}, because have quorum {} (of {} members), and none are down: seeds={}", 
-                        new Object[] {CassandraFabricImpl.class, quorumSize, getMembers().size(), seeds});
-                return seeds;
-            }
-        }
-        private boolean allNonEmpty(Collection<? extends Collection<Entity>> contenders) {
-            for (Collection<Entity> contender: contenders)
-                if (contender.isEmpty()) return false;
-            return true;
-        }
-        private Set<Entity> selectSeeds(int num, Map<CassandraDatacenter,? extends Collection<Entity>> contenders) {
-            // Prefer existing seeds wherever possible;
-            // otherwise prefer a seed from each sub-cluster;
-            // otherwise accept any other contenders
-            Set<Entity> currentSeeds = (getAttribute(CURRENT_SEEDS) != null) ? getAttribute(CURRENT_SEEDS) : ImmutableSet.<Entity>of();
-            MutableSet<Entity> result = MutableSet.of();
-            result.addAll(Sets.intersection(currentSeeds, ImmutableSet.copyOf(Iterables.concat(contenders.values()))));
-            for (CassandraDatacenter cluster : contenders.keySet()) {
-                Set<Entity> contendersInCluster = Sets.newLinkedHashSet(contenders.get(cluster));
-                if (contendersInCluster.size() > 0 && Sets.intersection(result, contendersInCluster).isEmpty()) {
-                    result.add(Iterables.getFirst(contendersInCluster, null));
-                }
-            }
-            result.addAll(Iterables.concat(contenders.values()));
-            return ImmutableSet.copyOf(Iterables.limit(result, num));
-        }
-        private boolean containsDownEntity(Set<Entity> seeds) {
-            for (Entity seed : seeds) {
-                if (!isViableSeed(seed)) {
-                    return true;
-                }
-            }
-            return false;
-        }
-        public boolean isViableSeed(Entity member) {
-            // TODO remove duplication from CassandraClusterImpl.SeedTracker.isViableSeed
-            boolean managed = Entities.isManaged(member);
-            String hostname = member.getAttribute(Attributes.HOSTNAME);
-            boolean serviceUp = Boolean.TRUE.equals(member.getAttribute(Attributes.SERVICE_UP));
-            Lifecycle serviceState = member.getAttribute(Attributes.SERVICE_STATE_ACTUAL);
-            boolean hasFailed = !managed || (serviceState == Lifecycle.ON_FIRE) || (serviceState == Lifecycle.RUNNING && !serviceUp) || (serviceState == Lifecycle.STOPPED);
-            boolean result = (hostname != null && !hasFailed);
-            if (log.isTraceEnabled()) log.trace("Node {} in Fabric {}: viableSeed={}; hostname={}; serviceUp={}; serviceState={}; hasFailed={}", new Object[] {member, CassandraFabricImpl.this, result, hostname, serviceUp, serviceState, hasFailed});
-            return result;
-        }
-    };
-
-    public CassandraFabricImpl() {
-    }
-
-    @Override
-    public void init() {
-        super.init();
-
-        if (!getConfigRaw(CassandraDatacenter.SEED_SUPPLIER, true).isPresentAndNonNull())
-            setConfig(CassandraDatacenter.SEED_SUPPLIER, getSeedSupplier());
-        
-        // track members
-        addPolicy(PolicySpec.create(MemberTrackingPolicy.class)
-                .displayName("Cassandra Fabric Tracker")
-                .configure("group", this));
-
-        // Track first node's startup
-        subscribeToMembers(this, CassandraDatacenter.FIRST_NODE_STARTED_TIME_UTC, new SensorEventListener<Long>() {
-            @Override
-            public void onEvent(SensorEvent<Long> event) {
-                Long oldval = getAttribute(CassandraDatacenter.FIRST_NODE_STARTED_TIME_UTC);
-                Long newval = event.getValue();
-                if (oldval == null && newval != null) {
-                    setAttribute(CassandraDatacenter.FIRST_NODE_STARTED_TIME_UTC, newval);
-                    for (CassandraDatacenter member : Iterables.filter(getMembers(), CassandraDatacenter.class)) {
-                        ((EntityInternal)member).setAttribute(CassandraDatacenter.FIRST_NODE_STARTED_TIME_UTC, newval);
-                    }
-                }
-            }
-        });
-        
-        // Track the datacenters for this cluster
-        subscribeToMembers(this, CassandraDatacenter.DATACENTER_USAGE, new SensorEventListener<Multimap<String,Entity>>() {
-            @Override
-            public void onEvent(SensorEvent<Multimap<String,Entity>> event) {
-                Multimap<String, Entity> usage = calculateDatacenterUsage();
-                setAttribute(DATACENTER_USAGE, usage);
-                setAttribute(DATACENTERS, usage.keySet());
-            }
-        });
-        subscribe(this, DynamicGroup.MEMBER_REMOVED, new SensorEventListener<Entity>() {
-            @Override public void onEvent(SensorEvent<Entity> event) {
-                Multimap<String, Entity> usage = calculateDatacenterUsage();
-                setAttribute(DATACENTER_USAGE, usage);
-                setAttribute(DATACENTERS, usage.keySet());
-            }
-        });
-    }
-
-    public static class MemberTrackingPolicy extends AbstractMembershipTrackingPolicy {
-        @Override
-        protected void onEntityChange(Entity member) {
-            if (log.isDebugEnabled()) log.debug("Location {} updated in Fabric {}", member, entity);
-            ((CassandraFabricImpl)entity).update();
-        }
-        @Override
-        protected void onEntityAdded(Entity member) {
-            if (log.isDebugEnabled()) log.debug("Location {} added to Fabric {}", member, entity);
-            ((CassandraFabricImpl)entity).update();
-        }
-        @Override
-        protected void onEntityRemoved(Entity member) {
-            if (log.isDebugEnabled()) log.debug("Location {} removed from Fabric {}", member, entity);
-            ((CassandraFabricImpl)entity).update();
-        }
-    };
-
-    protected int getSeedQuorumSize() {
-        Integer quorumSize = getConfig(INITIAL_QUORUM_SIZE);
-        if (quorumSize!=null && quorumSize>0)
-            return quorumSize;
-
-        int initialSizeSum = 0;
-        for (CassandraDatacenter cluster : Iterables.filter(getMembers(), CassandraDatacenter.class)) {
-            initialSizeSum += cluster.getConfig(CassandraDatacenter.INITIAL_SIZE);
-        }
-        if (initialSizeSum>5) initialSizeSum /= 2;
-        else if (initialSizeSum>3) initialSizeSum -= 2;
-        else if (initialSizeSum>2) initialSizeSum -= 1;
-        
-        return Math.min(Math.max(initialSizeSum, 1), CassandraFabric.DEFAULT_SEED_QUORUM);
-    }
-
-    /**
-     * Sets the default {@link #MEMBER_SPEC} to describe the Cassandra sub-clusters.
-     */
-    @Override
-    protected EntitySpec<?> getMemberSpec() {
-        // Need to set the seedSupplier, even if the caller has overridden the CassandraCluster config
-        // (unless they've explicitly overridden the seedSupplier as well!)
-        // TODO probably don't need to anymore, as it is set on the Fabric here -- just make sure there is a default!
-        EntitySpec<?> custom = getConfig(MEMBER_SPEC);
-        if (custom == null) {
-            return EntitySpec.create(CassandraDatacenter.class)
-                    .configure(CassandraDatacenter.SEED_SUPPLIER, getSeedSupplier());
-        } else if (custom.getConfig().containsKey(CassandraDatacenter.SEED_SUPPLIER) || custom.getFlags().containsKey("seedSupplier")) {
-            return custom;
-        } else {
-            return EntitySpec.create(custom)
-                    .configure(CassandraDatacenter.SEED_SUPPLIER, getSeedSupplier());
-        }
-    }
-    
-    @Override
-    protected Entity createCluster(Location location, Map flags) {
-        Function<Location, String> dataCenterNamer = getConfig(DATA_CENTER_NAMER);
-        if (dataCenterNamer != null) {
-            flags = ImmutableMap.builder()
-                .putAll(flags)
-                .put(CassandraNode.DATACENTER_NAME, dataCenterNamer.apply(location))
-                .build();
-        }
-        return super.createCluster(location, flags);
-    }
-
-    /**
-     * Prefers one node per location, and then others from anywhere.
-     * Then trims the result down to the "quorumSize".
-     */
-    public Supplier<Set<Entity>> getSeedSupplier() {
-        return defaultSeedSupplier;
-    }
-
-    @Override
-    public void start(Collection<? extends Location> locations) {
-        super.start(locations);
-
-        connectSensors();
-
-        // TODO wait until all nodes which we think are up are consistent 
-        // i.e. all known nodes use the same schema, as reported by
-        // SshEffectorTasks.ssh("echo \"describe cluster;\" | /bin/cassandra-cli");
-        // once we've done that we can revert to using 2 seed nodes.
-        // see CassandraCluster.DEFAULT_SEED_QUORUM
-        Time.sleep(getConfig(CassandraDatacenter.DELAY_BEFORE_ADVERTISING_CLUSTER));
-
-        update();
-    }
-
-    protected void connectSensors() {
-        connectEnrichers();
-    }
-    
-    protected void connectEnrichers() {
-        // TODO Aggregate across sub-clusters
-
-        subscribeToMembers(this, SERVICE_UP, new SensorEventListener<Boolean>() {
-            @Override public void onEvent(SensorEvent<Boolean> event) {
-                setAttribute(SERVICE_UP, calculateServiceUp());
-            }
-        });
-    }
-
-    @Override
-    public void stop() {
-        disconnectSensors();
-        
-        super.stop();
-    }
-    
-    protected void disconnectSensors() {
-    }
-
-    protected boolean calculateServiceUp() {
-        Optional<Entity> upNode = Iterables.tryFind(getMembers(), EntityPredicates.attributeEqualTo(SERVICE_UP, Boolean.TRUE));
-        return upNode.isPresent();
-    }
-
-    protected Multimap<String, Entity> calculateDatacenterUsage() {
-        Multimap<String, Entity> result = LinkedHashMultimap.<String, Entity>create();
-        for (CassandraDatacenter member : Iterables.filter(getMembers(), CassandraDatacenter.class)) {
-            Multimap<String, Entity> memberUsage = member.getAttribute(CassandraDatacenter.DATACENTER_USAGE);
-            if (memberUsage != null) result.putAll(memberUsage);
-        }
-        return result;
-    }
-
-    @Override
-    public void update() {
-        synchronized (mutex) {
-            for (CassandraDatacenter member : Iterables.filter(getMembers(), CassandraDatacenter.class)) {
-                member.update();
-            }
-
-            calculateServiceUp();
-
-            // Choose the first available location to set host and port (and compute one-up)
-            Optional<Entity> upNode = Iterables.tryFind(getMembers(), EntityPredicates.attributeEqualTo(SERVICE_UP, Boolean.TRUE));
-
-            if (upNode.isPresent()) {
-                setAttribute(HOSTNAME, upNode.get().getAttribute(Attributes.HOSTNAME));
-                setAttribute(THRIFT_PORT, upNode.get().getAttribute(CassandraNode.THRIFT_PORT));
-            }
-        }
-    }
-}

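For reference, the getSeedQuorumSize logic above reduces the summed INITIAL_SIZE of the member datacenters before capping it. A minimal standalone sketch of that arithmetic (illustrative only; the cap value of 5 for DEFAULT_SEED_QUORUM is an assumption, not taken from this commit):

    public class SeedQuorumSketch {
        static final int DEFAULT_SEED_QUORUM = 5; // assumed cap, for illustration

        static int seedQuorumSize(int initialSizeSum) {
            if (initialSizeSum > 5) initialSizeSum /= 2;      // large fabrics: seed with half the nodes
            else if (initialSizeSum > 3) initialSizeSum -= 2; // 4-5 nodes: drop two
            else if (initialSizeSum > 2) initialSizeSum -= 1; // 3 nodes: drop one
            return Math.min(Math.max(initialSizeSum, 1), DEFAULT_SEED_QUORUM);
        }

        public static void main(String[] args) {
            // e.g. two datacenters of INITIAL_SIZE 3 => sum 6 => 3 seeds
            System.out.println(seedQuorumSize(6));
        }
    }
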
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraNode.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraNode.java b/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraNode.java
deleted file mode 100644
index aa8d445..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraNode.java
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import java.math.BigInteger;
-import java.util.Set;
-
-import org.apache.brooklyn.catalog.Catalog;
-import brooklyn.config.ConfigKey;
-import brooklyn.entity.Effector;
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.BrooklynConfigKeys;
-import brooklyn.entity.basic.ConfigKeys;
-import brooklyn.entity.basic.SoftwareProcess;
-import brooklyn.entity.database.DatastoreMixins;
-import brooklyn.entity.java.UsesJavaMXBeans;
-import brooklyn.entity.java.UsesJmx;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
-import brooklyn.event.basic.BasicConfigKey;
-import brooklyn.event.basic.PortAttributeSensorAndConfigKey;
-import brooklyn.event.basic.Sensors;
-import brooklyn.location.basic.PortRanges;
-import brooklyn.util.flags.SetFromFlag;
-import brooklyn.util.time.Duration;
-
-import com.google.common.reflect.TypeToken;
-
-/**
- * An {@link brooklyn.entity.Entity} that represents a Cassandra node in a {@link CassandraDatacenter}.
- */
-@Catalog(name="Apache Cassandra Node", description="Cassandra is a highly scalable, eventually " +
-        "consistent, distributed, structured key-value store which provides a ColumnFamily-based data model " +
-        "richer than typical key/value systems", iconUrl="classpath:///cassandra-logo.jpeg")
-@ImplementedBy(CassandraNodeImpl.class)
-public interface CassandraNode extends DatastoreMixins.DatastoreCommon, SoftwareProcess, UsesJmx, UsesJavaMXBeans, DatastoreMixins.HasDatastoreUrl, DatastoreMixins.CanExecuteScript {
-
-    @SetFromFlag("version")
-    ConfigKey<String> SUGGESTED_VERSION = ConfigKeys.newConfigKeyWithDefault(SoftwareProcess.SUGGESTED_VERSION, "1.2.16");
-    // when this changes remember to put a copy under releng2:/var/www/developer/brooklyn/repository/ !
-    // TODO experiment with supporting 2.0.x
-
-    @SetFromFlag("downloadUrl")
-    BasicAttributeSensorAndConfigKey<String> DOWNLOAD_URL = new BasicAttributeSensorAndConfigKey<String>(
-            SoftwareProcess.DOWNLOAD_URL, "${driver.mirrorUrl}/${version}/apache-cassandra-${version}-bin.tar.gz");
-
-    /** download mirror, if desired */
-    @SetFromFlag("mirrorUrl")
-    ConfigKey<String> MIRROR_URL = new BasicConfigKey<String>(String.class, "cassandra.install.mirror.url", "URL of mirror", 
-        "http://www.mirrorservice.org/sites/ftp.apache.org/cassandra"
-        // for older versions, but slower:
-//        "http://archive.apache.org/dist/cassandra/"
-        );
-
-    @SetFromFlag("tgzUrl")
-    ConfigKey<String> TGZ_URL = new BasicConfigKey<String>(String.class, "cassandra.install.tgzUrl", "URL of TGZ download file");
-
-    @SetFromFlag("clusterName")
-    BasicAttributeSensorAndConfigKey<String> CLUSTER_NAME = CassandraDatacenter.CLUSTER_NAME;
-
-    @SetFromFlag("snitchName")
-    ConfigKey<String> ENDPOINT_SNITCH_NAME = CassandraDatacenter.ENDPOINT_SNITCH_NAME;
-
-    @SetFromFlag("gossipPort")
-    PortAttributeSensorAndConfigKey GOSSIP_PORT = new PortAttributeSensorAndConfigKey("cassandra.gossip.port", "Cassandra Gossip communications port", PortRanges.fromString("7000+"));
-
-    @SetFromFlag("sslGossipPort")
-    PortAttributeSensorAndConfigKey SSL_GOSSIP_PORT = new PortAttributeSensorAndConfigKey("cassandra.ssl-gossip.port", "Cassandra Gossip SSL communications port", PortRanges.fromString("7001+"));
-
-    @SetFromFlag("thriftPort")
-    PortAttributeSensorAndConfigKey THRIFT_PORT = new PortAttributeSensorAndConfigKey("cassandra.thrift.port", "Cassandra Thrift RPC port", PortRanges.fromString("9160+"));
-
-    @SetFromFlag("nativePort")
-    PortAttributeSensorAndConfigKey NATIVE_TRANSPORT_PORT = new PortAttributeSensorAndConfigKey("cassandra.native.port", "Cassandra Native Transport port", PortRanges.fromString("9042+"));
-
-    @SetFromFlag("rmiRegistryPort")
-    // cassandra nodetool and others want 7199 - not required, but useful
-    PortAttributeSensorAndConfigKey RMI_REGISTRY_PORT = new PortAttributeSensorAndConfigKey(UsesJmx.RMI_REGISTRY_PORT, 
-        PortRanges.fromInteger(7199));
-
-    // some of the cassandra tooling (e.g. nodetool) uses RMI, but we want JMXMP, so do both!
-    ConfigKey<JmxAgentModes> JMX_AGENT_MODE = ConfigKeys.newConfigKeyWithDefault(UsesJmx.JMX_AGENT_MODE, JmxAgentModes.JMXMP_AND_RMI);
-    
-    @SetFromFlag("customSnitchJarUrl")
-    ConfigKey<String> CUSTOM_SNITCH_JAR_URL = ConfigKeys.newStringConfigKey("cassandra.config.customSnitchUrl", 
-            "URL for a jar file to be uploaded (e.g. \"classpath://brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar\"); defaults to null which means nothing to upload", 
-            null);
-
-    @SetFromFlag("cassandraConfigTemplateUrl")
-    ConfigKey<String> CASSANDRA_CONFIG_TEMPLATE_URL = ConfigKeys.newStringConfigKey(
-            "cassandra.config.templateUrl", "A URL (in freemarker format) for a cassandra.yaml config file (in freemarker format)", 
-            "classpath://brooklyn/entity/nosql/cassandra/cassandra-${entity.majorMinorVersion}.yaml");
-
-    @SetFromFlag("cassandraConfigFileName")
-    ConfigKey<String> CASSANDRA_CONFIG_FILE_NAME = ConfigKeys.newStringConfigKey(
-            "cassandra.config.fileName", "Name for the copied config file", "cassandra.yaml");
-
-    @SetFromFlag("cassandraRackdcConfigTemplateUrl")
-    ConfigKey<String> CASSANDRA_RACKDC_CONFIG_TEMPLATE_URL = ConfigKeys.newStringConfigKey(
-            "cassandra.config.rackdc.templateUrl", "Template file (in freemarker format) for the cassandra-rackdc.properties config file", 
-            "classpath://brooklyn/entity/nosql/cassandra/cassandra-rackdc.properties");
-
-    @SetFromFlag("cassandraRackdcConfigFileName")
-    ConfigKey<String> CASSANDRA_RACKDC_CONFIG_FILE_NAME = ConfigKeys.newStringConfigKey(
-            "cassandra.config.rackdc.fileName", "Name for the copied rackdc config file (used for configuring replication, when a suitable snitch is used)", "cassandra-rackdc.properties");
-    
-    @SetFromFlag("datacenterName")
-    BasicAttributeSensorAndConfigKey<String> DATACENTER_NAME = new BasicAttributeSensorAndConfigKey<String>(
-            String.class, "cassandra.replication.datacenterName", "Datacenter name (used for configuring replication, when a suitable snitch is used)", 
-            null);
-
-    @SetFromFlag("rackName")
-    BasicAttributeSensorAndConfigKey<String> RACK_NAME = new BasicAttributeSensorAndConfigKey<String>(
-            String.class, "cassandra.replication.rackName", "Rack name (used for configuring replication, when a suitable snitch is used)", 
-            null);
-
-    ConfigKey<Integer> NUM_TOKENS_PER_NODE = ConfigKeys.newIntegerConfigKey("cassandra.numTokensPerNode",
-            "Number of tokens per node; if using vnodes, should set this to a value like 256",
-            1);
-    
-    /**
-     * @deprecated since 0.7; use {@link #TOKENS}
-     */
-    @SetFromFlag("token")
-    @Deprecated
-    BasicAttributeSensorAndConfigKey<BigInteger> TOKEN = new BasicAttributeSensorAndConfigKey<BigInteger>(
-            BigInteger.class, "cassandra.token", "Cassandra Token");
-
-    @SetFromFlag("tokens")
-    BasicAttributeSensorAndConfigKey<Set<BigInteger>> TOKENS = new BasicAttributeSensorAndConfigKey<Set<BigInteger>>(
-            new TypeToken<Set<BigInteger>>() {}, "cassandra.tokens", "Cassandra Tokens");
-
-    AttributeSensor<Integer> PEERS = Sensors.newIntegerSensor( "cassandra.peers", "Number of peers in cluster");
-
-    AttributeSensor<Integer> LIVE_NODE_COUNT = Sensors.newIntegerSensor( "cassandra.liveNodeCount", "Number of live nodes in cluster");
-
-    /* Metrics for read/write performance. */
-
-    AttributeSensor<Long> READ_PENDING = Sensors.newLongSensor("cassandra.read.pending", "Current pending ReadStage tasks");
-    AttributeSensor<Integer> READ_ACTIVE = Sensors.newIntegerSensor("cassandra.read.active", "Current active ReadStage tasks");
-    AttributeSensor<Long> READ_COMPLETED = Sensors.newLongSensor("cassandra.read.completed", "Total completed ReadStage tasks");
-    AttributeSensor<Long> WRITE_PENDING = Sensors.newLongSensor("cassandra.write.pending", "Current pending MutationStage tasks");
-    AttributeSensor<Integer> WRITE_ACTIVE = Sensors.newIntegerSensor("cassandra.write.active", "Current active MutationStage tasks");
-    AttributeSensor<Long> WRITE_COMPLETED = Sensors.newLongSensor("cassandra.write.completed", "Total completed MutationStage tasks");
-    
-    AttributeSensor<Boolean> SERVICE_UP_JMX = Sensors.newBooleanSensor("cassandra.service.jmx.up", "Whether JMX is up for this service");
-    AttributeSensor<Long> THRIFT_PORT_LATENCY = Sensors.newLongSensor("cassandra.thrift.latency", "Latency for thrift port connection (ms) or null if down");
-
-    AttributeSensor<Double> READS_PER_SECOND_LAST = Sensors.newDoubleSensor("cassandra.reads.perSec.last", "Reads/sec (last datapoint)");
-    AttributeSensor<Double> WRITES_PER_SECOND_LAST = Sensors.newDoubleSensor("cassandra.write.perSec.last", "Writes/sec (last datapoint)");
-
-    AttributeSensor<Double> THRIFT_PORT_LATENCY_IN_WINDOW = Sensors.newDoubleSensor("cassandra.thrift.latency.windowed", "Latency for thrift port (ms, averaged over time window)");
-    AttributeSensor<Double> READS_PER_SECOND_IN_WINDOW = Sensors.newDoubleSensor("cassandra.reads.perSec.windowed", "Reads/sec (over time window)");
-    AttributeSensor<Double> WRITES_PER_SECOND_IN_WINDOW = Sensors.newDoubleSensor("cassandra.writes.perSec.windowed", "Writes/sec (over time window)");
-
-    @SuppressWarnings({ "rawtypes", "unchecked" })
-    ConfigKey<Set<Entity>> INITIAL_SEEDS = (ConfigKey)ConfigKeys.newConfigKey(Set.class, "cassandra.cluster.seeds.initial", 
-            "List of cluster nodes to seed this node");
-
-    ConfigKey<Duration> START_TIMEOUT = ConfigKeys.newConfigKeyWithDefault(BrooklynConfigKeys.START_TIMEOUT, Duration.FIVE_MINUTES);
-    
-    ConfigKey<String> LISTEN_ADDRESS_SENSOR = ConfigKeys.newStringConfigKey("cassandra.listenAddressSensor", "sensor name from which to take the listen address; default (null) is a smart lookup");
-    ConfigKey<String> BROADCAST_ADDRESS_SENSOR = ConfigKeys.newStringConfigKey("cassandra.broadcastAddressSensor", "sensor name from which to take the broadcast address; default (null) is a smart lookup");
-    ConfigKey<String> RPC_ADDRESS_SENSOR = ConfigKeys.newStringConfigKey("cassandra.rpcAddressSensor", "sensor name from which to take the RPC address; default (null) is 0.0.0.0");
-
-    Effector<String> EXECUTE_SCRIPT = CassandraDatacenter.EXECUTE_SCRIPT;
-
-    /* Accessors used from template */
-    
-    String getMajorMinorVersion();
-    Integer getGossipPort();
-    Integer getSslGossipPort();
-    Integer getThriftPort();
-    Integer getNativeTransportPort();
-    String getClusterName();
-    String getListenAddress();
-    String getBroadcastAddress();
-    String getRpcAddress();
-    String getSeeds();
-    
-    String getPrivateIp();
-    String getPublicIp();
-    
-    /**
-     * In range 0 to (2^127)-1; or null if not yet set or known.
-     * Returns the first token if more than one token.
-     * @deprecated since 0.7; see {@link #getTokens()}
-     */
-    @Deprecated
-    BigInteger getToken();
-
-    int getNumTokensPerNode();
-
-    Set<BigInteger> getTokens();
-
-    /**
-     * string value of the token (without the commas that freemarker introduces), or blank if none
-     * @deprecated since 0.7; use {@link #getTokensAsString()}
-     */
-    @Deprecated
-    String getTokenAsString();
-
-    /** string value of comma-separated tokens; or blank if none */
-    String getTokensAsString();
-
-    /* For configuration */
-    
-    void setToken(String token);
-    
-    /* Using Cassandra */
-    
-    String executeScript(String commands);
-    
-}

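As a usage sketch for the CassandraNode interface deleted above (hypothetical wiring, following the EntitySpec.create(...).configure(...) pattern that CassandraFabricImpl uses earlier in this commit; assumes the same package, so only EntitySpec needs importing):

    import brooklyn.entity.proxying.EntitySpec;

    public class CassandraNodeSpecSketch {
        // Build a node spec with vnodes enabled, per the NUM_TOKENS_PER_NODE docs:
        static EntitySpec<CassandraNode> nodeSpec() {
            return EntitySpec.create(CassandraNode.class)
                    .configure(CassandraNode.SUGGESTED_VERSION, "1.2.16")
                    .configure(CassandraNode.NUM_TOKENS_PER_NODE, 256);
        }
    }
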
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraNodeDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraNodeDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraNodeDriver.java
deleted file mode 100644
index 62850bd..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraNodeDriver.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import brooklyn.entity.java.JavaSoftwareProcessDriver;
-import brooklyn.util.task.system.ProcessTaskWrapper;
-
-public interface CassandraNodeDriver extends JavaSoftwareProcessDriver {
-
-    Integer getGossipPort();
-
-    Integer getSslGossipPort();
-
-    Integer getThriftPort();
-
-    Integer getNativeTransportPort();
-
-    String getClusterName();
-
-    String getCassandraConfigTemplateUrl();
-
-    String getCassandraConfigFileName();
-
-    boolean isClustered();
-
-    ProcessTaskWrapper<Integer> executeScriptAsync(String commands);
-
-    /** returns the address that the given hostname resolves to at the target */
-    String getResolvedAddress(String hostname);
-
-}


[07/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak.conf
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak.conf b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak.conf
new file mode 100644
index 0000000..125fa77
--- /dev/null
+++ b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak.conf
@@ -0,0 +1,494 @@
+## Brooklyn note: file from 2.0.1 Ubuntu install, with erlang section added, and ports templated
+
+## Where to emit the default log messages (typically at 'info'
+## severity):
+## off: disabled
+## file: the file specified by log.console.file
+## console: to standard output (seen when using `riak attach-direct`)
+## both: log.console.file and standard out.
+## 
+## Default: file
+## 
+## Acceptable values:
+##   - one of: off, file, console, both
+log.console = file
+
+## The severity level of the console log, default is 'info'.
+## 
+## Default: info
+## 
+## Acceptable values:
+##   - one of: debug, info, notice, warning, error, critical, alert, emergency, none
+log.console.level = info
+
+## When 'log.console' is set to 'file' or 'both', the file where
+## console messages will be logged.
+## 
+## Default: $(platform_log_dir)/console.log
+## 
+## Acceptable values:
+##   - the path to a file
+log.console.file = $(platform_log_dir)/console.log
+
+## The file where error messages will be logged.
+## 
+## Default: $(platform_log_dir)/error.log
+## 
+## Acceptable values:
+##   - the path to a file
+log.error.file = $(platform_log_dir)/error.log
+
+## When set to 'on', enables log output to syslog.
+## 
+## Default: off
+## 
+## Acceptable values:
+##   - on or off
+log.syslog = off
+
+## Whether to enable the crash log.
+## 
+## Default: on
+## 
+## Acceptable values:
+##   - on or off
+log.crash = on
+
+## If the crash log is enabled, the file where its messages will
+## be written.
+## 
+## Default: $(platform_log_dir)/crash.log
+## 
+## Acceptable values:
+##   - the path to a file
+log.crash.file = $(platform_log_dir)/crash.log
+
+## Maximum size in bytes of individual messages in the crash log
+## 
+## Default: 64KB
+## 
+## Acceptable values:
+##   - a byte size with units, e.g. 10GB
+log.crash.maximum_message_size = 64KB
+
+## Maximum size of the crash log in bytes, before it is rotated
+## 
+## Default: 10MB
+## 
+## Acceptable values:
+##   - a byte size with units, e.g. 10GB
+log.crash.size = 10MB
+
+## The schedule on which to rotate the crash log.  For more
+## information see:
+## https://github.com/basho/lager/blob/master/README.md#internal-log-rotation
+## 
+## Default: $D0
+## 
+## Acceptable values:
+##   - text
+log.crash.rotation = $D0
+
+## The number of rotated crash logs to keep. When set to
+## 'current', only the current open log file is kept.
+## 
+## Default: 5
+## 
+## Acceptable values:
+##   - an integer
+##   - the text "current"
+log.crash.rotation.keep = 5
+
+## Name of the Erlang node
+## 
+## Default: riak@127.0.0.1
+## 
+## Acceptable values:
+##   - text
+nodename = riak@${driver.subnetHostname}
+
+## Cookie for distributed node communication.  All nodes in the
+## same cluster should use the same cookie or they will not be able to
+## communicate.
+## 
+## Default: riak
+## 
+## Acceptable values:
+##   - text
+distributed_cookie = riak
+
+## Sets the number of threads in async thread pool, valid range
+## is 0-1024. If thread support is available, the default is 64.
+## More information at: http://erlang.org/doc/man/erl.html
+## 
+## Default: 64
+## 
+## Acceptable values:
+##   - an integer
+erlang.async_threads = 64
+
+## The number of concurrent ports/sockets
+## Valid range is 1024-134217727
+## 
+## Default: 65536
+## 
+## Acceptable values:
+##   - an integer
+erlang.max_ports = 65536
+
+## Set scheduler forced wakeup interval. All run queues will be
+## scanned each Interval milliseconds. While there are sleeping
+## schedulers in the system, one scheduler will be woken for each
+## non-empty run queue found. An Interval of zero disables this
+## feature, which also is the default.
+## This feature is a workaround for lengthy executing native code, and
+## native code that do not bump reductions properly.
+## More information: http://www.erlang.org/doc/man/erl.html#+sfwi
+## 
+## Acceptable values:
+##   - an integer
+## erlang.schedulers.force_wakeup_interval = 500
+
+## Enable or disable scheduler compaction of load. By default
+## scheduler compaction of load is enabled. When enabled, load
+## balancing will strive for a load distribution which causes as many
+## scheduler threads as possible to be fully loaded (i.e., not run out
+## of work). This is accomplished by migrating load (e.g. runnable
+## processes) into a smaller set of schedulers when schedulers
+## frequently run out of work. When disabled, the frequency with which
+## schedulers run out of work will not be taken into account by the
+## load balancing logic.
+## More information: http://www.erlang.org/doc/man/erl.html#+scl
+## 
+## Acceptable values:
+##   - one of: true, false
+## erlang.schedulers.compaction_of_load = false
+
+## Enable or disable scheduler utilization balancing of load. By
+## default scheduler utilization balancing is disabled and instead
+## scheduler compaction of load is enabled which will strive for a
+## load distribution which causes as many scheduler threads as
+## possible to be fully loaded (i.e., not run out of work). When
+## scheduler utilization balancing is enabled the system will instead
+## try to balance scheduler utilization between schedulers. That is,
+## strive for equal scheduler utilization on all schedulers.
+## More information: http://www.erlang.org/doc/man/erl.html#+sub
+## 
+## Acceptable values:
+##   - one of: true, false
+## erlang.schedulers.utilization_balancing = true
+
+## Number of partitions in the cluster (only valid when first
+## creating the cluster). Must be a power of 2, minimum 8 and maximum
+## 1024.
+## 
+## Default: 64
+## 
+## Acceptable values:
+##   - an integer
+## ring_size = 64
+
+## Number of concurrent node-to-node transfers allowed.
+## 
+## Default: 2
+## 
+## Acceptable values:
+##   - an integer
+## transfer_limit = 2
+
+## Default cert location for https can be overridden
+## with the ssl config variable, for example:
+## 
+## Acceptable values:
+##   - the path to a file
+## ssl.certfile = $(platform_etc_dir)/cert.pem
+
+## Default key location for https can be overridden with the ssl
+## config variable, for example:
+## 
+## Acceptable values:
+##   - the path to a file
+## ssl.keyfile = $(platform_etc_dir)/key.pem
+
+## Default signing authority location for https can be overridden
+## with the ssl config variable, for example:
+## 
+## Acceptable values:
+##   - the path to a file
+## ssl.cacertfile = $(platform_etc_dir)/cacertfile.pem
+
+## DTrace support Do not enable 'dtrace' unless your Erlang/OTP
+## runtime is compiled to support DTrace.  DTrace is available in
+## R15B01 (supported by the Erlang/OTP official source package) and in
+## R14B04 via a custom source repository & branch.
+## 
+## Default: off
+## 
+## Acceptable values:
+##   - on or off
+dtrace = off
+
+## Platform-specific installation paths (substituted by rebar)
+## 
+## Default: /usr/sbin
+## 
+## Acceptable values:
+##   - the path to a directory
+platform_bin_dir = /usr/sbin
+
+## 
+## Default: /var/lib/riak
+## 
+## Acceptable values:
+##   - the path to a directory
+platform_data_dir = /var/lib/riak
+
+## 
+## Default: /etc/riak
+## 
+## Acceptable values:
+##   - the path to a directory
+platform_etc_dir = /etc/riak
+
+## 
+## Default: /usr/lib64/riak/lib
+## 
+## Acceptable values:
+##   - the path to a directory
+platform_lib_dir = /usr/lib64/riak/lib
+
+## 
+## Default: /var/log/riak
+## 
+## Acceptable values:
+##   - the path to a directory
+platform_log_dir = /var/log/riak
+
+## Enable consensus subsystem. Set to 'on' to enable the
+## consensus subsystem used for strongly consistent Riak operations.
+## 
+## Default: off
+## 
+## Acceptable values:
+##   - on or off
+## strong_consistency = on
+
+## listener.http.<name> is an IP address and TCP port that the Riak
+## HTTP interface will bind.
+## 
+## Default: 127.0.0.1:8098
+## 
+## Acceptable values:
+##   - an IP/port pair, e.g. 127.0.0.1:10011
+listener.http.internal = 0.0.0.0:${entity.riakWebPort?c}
+
+## listener.protobuf.<name> is an IP address and TCP port that the Riak
+## Protocol Buffers interface will bind.
+## 
+## Default: 127.0.0.1:8087
+## 
+## Acceptable values:
+##   - an IP/port pair, e.g. 127.0.0.1:10011
+listener.protobuf.internal = 0.0.0.0:${entity.riakPbPort?c}
+
+## The maximum length to which the queue of pending connections
+## may grow. If set, it must be an integer > 0. If you anticipate a
+## huge number of connections being initialized *simultaneously*, set
+## this number higher.
+## 
+## Default: 128
+## 
+## Acceptable values:
+##   - an integer
+## protobuf.backlog = 128
+
+## listener.https.<name> is an IP address and TCP port that the Riak
+## HTTPS interface will bind.
+## 
+## Acceptable values:
+##   - an IP/port pair, e.g. 127.0.0.1:10011
+## listener.https.internal = 127.0.0.1:8098
+
+## How Riak will repair out-of-sync keys. Some features require
+## this to be set to 'active', including search.
+## * active: out-of-sync keys will be repaired in the background
+## * passive: out-of-sync keys are only repaired on read
+## * active-debug: like active, but outputs verbose debugging
+## information
+## 
+## Default: active
+## 
+## Acceptable values:
+##   - one of: active, passive, active-debug
+anti_entropy = active
+
+## Specifies the storage engine used for Riak's key-value data
+## and secondary indexes (if supported).
+## 
+## Default: bitcask
+## 
+## Acceptable values:
+##   - one of: bitcask, leveldb, memory, multi
+storage_backend = bitcask
+
+## Controls which binary representation of a riak value is stored
+## on disk.
+## * 0: Original erlang:term_to_binary format. Higher space overhead.
+## * 1: New format for more compact storage of small values.
+## 
+## Default: 1
+## 
+## Acceptable values:
+##   - the integer 1
+##   - the integer 0
+object.format = 1
+
+## Reading or writing objects bigger than this size will write a
+## warning in the logs.
+## 
+## Default: 5MB
+## 
+## Acceptable values:
+##   - a byte size with units, e.g. 10GB
+object.size.warning_threshold = 5MB
+
+## Writing an object bigger than this will send a failure to the
+## client.
+## 
+## Default: 50MB
+## 
+## Acceptable values:
+##   - a byte size with units, e.g. 10GB
+object.size.maximum = 50MB
+
+## Writing an object with more than this number of siblings will
+## generate a warning in the logs.
+## 
+## Default: 25
+## 
+## Acceptable values:
+##   - an integer
+object.siblings.warning_threshold = 25
+
+## Writing an object with more than this number of siblings will
+## send a failure to the client.
+## 
+## Default: 100
+## 
+## Acceptable values:
+##   - an integer
+object.siblings.maximum = 100
+
+## A path under which bitcask data files will be stored.
+## 
+## Default: $(platform_data_dir)/bitcask
+## 
+## Acceptable values:
+##   - the path to a directory
+bitcask.data_root = $(platform_data_dir)/bitcask
+
+## Configure how Bitcask writes data to disk.
+## erlang: Erlang's built-in file API
+## nif: Direct calls to the POSIX C API
+## The NIF mode provides higher throughput for certain
+## workloads, but has the potential to negatively impact
+## the Erlang VM, leading to higher worst-case latencies
+## and possible throughput collapse.
+## 
+## Default: erlang
+## 
+## Acceptable values:
+##   - one of: erlang, nif
+bitcask.io_mode = erlang
+
+## Set to 'off' to disable the admin panel.
+## 
+## Default: off
+## 
+## Acceptable values:
+##   - on or off
+riak_control = on
+
+## Authentication mode used for access to the admin panel.
+## 
+## Default: off
+## 
+## Acceptable values:
+##   - one of: off, userlist
+riak_control.auth.mode = off
+
+## If riak control's authentication mode (riak_control.auth.mode)
+## is set to 'userlist' then this is the list of usernames and
+## passwords for access to the admin panel.
+## To create users with given names, add entries of the format:
+## riak_control.auth.user.USERNAME.password = PASSWORD
+## replacing USERNAME with the desired username and PASSWORD with the
+## desired password for that user.
+## 
+## Acceptable values:
+##   - text
+## riak_control.auth.user.admin.password = pass
+
+## This parameter defines the percentage of total server memory
+## to assign to LevelDB. LevelDB will dynamically adjust its internal
+## cache sizes to stay within this size.  The memory size can
+## alternately be assigned as a byte count via leveldb.maximum_memory
+## instead.
+## 
+## Default: 70
+## 
+## Acceptable values:
+##   - an integer
+leveldb.maximum_memory.percent = 70
+
+## To enable Search set this 'on'.
+## 
+## Default: off
+## 
+## Acceptable values:
+##   - on or off
+search = ${entity.isSearchEnabled()?string('on','off')}
+
+## How long Riak will wait for Solr to start. The start sequence
+## will be tried twice. If both attempts timeout, then the Riak node
+## will be shutdown. This may need to be increased as more data is
+## indexed and Solr takes longer to start. Values lower than 1s will
+## be rounded up to the minimum 1s.
+## 
+## Default: 30s
+## 
+## Acceptable values:
+##   - a time duration with units, e.g. '10s' for 10 seconds
+search.solr.start_timeout = 30s
+
+## The port number which Solr binds to.
+## NOTE: Binds on every interface.
+## 
+## Default: 8093
+## 
+## Acceptable values:
+##   - an integer
+search.solr.port = ${entity.searchSolrPort?c}
+
+## The port number which Solr JMX binds to.
+## NOTE: Binds on every interface.
+## 
+## Default: 8985
+## 
+## Acceptable values:
+##   - an integer
+search.solr.jmx_port = ${entity.searchSolrJmxPort?c}
+
+## The options to pass to the Solr JVM.  Non-standard options,
+## i.e. -XX, may not be portable across JVM implementations.
+## E.g. -XX:+UseCompressedStrings
+## 
+## Default: -d64 -Xms1g -Xmx1g -XX:+UseStringCache -XX:+UseCompressedOops
+## 
+## Acceptable values:
+##   - text
+search.solr.jvm_options = -d64 -Xms1g -Xmx1g -XX:+UseStringCache -XX:+UseCompressedOops
+
+## erlang, constrain port range so we can open the internal firewall ports
+erlang.distribution.port_range.minimum = ${entity.erlangPortRangeStart?c}
+erlang.distribution.port_range.maximum = ${entity.erlangPortRangeEnd?c}

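The ${...?c} substitutions in the template above use FreeMarker's "computer" builtin, which renders numbers without locale grouping separators, so a port such as 8098 is never emitted as "8,098". A minimal standalone sketch of the effect (class name, FreeMarker version and template snippet are illustrative, not from this commit):

    import freemarker.template.Configuration;
    import freemarker.template.Template;

    import java.io.StringReader;
    import java.io.StringWriter;
    import java.util.Collections;

    public class PortTemplateSketch {
        public static void main(String[] args) throws Exception {
            Configuration cfg = new Configuration(Configuration.VERSION_2_3_21);
            Template t = new Template("riak.conf", new StringReader(
                    "listener.http.internal = 0.0.0.0:${port?c}"), cfg);
            StringWriter out = new StringWriter();
            t.process(Collections.singletonMap("port", 8098), out);
            System.out.println(out); // 0.0.0.0:8098
        }
    }
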
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak.md
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak.md b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak.md
new file mode 100644
index 0000000..1523b5f
--- /dev/null
+++ b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak.md
@@ -0,0 +1,67 @@
+# Riak Examples
+
+Here is a selection of examples showing how to deploy Riak.
+
+
+### A Single-Node Deployment
+
+```
+location: YOUR_CLOUD
+services:
+- type: org.apache.brooklyn.entity.nosql.riak.RiakNode
+```
+
+
+### A Cluster
+
+```
+services:
+- type: org.apache.brooklyn.entity.nosql.riak.RiakCluster
+  location: YOUR_CLOUD
+  initialSize: 5
+```
+
+
+### A Cluster at a Specific Version with a Web App
+
+```
+services:
+- type: org.apache.brooklyn.entity.nosql.riak.RiakCluster
+  id: cluster
+  brooklyn.config:
+    initialSize: 2
+    install.version: 2.0.0
+- type: brooklyn.entity.webapp.ControlledDynamicWebAppCluster
+  brooklyn.config:
+    initialSize: 2
+    wars.root: https://s3-eu-west-1.amazonaws.com/brooklyn-clocker/brooklyn-example-hello-world-sql-webapp.war
+    java.sysprops: 
+      brooklyn.example.riak.nodes: $brooklyn:component("cluster").attributeWhenReady("riak.cluster.nodeList")
+```
+
+----
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.

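The final example above wires the webapp to the cluster through the riak.cluster.nodeList sensor. A hypothetical Java equivalent of that dependent-configuration step (the sensor's String type is assumed from the YAML, and the DependentConfiguration helper is Brooklyn's standard mechanism for attributeWhenReady; none of this is taken from the commit itself):

    import brooklyn.entity.Entity;
    import brooklyn.event.AttributeSensor;
    import brooklyn.event.basic.DependentConfiguration;
    import brooklyn.event.basic.Sensors;
    import brooklyn.management.Task;

    public class RiakWiringSketch {
        // Assumed sensor, mirroring the "riak.cluster.nodeList" name in the YAML above:
        static final AttributeSensor<String> NODE_LIST =
                Sensors.newStringSensor("riak.cluster.nodeList", "Riak cluster node list");

        // Deferred value that resolves once the cluster publishes its node list:
        static Task<String> riakNodesFor(Entity riakCluster) {
            return DependentConfiguration.attributeWhenReady(riakCluster, NODE_LIST);
        }
    }
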
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak.png
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak.png b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak.png
new file mode 100644
index 0000000..a230b04
Binary files /dev/null and b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak.png differ

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/vm.args
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/vm.args b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/vm.args
new file mode 100644
index 0000000..be58d78
--- /dev/null
+++ b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/vm.args
@@ -0,0 +1,64 @@
+##### Brooklyn note: File from OSX distribution of Riak 1.4.8
+
+## Name of the riak node
+-name riak@${driver.subnetHostname}
+
+## Cookie for distributed erlang.  All nodes in the same cluster
+## should use the same cookie or they will not be able to communicate.
+-setcookie riak
+
+## Heartbeat management; auto-restarts VM if it dies or becomes unresponsive
+## (Disabled by default..use with caution!)
+##-heart
+
+## Enable kernel poll and a few async threads
++K true
++A 64
+
+## Treat error_logger warnings as warnings
++W w
+
+## Increase number of concurrent ports/sockets
+-env ERL_MAX_PORTS 64000
+
+## Tweak GC to run more often 
+-env ERL_FULLSWEEP_AFTER 0
+
+## Set the location of crash dumps
+-env ERL_CRASH_DUMP ./log/erl_crash.dump
+
+## Raise the ETS table limit
+-env ERL_MAX_ETS_TABLES 256000
+
+## Force the erlang VM to use SMP
+-smp enable
+
+## For nodes with many busy_dist_port events, Basho recommends
+## raising the sender-side network distribution buffer size.
+## 32MB may not be sufficient for some workloads and is a suggested
+## starting point.
+## The Erlang/OTP default is 1024 (1 megabyte).
+## See: http://www.erlang.org/doc/man/erl.html#%2bzdbbl
+##+zdbbl 32768
+
+## Raise the default erlang process limit 
++P 256000
+
+## Erlang VM scheduler tuning.
+## Prerequisite: a patched VM from Basho, or a VM compiled separately
+## with this patch applied:
+##     https://gist.github.com/evanmcc/a599f4c6374338ed672e
+##+sfwi 500
+
+## Begin SSL distribution items, DO NOT DELETE OR EDIT THIS COMMENT
+
+## To enable SSL encryption of the Erlang intra-cluster communication,
+## un-comment the three lines below and make certain that the paths
+## point to correct PEM data files.  See docs TODO for details.
+
+## -proto_dist inet_ssl
+## -ssl_dist_opt client_certfile "${driver.riakEtcDir}/erlclient.pem"
+## -ssl_dist_opt server_certfile "${driver.riakEtcDir}/erlserver.pem"
+
+## End SSL distribution items, DO NOT DELETE OR EDIT THIS COMMENT
+

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/solr/solr.xml
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/solr/solr.xml b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/solr/solr.xml
new file mode 100644
index 0000000..6e12b5c
--- /dev/null
+++ b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/solr/solr.xml
@@ -0,0 +1,19 @@
+[#ftl]
+<?xml version="1.0" encoding="UTF-8"?>
+<solr>
+  <int name="coreLoadThreads">4</int>
+
+  <solrcloud>
+    <str name="host">${driver.hostname}</str>
+    <int name="hostPort">${entity.solrPort?c}</int>
+    <str name="hostContext">solr</str>
+    <int name="zkClientTimeout">15000</int>
+    <bool name="genericCoreNodeNames">true</bool>
+  </solrcloud>
+
+  <shardHandlerFactory name="shardHandlerFactory"
+    class="HttpShardHandlerFactory">
+    <int name="socketTimeout">0</int>
+    <int name="connTimeout">0</int>
+  </shardHandlerFactory>
+</solr>

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/AbstractCassandraNodeTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/AbstractCassandraNodeTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/AbstractCassandraNodeTest.java
deleted file mode 100644
index 4cc01c1..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/AbstractCassandraNodeTest.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import org.testng.annotations.BeforeMethod;
-
-import brooklyn.entity.BrooklynAppLiveTestSupport;
-import brooklyn.location.Location;
-
-/**
- * Cassandra test framework for integration and live tests.
- */
-public class AbstractCassandraNodeTest extends BrooklynAppLiveTestSupport {
-
-    protected Location testLocation;
-    protected CassandraNode cassandra;
-
-    @BeforeMethod(alwaysRun = true)
-    @Override
-    public void setUp() throws Exception {
-        super.setUp();
-        testLocation = app.newLocalhostProvisioningLocation();
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/AstyanaxSupport.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/AstyanaxSupport.java b/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/AstyanaxSupport.java
deleted file mode 100644
index 5395705..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/AstyanaxSupport.java
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-import static org.testng.Assert.assertEquals;
-import static org.testng.Assert.assertNull;
-import static org.testng.Assert.assertTrue;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.Assert;
-
-import brooklyn.entity.basic.Attributes;
-import brooklyn.util.exceptions.Exceptions;
-import brooklyn.util.text.Identifiers;
-import brooklyn.util.time.Duration;
-import brooklyn.util.time.Time;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.netflix.astyanax.AstyanaxContext;
-import com.netflix.astyanax.Cluster;
-import com.netflix.astyanax.Keyspace;
-import com.netflix.astyanax.MutationBatch;
-import com.netflix.astyanax.connectionpool.NodeDiscoveryType;
-import com.netflix.astyanax.connectionpool.OperationResult;
-import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
-import com.netflix.astyanax.connectionpool.exceptions.SchemaDisagreementException;
-import com.netflix.astyanax.connectionpool.impl.ConnectionPoolConfigurationImpl;
-import com.netflix.astyanax.connectionpool.impl.CountingConnectionPoolMonitor;
-import com.netflix.astyanax.impl.AstyanaxConfigurationImpl;
-import com.netflix.astyanax.model.Column;
-import com.netflix.astyanax.model.ColumnFamily;
-import com.netflix.astyanax.model.ColumnList;
-import com.netflix.astyanax.serializers.StringSerializer;
-import com.netflix.astyanax.thrift.ThriftFamilyFactory;
-
-/**
- * Cassandra testing using Astyanax API.
- */
-public class AstyanaxSupport {
-    private static final Logger log = LoggerFactory.getLogger(AstyanaxSupport.class);
-
-    public final String clusterName;
-    public final String hostname;
-    public final int thriftPort;
-    
-    public AstyanaxSupport(CassandraNode node) {
-        this(node.getClusterName(), node.getAttribute(Attributes.HOSTNAME), node.getThriftPort());
-    }
-    
-    public AstyanaxSupport(String clusterName, String hostname, int thriftPort) {
-        this.clusterName = clusterName;
-        this.hostname = hostname;
-        this.thriftPort = thriftPort;
-    }
-    
-    public AstyanaxContext<Keyspace> newAstyanaxContextForKeyspace(String keyspace) {
-        AstyanaxContext<Keyspace> context = new AstyanaxContext.Builder()
-                .forCluster(clusterName)
-                .forKeyspace(keyspace)
-                .withAstyanaxConfiguration(new AstyanaxConfigurationImpl()
-                        .setDiscoveryType(NodeDiscoveryType.NONE))
-                .withConnectionPoolConfiguration(new ConnectionPoolConfigurationImpl("BrooklynPool")
-                        .setPort(thriftPort)
-                        .setMaxConnsPerHost(1)
-                        .setConnectTimeout(5000) // 5s
-                        .setSeeds(String.format("%s:%d", hostname, thriftPort)))
-                .withConnectionPoolMonitor(new CountingConnectionPoolMonitor())
-                .buildKeyspace(ThriftFamilyFactory.getInstance());
-
-        context.start();
-        return context;
-    }
-    
-    public AstyanaxContext<Cluster> newAstyanaxContextForCluster() {
-        AstyanaxContext<Cluster> context = new AstyanaxContext.Builder()
-                .forCluster(clusterName)
-                .withAstyanaxConfiguration(new AstyanaxConfigurationImpl()
-                        .setDiscoveryType(NodeDiscoveryType.NONE))
-                .withConnectionPoolConfiguration(new ConnectionPoolConfigurationImpl("BrooklynPool")
-                        .setPort(thriftPort)
-                        .setMaxConnsPerHost(1)
-                        .setConnectTimeout(5000) // 5s
-                        .setSeeds(String.format("%s:%d", hostname, thriftPort)))
-                .withConnectionPoolMonitor(new CountingConnectionPoolMonitor())
-                .buildCluster(ThriftFamilyFactory.getInstance());
-
-        context.start();
-        return context;
-    }
-    
-    public static class AstyanaxSample extends AstyanaxSupport {
-        
-        public static class Builder {
-            protected CassandraNode node;
-            protected String clusterName;
-            protected String hostname;
-            protected Integer thriftPort;
-            protected String columnFamilyName = Identifiers.makeRandomId(8);
-            
-            public Builder node(CassandraNode val) {
-                this.node = val;
-                clusterName = node.getClusterName();
-                hostname = node.getAttribute(Attributes.HOSTNAME);
-                thriftPort = node.getThriftPort();
-                return this;
-            }
-            public Builder host(String clusterName, String hostname, int thriftPort) {
-                this.clusterName = clusterName;
-                this.hostname = hostname;
-                this.thriftPort = thriftPort;
-                return this;
-            }
-            public Builder columnFamilyName(String val) {
-                this.columnFamilyName = val;
-                return this;
-            }
-            public AstyanaxSample build() {
-                return new AstyanaxSample(this);
-            }
-        }
-        
-        public static Builder builder() {
-            return new Builder();
-        }
-        
-        public final String columnFamilyName;
-        public final ColumnFamily<String, String> sampleColumnFamily;
-
-        public AstyanaxSample(CassandraNode node) {
-            this(builder().node(node));
-        }
-
-        public AstyanaxSample(String clusterName, String hostname, int thriftPort) {
-            this(builder().host(clusterName, hostname, thriftPort));
-        }
-
-        protected AstyanaxSample(Builder builder) {
-            super(builder.clusterName, builder.hostname, builder.thriftPort);
-            columnFamilyName = checkNotNull(builder.columnFamilyName, "columnFamilyName");
-            sampleColumnFamily = new ColumnFamily<String, String>(
-                    columnFamilyName, // Column Family Name
-                    StringSerializer.get(), // Key Serializer
-                    StringSerializer.get()); // Column Serializer
-        }
-
-        /**
-         * Exercise the {@link CassandraNode} using the Astyanax API.
-         */
-        public void astyanaxTest() throws Exception {
-            String keyspaceName = "BrooklynTests_"+Identifiers.makeRandomId(8);
-            writeData(keyspaceName);
-            readData(keyspaceName);
-        }
-
-        /**
-         * Write to a {@link CassandraNode} using the Astyanax API.
-         * @throws ConnectionException 
-         */
-        public void writeData(String keyspaceName) throws ConnectionException {
-            // Create context
-            AstyanaxContext<Keyspace> context = newAstyanaxContextForKeyspace(keyspaceName);
-            try {
-                Keyspace keyspace = context.getEntity();
-                try {
-                    checkNull(keyspace.describeKeyspace().getColumnFamily(columnFamilyName), "key space for column family "+columnFamilyName);
-                } catch (Exception ek) {
-                    // (Re) Create keyspace if needed (including if family name already existed, 
-                    // e.g. due to a timeout on previous attempt)
-                    log.debug("repairing Cassandra error by re-creating keyspace "+keyspace+": "+ek);
-                    try {
-                        log.debug("dropping Cassandra keyspace "+keyspace);
-                        keyspace.dropKeyspace();
-                    } catch (Exception e) {
-                        /* Ignore */ 
-                        log.debug("Cassandra keyspace "+keyspace+" could not be dropped (probably did not exist): "+e);
-                    }
-                    try {
-                        keyspace.createKeyspace(ImmutableMap.<String, Object>builder()
-                                .put("strategy_options", ImmutableMap.<String, Object>of("replication_factor", "1"))
-                                .put("strategy_class", "SimpleStrategy")
-                                .build());
-                    } catch (SchemaDisagreementException e) {
-                        // discussion (but not terribly helpful) at http://stackoverflow.com/questions/6770894/schemadisagreementexception
-                        // let's just try again after a delay
-                        // (seems to have no effect; trying to fix by starting first node before others)
-                        log.warn("error creating Cassandra keyspace "+keyspace+" (retrying): "+e);
-                        Time.sleep(Duration.FIVE_SECONDS);
-                        keyspace.createKeyspace(ImmutableMap.<String, Object>builder()
-                                .put("strategy_options", ImmutableMap.<String, Object>of("replication_factor", "1"))
-                                .put("strategy_class", "SimpleStrategy")
-                                .build());
-                    }
-                }
-                
-                assertNull(keyspace.describeKeyspace().getColumnFamily("Rabbits"), "key space for arbitrary column family Rabbits");
-                assertNull(keyspace.describeKeyspace().getColumnFamily(columnFamilyName), "key space for column family "+columnFamilyName);
-
-                // Create column family
-                keyspace.createColumnFamily(sampleColumnFamily, null);
-
-                // Insert rows
-                MutationBatch m = keyspace.prepareMutationBatch();
-                m.withRow(sampleColumnFamily, "one")
-                        .putColumn("name", "Alice", null)
-                        .putColumn("company", "Cloudsoft Corp", null);
-                m.withRow(sampleColumnFamily, "two")
-                        .putColumn("name", "Bob", null)
-                        .putColumn("company", "Cloudsoft Corp", null)
-                        .putColumn("pet", "Cat", null);
-
-                OperationResult<Void> insert = m.execute();
-                assertEquals(insert.getHost().getHostName(), hostname);
-                assertTrue(insert.getLatency() > 0L);
-            } finally {
-                context.shutdown();
-            }
-        }
-
-        /**
-         * Read from a {@link CassandraNode} using the Astyanax API.
-         * @throws ConnectionException 
-         */
-        public void readData(String keyspaceName) throws ConnectionException {
-            // Create context
-            AstyanaxContext<Keyspace> context = newAstyanaxContextForKeyspace(keyspaceName);
-            try {
-                Keyspace keyspace = context.getEntity();
-
-                // Query data
-                OperationResult<ColumnList<String>> query = keyspace.prepareQuery(sampleColumnFamily)
-                        .getKey("one")
-                        .execute();
-                assertEquals(query.getHost().getHostName(), hostname);
-                assertTrue(query.getLatency() > 0L);
-
-                ColumnList<String> columns = query.getResult();
-                assertEquals(columns.size(), 2);
-
-                // Lookup columns in response by name
-                String name = columns.getColumnByName("name").getStringValue();
-                assertEquals(name, "Alice");
-
-                // Iterate through the columns
-                for (Column<String> c : columns) {
-                    assertTrue(ImmutableList.of("name", "company").contains(c.getName()));
-                }
-            } finally {
-                context.shutdown();
-            }
-        }
-        
-
-        /**
- * Returns the keyspace name to which the data has been written. If an attempt fails, each
- * retry uses a new keyspace name (the prefix plus an incrementing suffix). This is because
- * the failure could be a response timeout where the keyspace really has been created, so a
- * subsequent attempt with the same name would fail (because we check that the keyspace did not exist).
-         */
-        public String writeData(String keyspacePrefix, int numRetries) throws ConnectionException {
-            int retryCount = 0;
-            while (true) {
-                try {
-                    String keyspaceName = keyspacePrefix + (retryCount > 0 ? "_"+retryCount : "");
-                    writeData(keyspaceName);
-                    return keyspaceName;
-                } catch (Exception e) {
-                    log.warn("Error writing data - attempt "+(retryCount+1)+" of "+(numRetries+1)+": "+e, e);
-                    if (++retryCount > numRetries)
-                        throw Exceptions.propagate(e);
-                }
-            }
-        }
-
-        /**
-         * Repeatedly tries to read data from the given keyspace name. Asserts that the data is the
-         * same as would be written by calling {@code writeData(keyspaceName)}.
-         */
-        public void readData(String keyspaceName, int numRetries) throws ConnectionException {
-            int retryCount = 0;
-            while (true) {
-                try {
-                    readData(keyspaceName);
-                    return;
-                } catch (Exception e) {
-                    log.warn("Error reading data - attempt "+(retryCount+1)+" of "+(numRetries+1)+": "+e, e);
-                    if (++retryCount > numRetries)
-                        throw Exceptions.propagate(e);
-                }
-            }
-        }
-
-        /**
- * Like {@link Assert#assertNull(Object, String)}, except that it throws IllegalStateException instead,
- * so that retry loops which catch {@link Exception} will see the failure.
-         */
-        private void checkNull(Object obj, String msg) {
-            if (obj != null) {
-                throw new IllegalStateException("Not null: "+msg+"; obj="+obj);
-            }
-        }
-    }
-
-    public static void main(String[] args) throws Exception {
-        AstyanaxSample support = new AstyanaxSample("ignored", "ec2-79-125-32-2.eu-west-1.compute.amazonaws.com", 9160);
-        AstyanaxContext<Cluster> context = support.newAstyanaxContextForCluster();
-        try {
-            System.out.println(context.getEntity().describeSchemaVersions());
-        } finally {
-            context.shutdown();
-        }
-    }
-}
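
The keyspace-renaming retry in writeData above illustrates a general pattern: when a create
operation can succeed server-side despite a client-side timeout, each retry must target a
fresh name rather than repeat the old one. A minimal standalone sketch of that pattern,
independent of Astyanax (class and parameter names here are hypothetical):

    import java.util.function.Consumer;

    public class FreshNameRetry {
        /** Retries a non-idempotent create, giving each attempt a unique name. */
        public static String createWithFreshName(String prefix, int maxRetries, Consumer<String> createFn) {
            int attempt = 0;
            while (true) {
                // first attempt uses the bare prefix; retries append "_1", "_2", ...
                String name = prefix + (attempt > 0 ? "_" + attempt : "");
                try {
                    createFn.accept(name);
                    return name; // report the name actually used, so readers can find the data
                } catch (RuntimeException e) {
                    if (++attempt > maxRetries) throw e;
                }
            }
        }
    }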

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraDatacenterIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraDatacenterIntegrationTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraDatacenterIntegrationTest.java
deleted file mode 100644
index b74d5cd..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraDatacenterIntegrationTest.java
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-import static org.testng.Assert.assertEquals;
-import static org.testng.Assert.assertTrue;
-
-import java.math.BigInteger;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.BrooklynAppLiveTestSupport;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.nosql.cassandra.TokenGenerators.PosNeg63TokenGenerator;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.location.Location;
-import brooklyn.test.Asserts;
-import brooklyn.test.EntityTestUtils;
-import brooklyn.util.collections.MutableMap;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-
-/**
- * An integration test of the {@link CassandraDatacenter} entity.
- *
- * Tests that a one node cluster can be started on localhost and data can be written/read, using the Astyanax API.
- * 
- * NOTE: If these tests fail with "Timeout waiting for SERVICE_UP", and "java.lang.IllegalStateException: Unable to contact any seeds!" 
- * or "java.lang.RuntimeException: Unable to gossip with any seeds" appears in the log, it may be that the broadcast_address 
- * (set to InetAddress.getLocalHost().getHostName()) is not resolving to the value specified in listen_address 
- * (InetAddress.getLocalHost().getHostAddress()). You can work around this issue by ensuring that your machine has only one 
- * address, e.g. by disabling wireless if you are also using a wired connection.
- */
-public class CassandraDatacenterIntegrationTest extends BrooklynAppLiveTestSupport {
-
-    private static final Logger log = LoggerFactory.getLogger(CassandraDatacenterIntegrationTest.class);
-
-    protected Location testLocation;
-    protected CassandraDatacenter cluster;
-
-    @BeforeMethod(alwaysRun = true)
-    @Override
-    public void setUp() throws Exception {
-        CassandraNodeIntegrationTest.assertCassandraPortsAvailableEventually();
-        super.setUp();
-        testLocation = app.newLocalhostProvisioningLocation();
-    }
-
-    @AfterMethod(alwaysRun=true)
-    @Override
-    public void tearDown() throws Exception {
-        super.tearDown();
-        CassandraNodeIntegrationTest.assertCassandraPortsAvailableEventually();
-    }
-    
-
-    @Test(groups = "Integration")
-    public void testStartAndShutdownClusterSizeOne() throws Exception {
-        EntitySpec<CassandraDatacenter> spec = EntitySpec.create(CassandraDatacenter.class)
-                .configure("initialSize", 1)
-                .configure("tokenShift", 42);
-        runStartAndShutdownClusterSizeOne(spec, true);
-    }
-    
-    /**
- * Cassandra v2 needs Java >= 1.7. If you have Java 6 as the default locally, then you can use
-     * something like {@code .configure("shell.env", MutableMap.of("JAVA_HOME", "/Library/Java/JavaVirtualMachines/jdk1.7.0_51.jdk/Contents/Home"))}
-     */
-    @Test(groups = "Integration")
-    public void testStartAndShutdownClusterSizeOneCassandraVersion2() throws Exception {
-        String version = "2.0.9";
-        
-        EntitySpec<CassandraDatacenter> spec = EntitySpec.create(CassandraDatacenter.class)
-                .configure(CassandraNode.SUGGESTED_VERSION, version)
-                .configure("initialSize", 1);
-        runStartAndShutdownClusterSizeOne(spec, false);
-    }
-    
-    /**
-     * Test that a single node cluster starts up and allows access via the Astyanax API.
-     * Only one node because Cassandra can only run one node per VM!
-     */
-    protected void runStartAndShutdownClusterSizeOne(EntitySpec<CassandraDatacenter> datacenterSpec, final boolean assertToken) throws Exception {
-        cluster = app.createAndManageChild(datacenterSpec);
-        assertEquals(cluster.getCurrentSize().intValue(), 0);
-
-        app.start(ImmutableList.of(testLocation));
-        Entities.dumpInfo(app);
-        
-        final CassandraNode node = (CassandraNode) Iterables.get(cluster.getMembers(), 0);
-        String nodeAddr = checkNotNull(node.getAttribute(CassandraNode.HOSTNAME), "hostname") + ":" + checkNotNull(node.getAttribute(CassandraNode.THRIFT_PORT), "thriftPort");
-        
-        EntityTestUtils.assertAttributeEqualsEventually(cluster, CassandraDatacenter.GROUP_SIZE, 1);
-        EntityTestUtils.assertAttributeEqualsEventually(cluster, CassandraDatacenter.CASSANDRA_CLUSTER_NODES, ImmutableList.of(nodeAddr));
-
-        EntityTestUtils.assertAttributeEqualsEventually(node, Startable.SERVICE_UP, true);
-        if (assertToken) {
-            PosNeg63TokenGenerator tg = new PosNeg63TokenGenerator();
-            tg.growingCluster(1);
-            EntityTestUtils.assertAttributeEqualsEventually(node, CassandraNode.TOKEN, tg.newToken().add(BigInteger.valueOf(42)));
-        }
-
-        // may take some time to be consistent (with new thrift_latency checks on the node,
-        // contactability should not be an issue, but consistency still might be)
-        Asserts.succeedsEventually(MutableMap.of("timeout", 120*1000), new Runnable() {
-            public void run() {
-                boolean open = CassandraDatacenterLiveTest.isSocketOpen(node);
-                Boolean consistent = open ? CassandraDatacenterLiveTest.areVersionsConsistent(node) : null;
-                Integer numPeers = node.getAttribute(CassandraNode.PEERS);
-                Integer liveNodeCount = node.getAttribute(CassandraNode.LIVE_NODE_COUNT);
-                String msg = "consistency: "
-                        + (!open ? "unreachable" : consistent == null ? "error" : consistent) + "; "
-                        + "peer group size: " + numPeers + "; live node count: " + liveNodeCount;
-                assertTrue(open, msg);
-                assertEquals(consistent, Boolean.TRUE, msg);
-                if (assertToken) {
-                    assertEquals(numPeers, (Integer)1, msg);
-                } else {
-                    assertTrue(numPeers != null && numPeers >= 1, msg);
-                }
-                assertEquals(liveNodeCount, (Integer)1, msg);
-            }});
-        
-        CassandraDatacenterLiveTest.checkConnectionRepeatedly(2, 5, ImmutableList.of(node));
-    }
-}
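
The Asserts.succeedsEventually idiom used in the test above re-runs an assertion block until it
passes or a timeout expires, which is how the test tolerates a cluster that takes time to become
consistent. A minimal sketch of the idiom (a hypothetical standalone helper, not Brooklyn's
actual implementation):

    import java.time.Duration;

    public class EventuallyAssert {
        /** Re-runs the check until it stops throwing AssertionError or the timeout elapses. */
        public static void succeedsEventually(Duration timeout, Runnable check) throws InterruptedException {
            long deadline = System.nanoTime() + timeout.toNanos();
            while (true) {
                try {
                    check.run();
                    return;
                } catch (AssertionError e) {
                    if (System.nanoTime() > deadline) throw e;
                    Thread.sleep(500); // brief pause between attempts
                }
            }
        }
    }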

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraDatacenterLiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraDatacenterLiveTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraDatacenterLiveTest.java
deleted file mode 100644
index 809a738..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraDatacenterLiveTest.java
+++ /dev/null
@@ -1,308 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import static org.testng.Assert.assertEquals;
-import static org.testng.Assert.assertNotNull;
-import static org.testng.Assert.assertTrue;
-
-import java.math.BigInteger;
-import java.net.Socket;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.BrooklynAppLiveTestSupport;
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.Attributes;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.nosql.cassandra.AstyanaxSupport.AstyanaxSample;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.location.Location;
-import brooklyn.test.Asserts;
-import brooklyn.test.EntityTestUtils;
-import brooklyn.util.collections.MutableMap;
-import brooklyn.util.text.Identifiers;
-import brooklyn.util.time.Duration;
-import brooklyn.util.time.Time;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-import com.netflix.astyanax.AstyanaxContext;
-import com.netflix.astyanax.Cluster;
-import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
-
-/**
- * A live test of the {@link CassandraDatacenter} entity.
- *
- * Tests that a two node cluster can be started on Amazon EC2 and data written on one {@link CassandraNode}
- * can be read from another, using the Astyanax API.
- */
-public class CassandraDatacenterLiveTest extends BrooklynAppLiveTestSupport {
-
-    private static final Logger log = LoggerFactory.getLogger(CassandraDatacenterLiveTest.class);
-    
-    private String provider = 
-            "aws-ec2:eu-west-1";
-//            "rackspace-cloudservers-uk";
-//            "named:hpcloud-compute-at";
-//            "localhost";
-//            "jcloudsByon:(provider=\"aws-ec2\",region=\"us-east-1\",user=\"aled\",hosts=\"i-6f374743,i-35324219,i-1135453d\")";
-
-    protected Location testLocation;
-    protected CassandraDatacenter cluster;
-
-    @BeforeMethod(alwaysRun = true)
-    @Override
-    public void setUp() throws Exception {
-        super.setUp();
-        testLocation = mgmt.getLocationRegistry().resolve(provider);
-    }
-
-    @AfterMethod(alwaysRun=true)
-    @Override
-    public void tearDown() throws Exception {
-        super.tearDown();
-    }
-    
-    @Test(groups = "Live")
-    public void testDatacenter() throws Exception {
-        EntitySpec<CassandraDatacenter> spec = EntitySpec.create(CassandraDatacenter.class)
-                .configure("initialSize", 2)
-                .configure("clusterName", "CassandraClusterLiveTest");
-        runCluster(spec, false);
-    }
-    
-    @Test(groups = "Live")
-    public void testDatacenterWithVnodes() throws Exception {
-        EntitySpec<CassandraDatacenter> spec = EntitySpec.create(CassandraDatacenter.class)
-                .configure("initialSize", 2)
-                .configure(CassandraDatacenter.USE_VNODES, true)
-                .configure("clusterName", "CassandraClusterLiveTest");
-        runCluster(spec, true);
-    }
-    
-    /*
- * TODO On some distros (e.g. CentOS?), Java 6 comes pre-installed, and installing Java 7 
- * didn't seem to be enough; I also had to set JAVA_HOME:
-     *     .configure("shell.env", MutableMap.of("JAVA_HOME", "/etc/alternatives/java_sdk_1.7.0"))
-     * However, that would break other deployments such as on Ubuntu where JAVA_HOME would be different.
-     */
-    @Test(groups = "Live")
-    public void testDatacenterWithVnodesVersion2() throws Exception {
-        EntitySpec<CassandraDatacenter> spec = EntitySpec.create(CassandraDatacenter.class)
-                .configure("initialSize", 2)
-                .configure(CassandraNode.SUGGESTED_VERSION, "2.0.9")
-                .configure(CassandraDatacenter.USE_VNODES, true)
-                .configure("clusterName", "CassandraClusterLiveTest");
-        runCluster(spec, true);
-    }
-
-    @Test(groups = {"Live", "Acceptance"}, invocationCount=10)
-    public void testManyTimes() throws Exception {
-        testDatacenter();
-    }
-
-    /**
-     * Test a Cassandra Datacenter:
-     * <ol>
- *   <li>Create a two-node datacenter
- *   <li>Confirm it allows access via the Astyanax API through both nodes.
- *   <li>Confirm it can resize
-     * </ol>
-     */
-    protected void runCluster(EntitySpec<CassandraDatacenter> datacenterSpec, boolean usesVnodes) throws Exception {
-        cluster = app.createAndManageChild(datacenterSpec);
-        assertEquals(cluster.getCurrentSize().intValue(), 0);
-
-        app.start(ImmutableList.of(testLocation));
-
-        // Check cluster is up and healthy
-        EntityTestUtils.assertAttributeEqualsEventually(cluster, CassandraDatacenter.GROUP_SIZE, 2);
-        Entities.dumpInfo(app);
-        List<CassandraNode> members = castToCassandraNodes(cluster.getMembers());
-        assertNodesConsistent(members);
-
-        if (usesVnodes) {
-            assertVnodeTokensConsistent(members);
-        } else {
-            assertSingleTokenConsistent(members);
-        }
-        
-        // Can connect via Astyanax
-        checkConnectionRepeatedly(2, 5, members);
-
-        // Resize
-        cluster.resize(3);
-        assertEquals(cluster.getMembers().size(), 3, "members="+cluster.getMembers());
-        if (usesVnodes) {
-            assertVnodeTokensConsistent(castToCassandraNodes(cluster.getMembers()));
-        } else {
-            assertSingleTokenConsistent(castToCassandraNodes(cluster.getMembers()));
-        }
-        checkConnectionRepeatedly(2, 5, cluster.getMembers());
-    }
-
-    protected static List<CassandraNode> castToCassandraNodes(Collection<? extends Entity> rawnodes) {
-        final List<CassandraNode> nodes = Lists.newArrayList();
-        for (Entity node : rawnodes) {
-            nodes.add((CassandraNode) node);
-        }
-        return nodes;
-    }
-
-    protected static void assertNodesConsistent(final List<CassandraNode> nodes) {
-        final Integer expectedLiveNodeCount = nodes.size();
-        // may take some time to be consistent (with new thrift_latency checks on the node,
-        // contactability should not be an issue, but consistency still might be)
-        Asserts.succeedsEventually(MutableMap.of("timeout", Duration.TWO_MINUTES), new Runnable() {
-            public void run() {
-                for (Entity n : nodes) {
-                    CassandraNode node = (CassandraNode) n;
-                    EntityTestUtils.assertAttributeEquals(node, Startable.SERVICE_UP, true);
-                    String errmsg = "node="+node+"; hostname="+node.getAttribute(Attributes.HOSTNAME)+"; port="+node.getThriftPort();
-                    assertTrue(isSocketOpen(node), errmsg);
-                    assertTrue(areVersionsConsistent(node), errmsg);
-                    EntityTestUtils.assertAttributeEquals(node, CassandraNode.LIVE_NODE_COUNT, expectedLiveNodeCount);
-                }
-            }});
-    }
-    
-    protected static void assertSingleTokenConsistent(final List<CassandraNode> nodes) {
-        final int numNodes = nodes.size();
-        Asserts.succeedsEventually(MutableMap.of("timeout", Duration.TWO_MINUTES), new Runnable() {
-            public void run() {
-                Set<BigInteger> alltokens = Sets.newLinkedHashSet();
-                for (Entity node : nodes) {
-                    EntityTestUtils.assertAttributeEquals(node, Startable.SERVICE_UP, true);
-                    EntityTestUtils.assertConfigEquals(node, CassandraNode.NUM_TOKENS_PER_NODE, 1);
-                    EntityTestUtils.assertAttributeEquals(node, CassandraNode.PEERS, numNodes);
-                    BigInteger token = node.getAttribute(CassandraNode.TOKEN);
-                    Set<BigInteger> tokens = node.getAttribute(CassandraNode.TOKENS);
-                    assertNotNull(token);
-                    assertEquals(tokens, ImmutableSet.of(token));
-                    alltokens.addAll(tokens);
-                }
-                assertEquals(alltokens.size(), numNodes);
-            }});
-    }
-
-    protected static void assertVnodeTokensConsistent(final List<CassandraNode> nodes) {
-        final int numNodes = nodes.size();
-        final int tokensPerNode = Iterables.get(nodes, 0).getNumTokensPerNode();
-        
-        Asserts.succeedsEventually(MutableMap.of("timeout", Duration.TWO_MINUTES), new Runnable() {
-            public void run() {
-                Set<BigInteger> alltokens = Sets.newLinkedHashSet();
-                for (Entity node : nodes) {
-                    EntityTestUtils.assertAttributeEquals(node, Startable.SERVICE_UP, true);
-                    EntityTestUtils.assertAttributeEquals(node, CassandraNode.PEERS, tokensPerNode*numNodes);
-                    EntityTestUtils.assertConfigEquals(node, CassandraNode.NUM_TOKENS_PER_NODE, 256);
-                    BigInteger token = node.getAttribute(CassandraNode.TOKEN);
-                    Set<BigInteger> tokens = node.getAttribute(CassandraNode.TOKENS);
-                    assertNotNull(token);
-                    assertEquals(tokens.size(), tokensPerNode, "tokens="+tokens);
-                    alltokens.addAll(tokens);
-                }
-                assertEquals(alltokens.size(), tokensPerNode*numNodes);
-            }});
-    }
-
-    protected static void checkConnectionRepeatedly(int totalAttemptsAllowed, int numRetriesPerAttempt, Iterable<? extends Entity> nodes) throws Exception {
-        int attemptNum = 0;
-        while (true) {
-            try {
-                checkConnection(numRetriesPerAttempt, nodes);
-                return;
-            } catch (Exception e) {
-                attemptNum++;
-                if (attemptNum >= totalAttemptsAllowed) {
-                    log.warn("Cassandra not usable, "+attemptNum+" attempts; failing: "+e, e);
-                    throw e;                
-                }
-                log.warn("Cassandra not usable (attempt "+attemptNum+" of "+totalAttemptsAllowed+"), trying again after delay: "+e, e);
-                Time.sleep(Duration.TEN_SECONDS);
-            }
-        }
-    }
-
-    protected static void checkConnection(int numRetries, Iterable<? extends Entity> nodes) throws ConnectionException {
-        CassandraNode first = (CassandraNode) Iterables.get(nodes, 0);
-        
-        // have been seeing intermittent SchemaDisagreementException errors on AWS, probably due to Astyanax / how we are using it
-        // (confirmed that clocks are in sync)
-        String uniqueName = Identifiers.makeRandomId(8);
-        AstyanaxSample astyanaxFirst = AstyanaxSample.builder().node(first).columnFamilyName(uniqueName).build();
-        Map<String, List<String>> versions;
-        AstyanaxContext<Cluster> context = astyanaxFirst.newAstyanaxContextForCluster();
-        try {
-            versions = context.getEntity().describeSchemaVersions();
-        } finally {
-            context.shutdown();
-        }
-            
-        log.info("Cassandra schema versions are: "+versions);
-        if (versions.size() > 1) {
-            Assert.fail("Inconsistent versions on Cassandra start: "+versions);
-        }
-        String keyspacePrefix = "BrooklynTests_"+Identifiers.makeRandomId(8);
-
-        String keyspaceName = astyanaxFirst.writeData(keyspacePrefix, numRetries);
-
-        for (Entity node : nodes) {
-            AstyanaxSample astyanaxSecond = AstyanaxSample.builder().node((CassandraNode)node).columnFamilyName(uniqueName).build();
-            astyanaxSecond.readData(keyspaceName, numRetries);
-        }
-    }
-
-    protected static Boolean areVersionsConsistent(CassandraNode node) {
-        AstyanaxContext<Cluster> context = null;
-        try {
-            context = new AstyanaxSample(node).newAstyanaxContextForCluster();
-            Map<String, List<String>> v = context.getEntity().describeSchemaVersions();
-            return v.size() == 1;
-        } catch (Exception e) {
-            return null;
-        } finally {
-            if (context != null) context.shutdown();
-        }
-    }
-
-    protected static boolean isSocketOpen(CassandraNode node) {
-        try {
-            Socket s = new Socket(node.getAttribute(Attributes.HOSTNAME), node.getThriftPort());
-            s.close();
-            return true;
-        } catch (Exception e) {
-            return false;
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraDatacenterRebindIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraDatacenterRebindIntegrationTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraDatacenterRebindIntegrationTest.java
deleted file mode 100644
index 8f917cb..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraDatacenterRebindIntegrationTest.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import static org.testng.Assert.assertNotNull;
-
-import java.math.BigInteger;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.rebind.RebindOptions;
-import brooklyn.entity.rebind.RebindTestFixtureWithApp;
-import brooklyn.entity.trait.Startable;
-import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
-import brooklyn.test.EntityTestUtils;
-
-import com.google.common.base.Predicates;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-
-/**
- * Test that Brooklyn can rebind to (i.e. restore after a restart) a {@link CassandraDatacenter}.
- */
-public class CassandraDatacenterRebindIntegrationTest extends RebindTestFixtureWithApp {
-    private static final Logger LOG = LoggerFactory.getLogger(CassandraDatacenterRebindIntegrationTest.class);
-
-    private LocalhostMachineProvisioningLocation localhostProvisioningLocation;
-    
-    @BeforeMethod(alwaysRun=true)
-    public void setUp() throws Exception {
-        CassandraNodeIntegrationTest.assertCassandraPortsAvailableEventually();
-        super.setUp();
-        localhostProvisioningLocation = origApp.newLocalhostProvisioningLocation();
-    }
-
-    @AfterMethod(alwaysRun=true)
-    @Override
-    public void tearDown() throws Exception {
-        super.tearDown();
-        CassandraNodeIntegrationTest.assertCassandraPortsAvailableEventually();
-    }
-    
-    /**
-     * Test that Brooklyn can rebind to a single node datacenter.
-     */
-    @Test(groups = "Integration")
-    public void testRebindDatacenterOfSizeOne() throws Exception {
-        CassandraDatacenter origDatacenter = origApp.createAndManageChild(EntitySpec.create(CassandraDatacenter.class)
-                .configure("initialSize", 1));
-
-        origApp.start(ImmutableList.of(localhostProvisioningLocation));
-        CassandraNode origNode = (CassandraNode) Iterables.get(origDatacenter.getMembers(), 0);
-
-        EntityTestUtils.assertAttributeEqualsEventually(origDatacenter, CassandraDatacenter.GROUP_SIZE, 1);
-        CassandraDatacenterLiveTest.assertNodesConsistent(ImmutableList.of(origNode));
-        CassandraDatacenterLiveTest.assertSingleTokenConsistent(ImmutableList.of(origNode));
-        CassandraDatacenterLiveTest.checkConnectionRepeatedly(2, 5, ImmutableList.of(origNode));
-        BigInteger origToken = origNode.getAttribute(CassandraNode.TOKEN);
-        Set<BigInteger> origTokens = origNode.getAttribute(CassandraNode.TOKENS);
-        assertNotNull(origToken);
-        
-        newApp = rebind(RebindOptions.create().terminateOrigManagementContext(true));
-        final CassandraDatacenter newDatacenter = (CassandraDatacenter) Iterables.find(newApp.getChildren(), Predicates.instanceOf(CassandraDatacenter.class));
-        final CassandraNode newNode = (CassandraNode) Iterables.find(newDatacenter.getMembers(), Predicates.instanceOf(CassandraNode.class));
-        
-        EntityTestUtils.assertAttributeEqualsEventually(newDatacenter, CassandraDatacenter.GROUP_SIZE, 1);
-        EntityTestUtils.assertAttributeEqualsEventually(newNode, Startable.SERVICE_UP, true);
-        EntityTestUtils.assertAttributeEqualsEventually(newNode, CassandraNode.TOKEN, origToken);
-        EntityTestUtils.assertAttributeEqualsEventually(newNode, CassandraNode.TOKENS, origTokens);
-        CassandraDatacenterLiveTest.assertNodesConsistent(ImmutableList.of(newNode));
-        CassandraDatacenterLiveTest.assertSingleTokenConsistent(ImmutableList.of(newNode));
-        CassandraDatacenterLiveTest.checkConnectionRepeatedly(2, 5, ImmutableList.of(newNode));
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraDatacenterTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraDatacenterTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraDatacenterTest.java
deleted file mode 100644
index f902ac2..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraDatacenterTest.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import static org.testng.Assert.assertEquals;
-
-import java.math.BigInteger;
-import java.util.Map;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.Assert;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.BrooklynAppUnitTestSupport;
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.Attributes;
-import brooklyn.entity.basic.EmptySoftwareProcess;
-import brooklyn.entity.basic.EmptySoftwareProcessSshDriver;
-import brooklyn.entity.basic.EntityInternal;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.location.LocationSpec;
-import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
-import brooklyn.test.EntityTestUtils;
-import brooklyn.util.ResourceUtils;
-import brooklyn.util.javalang.JavaClassNames;
-import brooklyn.util.text.TemplateProcessor;
-import brooklyn.util.time.Duration;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Sets;
-
-public class CassandraDatacenterTest extends BrooklynAppUnitTestSupport {
-
-    private static final Logger log = LoggerFactory.getLogger(CassandraDatacenterTest.class);
-    
-    private LocalhostMachineProvisioningLocation loc;
-    private CassandraDatacenter cluster;
-    
-    @BeforeMethod(alwaysRun=true)
-    @Override
-    public void setUp() throws Exception {
-        super.setUp();
-        loc = mgmt.getLocationManager().createLocation(LocationSpec.create(LocalhostMachineProvisioningLocation.class));
-    }
-    
-    @Test
-    public void testPopulatesInitialSeeds() throws Exception {
-        cluster = app.createAndManageChild(EntitySpec.create(CassandraDatacenter.class)
-                .configure(CassandraDatacenter.INITIAL_SIZE, 2)
-                .configure(CassandraDatacenter.TOKEN_SHIFT, BigInteger.ZERO)
-                .configure(CassandraDatacenter.DELAY_BEFORE_ADVERTISING_CLUSTER, Duration.ZERO)
-                .configure(CassandraDatacenter.MEMBER_SPEC, EntitySpec.create(EmptySoftwareProcess.class)));
-
-        app.start(ImmutableList.of(loc));
-        EmptySoftwareProcess e1 = (EmptySoftwareProcess) Iterables.get(cluster.getMembers(), 0);
-        EmptySoftwareProcess e2 = (EmptySoftwareProcess) Iterables.get(cluster.getMembers(), 1);
-        
-        EntityTestUtils.assertAttributeEqualsEventually(cluster, CassandraDatacenter.CURRENT_SEEDS, ImmutableSet.<Entity>of(e1, e2));
-    }
-    
-    @Test(groups="Integration") // because takes approx 2 seconds
-    public void testUpdatesSeedsOnFailuresAndAdditions() throws Exception {
-        doTestUpdatesSeedsOnFailuresAndAdditions(true, false);
-    }
-    
-    protected void doTestUpdatesSeedsOnFailuresAndAdditions(boolean fast, boolean checkSeedsConstantOnRejoining) throws Exception {
-        cluster = app.createAndManageChild(EntitySpec.create(CassandraDatacenter.class)
-                .configure(CassandraDatacenter.INITIAL_SIZE, 2)
-                .configure(CassandraDatacenter.TOKEN_SHIFT, BigInteger.ZERO)
-                .configure(CassandraDatacenter.DELAY_BEFORE_ADVERTISING_CLUSTER, Duration.ZERO)
-                .configure(CassandraDatacenter.MEMBER_SPEC, EntitySpec.create(EmptySoftwareProcess.class)));
-
-        app.start(ImmutableList.of(loc));
-        EmptySoftwareProcess e1 = (EmptySoftwareProcess) Iterables.get(cluster.getMembers(), 0);
-        EmptySoftwareProcess e2 = (EmptySoftwareProcess) Iterables.get(cluster.getMembers(), 1);
-        EntityTestUtils.assertAttributeEqualsEventually(cluster, CassandraDatacenter.CURRENT_SEEDS, ImmutableSet.<Entity>of(e1, e2));
-        log.debug("Test "+JavaClassNames.niceClassAndMethod()+", cluster "+cluster+" has "+cluster.getMembers()+"; e1="+e1+" e2="+e2);
-        
-        // calling the driver's stop for this entity will cause SERVICE_UP to become false, and stay false
-        // (and that's all it does, incidentally); if we merely set the attribute, the serviceUp sensor feed would flip it back to true
-        ((EmptySoftwareProcess)e1).getDriver().stop();
-        // not necessary, but speeds things up:
-        if (fast)
-            ((EntityInternal)e1).setAttribute(Attributes.SERVICE_UP, false);
-        
-        EntityTestUtils.assertAttributeEqualsEventually(cluster, CassandraDatacenter.CURRENT_SEEDS, ImmutableSet.<Entity>of(e2));
-
-        cluster.resize(3);
-        EmptySoftwareProcess e3 = (EmptySoftwareProcess) Iterables.getOnlyElement(Sets.difference(ImmutableSet.copyOf(cluster.getMembers()), ImmutableSet.of(e1,e2)));
-        log.debug("Test "+JavaClassNames.niceClassAndMethod()+", cluster "+cluster+" has "+cluster.getMembers()+"; e3="+e3);
-        try {
-            EntityTestUtils.assertAttributeEqualsEventually(cluster, CassandraDatacenter.CURRENT_SEEDS, ImmutableSet.<Entity>of(e2, e3));
-        } finally {
-            log.debug("Test "+JavaClassNames.niceClassAndMethod()+", cluster "+cluster+" has "+cluster.getMembers()+"; seeds "+cluster.getAttribute(CassandraDatacenter.CURRENT_SEEDS));
-        }
-        
-        if (!checkSeedsConstantOnRejoining) {
-            // cluster should not revert to e1+e2, simply because e1 has come back; but e1 should rejoin the group
-            // (not that important, and waits for 1s, so only done as part of integration)
-            ((EmptySoftwareProcessSshDriver)(((EmptySoftwareProcess)e1).getDriver())).launch();
-            if (fast)
-                ((EntityInternal)e1).setAttribute(Attributes.SERVICE_UP, true);
-            EntityTestUtils.assertAttributeEqualsEventually(e1, CassandraNode.SERVICE_UP, true);
-            EntityTestUtils.assertAttributeEqualsContinually(cluster, CassandraDatacenter.CURRENT_SEEDS, ImmutableSet.<Entity>of(e2, e3));
-        }
-    }
-    
-    @Test
-    public void testPopulatesInitialTokens() throws Exception {
-        cluster = app.createAndManageChild(EntitySpec.create(CassandraDatacenter.class)
-                .configure(CassandraDatacenter.INITIAL_SIZE, 2)
-                .configure(CassandraDatacenter.TOKEN_SHIFT, BigInteger.ZERO)
-                .configure(CassandraDatacenter.DELAY_BEFORE_ADVERTISING_CLUSTER, Duration.ZERO)
-                .configure(CassandraDatacenter.MEMBER_SPEC, EntitySpec.create(EmptySoftwareProcess.class)));
-
-        app.start(ImmutableList.of(loc));
-
-        Set<BigInteger> tokens = Sets.newLinkedHashSet();
-        Set<BigInteger> tokens2 = Sets.newLinkedHashSet();
-        for (Entity member : cluster.getMembers()) {
-            BigInteger memberToken = member.getConfig(CassandraNode.TOKEN);
-            Set<BigInteger> memberTokens = member.getConfig(CassandraNode.TOKENS);
-            if (memberToken != null) tokens.add(memberToken);
-            if (memberTokens != null) tokens2.addAll(memberTokens);
-        }
-        assertEquals(tokens, ImmutableSet.of(new BigInteger("-9223372036854775808"), BigInteger.ZERO));
-        assertEquals(tokens2, ImmutableSet.of());
-    }
-    
-    @Test
-    public void testDoesNotPopulateInitialTokens() throws Exception {
-        cluster = app.createAndManageChild(EntitySpec.create(CassandraDatacenter.class)
-                .configure(CassandraDatacenter.INITIAL_SIZE, 2)
-                .configure(CassandraDatacenter.USE_VNODES, true)
-                .configure(CassandraDatacenter.DELAY_BEFORE_ADVERTISING_CLUSTER, Duration.ZERO)
-                .configure(CassandraDatacenter.MEMBER_SPEC, EntitySpec.create(EmptySoftwareProcess.class)));
-
-        app.start(ImmutableList.of(loc));
-
-        Set<BigInteger> tokens = Sets.newLinkedHashSet();
-        Set<BigInteger> tokens2 = Sets.newLinkedHashSet();
-        for (Entity member : cluster.getMembers()) {
-            BigInteger memberToken = member.getConfig(CassandraNode.TOKEN);
-            Set<BigInteger> memberTokens = member.getConfig(CassandraNode.TOKENS);
-            if (memberToken != null) tokens.add(memberToken);
-            if (memberTokens != null) tokens2.addAll(memberTokens);
-        }
-        assertEquals(tokens, ImmutableSet.of());
-        assertEquals(tokens2, ImmutableSet.of());
-    }
-    
-    public static class MockInputForTemplate {
-        public BigInteger getToken() { return new BigInteger("-9223372036854775808"); }
-        public String getTokensAsString() { return "" + getToken(); }
-        public int getNumTokensPerNode() { return 1; }
-        public String getSeeds() { return ""; }
-        public int getGossipPort() { return 1234; }
-        public int getSslGossipPort() { return 1234; }
-        public int getThriftPort() { return 1234; }
-        public int getNativeTransportPort() { return 1234; }
-        public String getClusterName() { return "Mock"; }
-        public String getEndpointSnitchName() { return ""; }
-        public String getListenAddress() { return "0"; }
-        public String getBroadcastAddress() { return "0"; }
-        public String getRpcAddress() { return "0"; }
-        public String getRunDir() { return "/tmp/mock"; }
-    }
-    
-    @Test
-    public void testBigIntegerFormattedCorrectly() {
-        Map<String, Object> substitutions = ImmutableMap.<String, Object>builder()
-                .put("entity", new MockInputForTemplate())
-                .put("driver", new MockInputForTemplate())
-                .build();
-
-        String templatedUrl = CassandraNode.CASSANDRA_CONFIG_TEMPLATE_URL.getDefaultValue();
-        String url = TemplateProcessor.processTemplateContents(templatedUrl, ImmutableMap.of("entity", ImmutableMap.of("majorMinorVersion", "1.2")));
-        String templateContents = new ResourceUtils(this).getResourceAsString(url);
-        String processedTemplate = TemplateProcessor.processTemplateContents(templateContents, substitutions);
-        Assert.assertEquals(processedTemplate.indexOf("775,808"), -1);
-        Assert.assertTrue(processedTemplate.indexOf("-9223372036854775808") > 0);
-    }
-    
-    @Test(groups="Integration") // because takes approx 30 seconds
-    public void testUpdatesSeedsFastishManyTimes() throws Exception {
-        final int COUNT = 20;
-        for (int i=0; i<COUNT; i++) {
-            log.info("Test "+JavaClassNames.niceClassAndMethod()+", iteration "+(i+1)+" of "+COUNT);
-            try {
-                doTestUpdatesSeedsOnFailuresAndAdditions(true, true);
-                tearDown();
-                setUp();
-            } catch (Exception e) {
-                log.warn("Error in "+JavaClassNames.niceClassAndMethod()+", iteration "+(i+1)+" of "+COUNT, e);
-                throw e;
-            }
-        }
-    }
-    
-    @Test(groups="Integration") // because takes approx 5 seconds
-    public void testUpdateSeedsSlowAndRejoining() throws Exception {
-        final int COUNT = 1;
-        for (int i=0; i<COUNT; i++) {
-            log.info("Test "+JavaClassNames.niceClassAndMethod()+", iteration "+(i+1)+" of "+COUNT);
-            doTestUpdatesSeedsOnFailuresAndAdditions(false, true);
-            tearDown();
-            setUp();
-        }
-    }
-
-}
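
The testBigIntegerFormattedCorrectly test above guards against locale digit-grouping (e.g.
"...775,808") leaking into the generated config when the initial token is rendered. In FreeMarker,
which Brooklyn's TemplateProcessor wraps, the ?c built-in renders a number in plain "computer"
format with no grouping; a standalone sketch of the difference (the one-line template here is
hypothetical, not the real cassandra.yaml template):

    import java.io.StringReader;
    import java.io.StringWriter;
    import java.math.BigInteger;
    import java.util.Collections;

    import freemarker.template.Configuration;
    import freemarker.template.Template;

    public class TokenFormatDemo {
        public static void main(String[] args) throws Exception {
            Configuration cfg = new Configuration(Configuration.VERSION_2_3_21);
            // ?c forces plain digits; a bare ${token} may be grouped by locale
            Template t = new Template("t", new StringReader("initial_token: ${token?c}"), cfg);
            StringWriter out = new StringWriter();
            t.process(Collections.singletonMap("token", new BigInteger("-9223372036854775808")), out);
            System.out.println(out); // initial_token: -9223372036854775808
        }
    }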

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraFabricTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraFabricTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraFabricTest.java
deleted file mode 100644
index f4a786a..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraFabricTest.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import static org.testng.Assert.assertEquals;
-
-import java.util.Collection;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.BrooklynAppUnitTestSupport;
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.AbstractEntity;
-import brooklyn.entity.basic.Attributes;
-import brooklyn.entity.basic.EmptySoftwareProcess;
-import brooklyn.entity.basic.EntityInternal;
-import brooklyn.entity.basic.EntityLocal;
-import brooklyn.entity.basic.Lifecycle;
-import brooklyn.entity.basic.ServiceStateLogic;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.entity.trait.Startable;
-import brooklyn.location.Location;
-import brooklyn.location.LocationSpec;
-import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
-import brooklyn.test.EntityTestUtils;
-import brooklyn.util.time.Duration;
-
-import com.google.common.base.Predicate;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Sets;
-
-public class CassandraFabricTest extends BrooklynAppUnitTestSupport {
-
-    private static final Logger log = LoggerFactory.getLogger(CassandraFabricTest.class);
-    
-    private LocalhostMachineProvisioningLocation loc1;
-    private LocalhostMachineProvisioningLocation loc2;
-    private CassandraFabric fabric;
-    
-    @BeforeMethod(alwaysRun=true)
-    @Override
-    public void setUp() throws Exception {
-        super.setUp();
-        loc1 = mgmt.getLocationManager().createLocation(LocationSpec.create(LocalhostMachineProvisioningLocation.class));
-        loc2 = mgmt.getLocationManager().createLocation(LocationSpec.create(LocalhostMachineProvisioningLocation.class));
-    }
-    
-    @Test
-    public void testPopulatesInitialSeeds() throws Exception {
-        fabric = app.createAndManageChild(EntitySpec.create(CassandraFabric.class)
-                .configure(CassandraFabric.INITIAL_QUORUM_SIZE, 2)
-                .configure(CassandraDatacenter.DELAY_BEFORE_ADVERTISING_CLUSTER, Duration.ZERO)
-                .configure(CassandraFabric.MEMBER_SPEC, EntitySpec.create(CassandraDatacenter.class)
-                        .configure(CassandraDatacenter.INITIAL_SIZE, 2)
-                        .configure(CassandraDatacenter.MEMBER_SPEC, EntitySpec.create(EmptySoftwareProcess.class))));
-
-        app.start(ImmutableList.of(loc1, loc2));
-        CassandraDatacenter d1 = (CassandraDatacenter) Iterables.get(fabric.getMembers(), 0);
-        CassandraDatacenter d2 = (CassandraDatacenter) Iterables.get(fabric.getMembers(), 1);
-
-        final EmptySoftwareProcess d1a = (EmptySoftwareProcess) Iterables.get(d1.getMembers(), 0);
-        final EmptySoftwareProcess d1b = (EmptySoftwareProcess) Iterables.get(d1.getMembers(), 1);
-
-        final EmptySoftwareProcess d2a = (EmptySoftwareProcess) Iterables.get(d2.getMembers(), 0);
-        final EmptySoftwareProcess d2b = (EmptySoftwareProcess) Iterables.get(d2.getMembers(), 1);
-
-        Predicate<Set<Entity>> predicate = new Predicate<Set<Entity>>() {
-            @Override public boolean apply(Set<Entity> input) {
-                return input != null && input.size() >= 2 &&
-                        Sets.intersection(input, ImmutableSet.of(d1a, d1b)).size() == 1 &&
-                        Sets.intersection(input, ImmutableSet.of(d2a, d2b)).size() == 1;
-            }
-        };
-        EntityTestUtils.assertAttributeEventually(fabric, CassandraFabric.CURRENT_SEEDS, predicate);
-        EntityTestUtils.assertAttributeEventually(d1, CassandraDatacenter.CURRENT_SEEDS, predicate);
-        EntityTestUtils.assertAttributeEventually(d2, CassandraDatacenter.CURRENT_SEEDS, predicate);
-        
-        Set<Entity> seeds = fabric.getAttribute(CassandraFabric.CURRENT_SEEDS);
-        assertEquals(d1.getAttribute(CassandraDatacenter.CURRENT_SEEDS), seeds);
-        assertEquals(d2.getAttribute(CassandraDatacenter.CURRENT_SEEDS), seeds);
-        log.info("Seeds="+seeds);
-    }
-
-    @Test
-    public void testPopulatesInitialSeedsWhenNodesOfOneClusterComeUpBeforeTheOtherCluster() throws Exception {
-        fabric = app.createAndManageChild(EntitySpec.create(CassandraFabric.class)
-                .configure(CassandraFabric.INITIAL_QUORUM_SIZE, 2)
-                .configure(CassandraDatacenter.DELAY_BEFORE_ADVERTISING_CLUSTER, Duration.ZERO)
-                .configure(CassandraFabric.MEMBER_SPEC, EntitySpec.create(CassandraDatacenter.class)
-                        .configure(CassandraDatacenter.INITIAL_SIZE, 2)
-                        .configure(CassandraDatacenter.MEMBER_SPEC, EntitySpec.create(DummyCassandraNode.class))));
-
-        Thread t = new Thread() {
-            public void run() {
-                app.start(ImmutableList.of(loc1, loc2));
-            }
-        };
-        t.start();
-        try {
-            EntityTestUtils.assertGroupSizeEqualsEventually(fabric, 2);
-            CassandraDatacenter d1 = (CassandraDatacenter) Iterables.get(fabric.getMembers(), 0);
-            CassandraDatacenter d2 = (CassandraDatacenter) Iterables.get(fabric.getMembers(), 1);
-    
-            EntityTestUtils.assertGroupSizeEqualsEventually(d1, 2);
-            final DummyCassandraNode d1a = (DummyCassandraNode) Iterables.get(d1.getMembers(), 0);
-            final DummyCassandraNode d1b = (DummyCassandraNode) Iterables.get(d1.getMembers(), 1);
-    
-            EntityTestUtils.assertGroupSizeEqualsEventually(d2, 2);
-            final DummyCassandraNode d2a = (DummyCassandraNode) Iterables.get(d2.getMembers(), 0);
-            final DummyCassandraNode d2b = (DummyCassandraNode) Iterables.get(d2.getMembers(), 1);
-
-            d1a.setAttribute(Attributes.HOSTNAME, "d1a");
-            d1b.setAttribute(Attributes.HOSTNAME, "d1b");
-            
-            Thread.sleep(1000);
-            d2a.setAttribute(Attributes.HOSTNAME, "d2a");
-            d2b.setAttribute(Attributes.HOSTNAME, "d2b");
-            
-            Predicate<Set<Entity>> predicate = new Predicate<Set<Entity>>() {
-                @Override public boolean apply(Set<Entity> input) {
-                    return input != null && input.size() >= 2 &&
-                            Sets.intersection(input, ImmutableSet.of(d1a, d1b)).size() == 1 &&
-                            Sets.intersection(input, ImmutableSet.of(d2a, d2b)).size() == 1;
-                }
-            };
-            EntityTestUtils.assertAttributeEventually(fabric, CassandraFabric.CURRENT_SEEDS, predicate);
-            EntityTestUtils.assertAttributeEventually(d1, CassandraDatacenter.CURRENT_SEEDS, predicate);
-            EntityTestUtils.assertAttributeEventually(d2, CassandraDatacenter.CURRENT_SEEDS, predicate);
-            
-            Set<Entity> seeds = fabric.getAttribute(CassandraFabric.CURRENT_SEEDS);
-            assertEquals(d1.getAttribute(CassandraDatacenter.CURRENT_SEEDS), seeds);
-            assertEquals(d2.getAttribute(CassandraDatacenter.CURRENT_SEEDS), seeds);
-            log.info("Seeds="+seeds);
-        } finally {
-            log.info("Failed seeds; fabric="+fabric.getAttribute(CassandraFabric.CURRENT_SEEDS));
-            t.interrupt();
-        }
-    }
-    
-    
-    @ImplementedBy(DummyCassandraNodeImpl.class)
-    public interface DummyCassandraNode extends Entity, Startable, EntityLocal, EntityInternal {
-    }
-    
-    public static class DummyCassandraNodeImpl extends AbstractEntity implements DummyCassandraNode {
-
-        @Override
-        public void start(Collection<? extends Location> locations) {
-            ServiceStateLogic.setExpectedState(this, Lifecycle.STARTING);
-        }
-
-        @Override
-        public void stop() {
-            ServiceStateLogic.setExpectedState(this, Lifecycle.STOPPING);
-        }
-
-        @Override
-        public void restart() {
-        }
-    }
-}
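
The seed predicate in the fabric tests above accepts any seed set that draws exactly one node
from each datacenter. A compact way to construct such a set, shown as a hypothetical standalone
helper (Brooklyn's real seed tracking is considerably more involved):

    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    public class SeedSelection {
        /** Picks one seed per datacenter: the first member of each group. */
        public static <T> Set<T> oneSeedPerDatacenter(Map<String, List<T>> membersByDatacenter) {
            Set<T> seeds = new LinkedHashSet<T>();
            for (List<T> members : membersByDatacenter.values()) {
                if (!members.isEmpty()) seeds.add(members.get(0));
            }
            return seeds;
        }
    }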



[14/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayDriver.java
new file mode 100644
index 0000000..c7feab6
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayDriver.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchbase;
+
+import brooklyn.entity.basic.SoftwareProcessDriver;
+
+public interface CouchbaseSyncGatewayDriver extends SoftwareProcessDriver {
+
+    public String getOsTag();
+    
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayImpl.java
new file mode 100644
index 0000000..14d8760
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayImpl.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchbase;
+
+import brooklyn.config.render.RendererHints;
+import brooklyn.entity.basic.SoftwareProcessImpl;
+import brooklyn.event.feed.http.HttpFeed;
+import brooklyn.event.feed.http.HttpPollConfig;
+import brooklyn.event.feed.http.HttpValueFunctions;
+import brooklyn.location.access.BrooklynAccessUtils;
+
+import com.google.common.base.Functions;
+import com.google.common.net.HostAndPort;
+
+public class CouchbaseSyncGatewayImpl extends SoftwareProcessImpl implements CouchbaseSyncGateway {
+
+    private HttpFeed httpFeed;
+
+    @Override
+    public Class<CouchbaseSyncGatewayDriver> getDriverInterface() {
+        return CouchbaseSyncGatewayDriver.class;
+    }
+
+    @Override
+    protected void connectSensors() {
+        super.connectSensors();
+        connectServiceUpIsRunning();
+    }
+
+    @Override
+    protected void connectServiceUpIsRunning() {
+        HostAndPort hp = BrooklynAccessUtils.getBrooklynAccessibleAddress(this,
+                getAttribute(CouchbaseSyncGateway.ADMIN_REST_API_PORT));
+
+        String managementUri = String.format("http://%s:%s",
+                hp.getHostText(), hp.getPort());
+
+        setAttribute(MANAGEMENT_URL, managementUri);
+
+        httpFeed = HttpFeed.builder()
+                .entity(this)
+                .period(200)
+                .baseUri(managementUri)
+                .poll(new HttpPollConfig<Boolean>(SERVICE_UP)
+                        .onSuccess(HttpValueFunctions.responseCodeEquals(200))
+                        .onFailureOrException(Functions.constant(false)))
+                .build();
+    }
+
+    @Override
+    protected void disconnectSensors() {
+        super.disconnectSensors();
+        disconnectServiceUpIsRunning();
+    }
+
+    @Override
+    protected void disconnectServiceUpIsRunning() {
+        if (httpFeed != null) {
+            httpFeed.stop();
+        }
+    }
+    
+    static {
+        RendererHints.register(MANAGEMENT_URL, RendererHints.namedActionWithUrl());
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewaySshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewaySshDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewaySshDriver.java
new file mode 100644
index 0000000..d2d18da
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewaySshDriver.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchbase;
+
+import static brooklyn.util.ssh.BashCommands.INSTALL_CURL;
+import static brooklyn.util.ssh.BashCommands.alternatives;
+import static brooklyn.util.ssh.BashCommands.chainGroup;
+import static brooklyn.util.ssh.BashCommands.sudo;
+import static java.lang.String.format;
+
+import java.util.List;
+
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.AbstractSoftwareProcessSshDriver;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.basic.EntityLocal;
+import brooklyn.entity.basic.EntityPredicates;
+import brooklyn.entity.drivers.downloads.DownloadResolver;
+import brooklyn.event.basic.DependentConfiguration;
+import brooklyn.location.OsDetails;
+import brooklyn.location.basic.SshMachineLocation;
+import brooklyn.util.collections.MutableMap;
+import brooklyn.util.ssh.BashCommands;
+import brooklyn.util.time.Duration;
+import brooklyn.util.time.Time;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Predicates;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
+
+public class CouchbaseSyncGatewaySshDriver extends AbstractSoftwareProcessSshDriver implements CouchbaseSyncGatewayDriver {
+    public CouchbaseSyncGatewaySshDriver(EntityLocal entity, SshMachineLocation machine) {
+        super(entity, machine);
+    }
+
+    @Override
+    public void stop() {
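+        // no-op: this driver does not implement a graceful stop; kill() terminates the process via its pid file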
+
+    }
+
+    @Override
+    public void install() {
+        // Reference: http://docs.couchbase.com/sync-gateway/#getting-started-with-sync-gateway
+        DownloadResolver resolver = Entities.newDownloader(this);
+        List<String> urls = resolver.getTargets();
+        String saveAs = resolver.getFilename();
+
+        OsDetails osDetails = getMachine().getMachineDetails().getOsDetails();
+
+        log.info("Installing couchbase-sync-gateway version: {}", getVersion());
+        if (osDetails.isLinux()) {
+            List<String> commands = installLinux(urls, saveAs);
+            newScript(INSTALLING)
+                    .body.append(commands).execute();
+        }
+    }
+
+    @Override
+    public void customize() {
+
+    }
+
+    @Override
+    public void launch() {
+        Entity cbNode = entity.getConfig(CouchbaseSyncGateway.COUCHBASE_SERVER);
+        Entities.waitForServiceUp(cbNode, Duration.ONE_HOUR);
+        DependentConfiguration.waitInTaskForAttributeReady(cbNode, CouchbaseCluster.IS_CLUSTER_INITIALIZED, Predicates.equalTo(true));
+        // Even once the bucket has published its API URL, it can still take a couple of seconds for it to become available
+        Time.sleep(10 * 1000);
+        if (cbNode instanceof CouchbaseCluster) {
+            // in_cluster now applies even to a node in a cluster of size 1
+            Optional<Entity> cbClusterNode = Iterables.tryFind(cbNode.getAttribute(CouchbaseCluster.GROUP_MEMBERS),
+                Predicates.and(Predicates.instanceOf(CouchbaseNode.class), EntityPredicates.attributeEqualTo(CouchbaseNode.IS_IN_CLUSTER, Boolean.TRUE)));
+            
+            if (!cbClusterNode.isPresent()) {
+                throw new IllegalArgumentException(format("The cluster %s does not contain any suitable Couchbase nodes to connect to.", cbNode.getId()));
+            }
+            
+            cbNode = cbClusterNode.get();
+        }
+        String hostname = cbNode.getAttribute(CouchbaseNode.HOSTNAME);
+        String webPort = cbNode.getAttribute(CouchbaseNode.COUCHBASE_WEB_ADMIN_PORT).toString();
+
+        String username = cbNode.getConfig(CouchbaseNode.COUCHBASE_ADMIN_USERNAME);
+        String password = cbNode.getConfig(CouchbaseNode.COUCHBASE_ADMIN_PASSWORD);
+
+        String bucketName = entity.getConfig(CouchbaseSyncGateway.COUCHBASE_SERVER_BUCKET);
+        String pool = entity.getConfig(CouchbaseSyncGateway.COUCHBASE_SERVER_POOL);
+        String pretty = entity.getConfig(CouchbaseSyncGateway.PRETTY) ? "-pretty" : "";
+        String verbose = entity.getConfig(CouchbaseSyncGateway.VERBOSE) ? "-verbose" : "";
+
+        String adminRestApiPort = entity.getConfig(CouchbaseSyncGateway.ADMIN_REST_API_PORT).iterator().next().toString();
+        String syncRestApiPort = entity.getConfig(CouchbaseSyncGateway.SYNC_REST_API_PORT).iterator().next().toString();
+
+        String serverWebAdminUrl = format("http://%s:%s@%s:%s", username, password, hostname, webPort);
+        String options = format("-url %s -bucket %s -adminInterface 0.0.0.0:%s -interface 0.0.0.0:%s -pool %s %s %s",
+                serverWebAdminUrl, bucketName, adminRestApiPort, syncRestApiPort, pool, pretty, verbose);
+
+        newScript(ImmutableMap.of("usePidFile", true), LAUNCHING)
+                .body.append(format("/opt/couchbase-sync-gateway/bin/sync_gateway %s ", options) + "> out.log 2> err.log < /dev/null &")
+                .failOnNonZeroResultCode()
+                .execute();
+    }
+    
+    @Override
+    public boolean isRunning() {
+        return newScript(MutableMap.of("usePidFile", true), CHECK_RUNNING).execute() == 0;
+    }
+    
+    @Override
+    public void kill() {
+        newScript(MutableMap.of("usePidFile", true), KILLING).execute();
+    }
+
+    private List<String> installLinux(List<String> urls, String saveAs) {
+
+        String apt = chainGroup(
+                "which apt-get",
+                sudo("apt-get update"),
+                sudo(format("dpkg -i %s", saveAs)));
+
+        String yum = chainGroup(
+                "which yum",
+                sudo(format("rpm --install %s", saveAs)));
+
+        return ImmutableList.<String>builder()
+                .add(INSTALL_CURL)
+                .addAll(BashCommands.commandsToDownloadUrlsAs(urls, saveAs))
+                .add(alternatives(apt, yum))
+                .build();
+    }
+
+    @Override
+    public String getOsTag() {
+        OsDetails os = getLocation().getOsDetails();
+        if (os == null) {
+            // Default to generic linux
+            return "x86_64.rpm";
+        } else {
+            //FIXME should be a better way to check for OS name and version
+            String osName = os.getName().toLowerCase();
+            String fileExtension = osName.contains("deb") || osName.contains("ubuntu") ? ".deb" : ".rpm";
+            String arch = os.is64bit() ? "x86_64" : "x86";
+            return arch + fileExtension;
+        }
+    }
+
+}
\ No newline at end of file

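For context, the gateway above is pointed at a Couchbase cluster (or single node) through its COUCHBASE_SERVER config key, which launch() reads. A minimal wiring sketch, assuming an existing cluster entity held in a local variable couchbaseCluster and an illustrative bucket name:

    EntitySpec<CouchbaseSyncGateway> gateway = EntitySpec.create(CouchbaseSyncGateway.class)
            .configure(CouchbaseSyncGateway.COUCHBASE_SERVER, couchbaseCluster)       // entity created elsewhere
            .configure(CouchbaseSyncGateway.COUCHBASE_SERVER_BUCKET, "sync_gateway"); // illustrative bucket name
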
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBCluster.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBCluster.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBCluster.java
new file mode 100644
index 0000000..852a4a4
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBCluster.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchdb;
+
+import brooklyn.entity.group.DynamicCluster;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
+import brooklyn.event.basic.Sensors;
+import brooklyn.util.flags.SetFromFlag;
+
+/**
+ * A cluster of {@link CouchDBNode}s based on {@link DynamicCluster} which can be resized by a policy if required.
+ *
+ * TODO add sensors with aggregated CouchDB statistics from cluster
+ */
+@ImplementedBy(CouchDBClusterImpl.class)
+public interface CouchDBCluster extends DynamicCluster {
+
+    @SetFromFlag("clusterName")
+    BasicAttributeSensorAndConfigKey<String> CLUSTER_NAME = new BasicAttributeSensorAndConfigKey<String>(String.class, "couchdb.cluster.name", "Name of the CouchDB cluster", "BrooklynCluster");
+
+    AttributeSensor<String> HOSTNAME = Sensors.newStringSensor("couchdb.cluster.hostname", "Hostname to connect to cluster with");
+
+    AttributeSensor<Integer> HTTP_PORT = Sensors.newIntegerSensor("couchdb.cluster.http.port", "CouchDB HTTP port to connect to cluster with");
+
+    /**
+     * The name of the cluster.
+     */
+    String getClusterName();
+
+}

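The javadoc above notes the cluster can be resized by a policy; the same Resizable effector can also be invoked directly. A sketch, assuming a TestApplication-style parent held in app (a placeholder):

    CouchDBCluster cluster = app.createAndManageChild(EntitySpec.create(CouchDBCluster.class)
            .configure(DynamicCluster.INITIAL_SIZE, 3));
    // ... later, grow to five nodes via the Resizable effector inherited from DynamicCluster
    cluster.resize(5);
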
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBClusterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBClusterImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBClusterImpl.java
new file mode 100644
index 0000000..7c576a1
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBClusterImpl.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchdb;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.entity.group.DynamicClusterImpl;
+import brooklyn.entity.proxying.EntitySpec;
+
+/**
+ * Implementation of {@link CouchDBCluster}.
+ */
+public class CouchDBClusterImpl extends DynamicClusterImpl implements CouchDBCluster {
+
+    @SuppressWarnings("unused")
+    private static final Logger log = LoggerFactory.getLogger(CouchDBClusterImpl.class);
+
+    public CouchDBClusterImpl() {
+    }
+
+    /**
+     * Sets the default {@link #MEMBER_SPEC} to describe the CouchDB nodes.
+     */
+    @Override
+    protected EntitySpec<?> getMemberSpec() {
+        return getConfig(MEMBER_SPEC, EntitySpec.create(CouchDBNode.class));
+    }
+
+    @Override
+    public String getClusterName() {
+        return getAttribute(CLUSTER_NAME);
+    }
+
+}

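getMemberSpec() above only supplies a default; a custom member spec can be passed in the usual DynamicCluster way. A hedged sketch (the Erlang version value is illustrative):

    EntitySpec<CouchDBCluster> spec = EntitySpec.create(CouchDBCluster.class)
            .configure(DynamicCluster.MEMBER_SPEC, EntitySpec.create(CouchDBNode.class)
                    .configure(CouchDBNode.ERLANG_VERSION, "R15B")); // illustrative version
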
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNode.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNode.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNode.java
new file mode 100644
index 0000000..be169b9
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNode.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchdb;
+
+import org.apache.brooklyn.catalog.Catalog;
+import brooklyn.config.ConfigKey;
+import brooklyn.entity.basic.ConfigKeys;
+import brooklyn.entity.basic.SoftwareProcess;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.entity.webapp.WebAppService;
+import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
+import brooklyn.util.flags.SetFromFlag;
+
+/**
+ * An {@link brooklyn.entity.Entity} that represents a CouchDB node in a {@link CouchDBCluster}.
+ */
+@Catalog(name="CouchDB Node",
+        description="Apache CouchDB is a database that uses JSON for documents, JavaScript for MapReduce queries, " +
+                "and regular HTTP for an API",
+        iconUrl="classpath:///couchdb-logo.png")
+@ImplementedBy(CouchDBNodeImpl.class)
+public interface CouchDBNode extends SoftwareProcess, WebAppService {
+
+    @SetFromFlag("version")
+    ConfigKey<String> SUGGESTED_VERSION = ConfigKeys.newConfigKeyWithDefault(SoftwareProcess.SUGGESTED_VERSION, "1.2.1");
+
+    @SetFromFlag("erlangVersion")
+    ConfigKey<String> ERLANG_VERSION = ConfigKeys.newStringConfigKey("erlang.version", "Erlang runtime version", "R15B");
+
+    @SetFromFlag("clusterName")
+    BasicAttributeSensorAndConfigKey<String> CLUSTER_NAME = CouchDBCluster.CLUSTER_NAME;
+
+    @SetFromFlag("couchdbConfigTemplateUrl")
+    BasicAttributeSensorAndConfigKey<String> COUCHDB_CONFIG_TEMPLATE_URL = new BasicAttributeSensorAndConfigKey<String>(
+            String.class, "couchdb.config.templateUrl", "Template file (in freemarker format) for the couchdb config file", 
+            "classpath://org/apache/brooklyn/entity/nosql/couchdb/couch.ini");
+
+    @SetFromFlag("couchdbUriTemplateUrl")
+    BasicAttributeSensorAndConfigKey<String> COUCHDB_URI_TEMPLATE_URL = new BasicAttributeSensorAndConfigKey<String>(
+            String.class, "couchdb.uri.templateUrl", "Template file (in freemarker format) for the couchdb URI file", 
+            "classpath://org/apache/brooklyn/entity/nosql/couchdb/couch.uri");
+
+    @SetFromFlag("couchdbConfigFileName")
+    BasicAttributeSensorAndConfigKey<String> COUCHDB_CONFIG_FILE_NAME = new BasicAttributeSensorAndConfigKey<String>(
+            String.class, "couchdb.config.fileName", "Name for the copied config file", "local.ini");
+
+    Integer getHttpPort();
+
+    Integer getHttpsPort();
+}

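The template keys above can be overridden per deployment, e.g. to point the node at a custom freemarker config (the classpath URL below is a hypothetical example):

    EntitySpec<CouchDBNode> nodeSpec = EntitySpec.create(CouchDBNode.class)
            .configure(CouchDBNode.COUCHDB_CONFIG_TEMPLATE_URL, "classpath://my/custom/couch.ini"); // hypothetical template
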
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeDriver.java
new file mode 100644
index 0000000..14386a0
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeDriver.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchdb;
+
+import brooklyn.entity.basic.SoftwareProcessDriver;
+
+public interface CouchDBNodeDriver extends SoftwareProcessDriver {
+
+    Integer getHttpPort();
+
+    Integer getHttpsPort();
+
+    String getClusterName();
+
+    String getCouchDBConfigTemplateUrl();
+
+    String getCouchDBUriTemplateUrl();
+
+    String getCouchDBConfigFileName();
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeImpl.java
new file mode 100644
index 0000000..298b6b6
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeImpl.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchdb;
+
+import java.util.concurrent.TimeUnit;
+
+import javax.annotation.Nullable;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.entity.basic.SoftwareProcessImpl;
+import brooklyn.entity.webapp.JavaWebAppSoftwareProcessImpl;
+import brooklyn.entity.webapp.WebAppServiceMethods;
+import brooklyn.event.feed.http.HttpFeed;
+import brooklyn.event.feed.http.HttpPollConfig;
+import brooklyn.event.feed.http.HttpValueFunctions;
+
+import com.google.common.base.Function;
+import com.google.common.base.Functions;
+
+/**
+ * Implementation of {@link CouchDBNode}.
+ */
+public class CouchDBNodeImpl extends SoftwareProcessImpl implements CouchDBNode {
+
+    private static final Logger log = LoggerFactory.getLogger(CouchDBNodeImpl.class);
+
+    public CouchDBNodeImpl() {
+    }
+
+    public Integer getHttpPort() { return getAttribute(CouchDBNode.HTTP_PORT); }
+    public Integer getHttpsPort() { return getAttribute(CouchDBNode.HTTPS_PORT); }
+    public String getClusterName() { return getAttribute(CouchDBNode.CLUSTER_NAME); }
+
+    @Override
+    public Class<CouchDBNodeDriver> getDriverInterface() {
+        return CouchDBNodeDriver.class;
+    }
+
+    private volatile HttpFeed httpFeed;
+
+    @Override 
+    protected void connectSensors() {
+        super.connectSensors();
+
+        connectServiceUpIsRunning();
+
+        httpFeed = HttpFeed.builder()
+                .entity(this)
+                .period(500, TimeUnit.MILLISECONDS)
+                .baseUri(String.format("http://%s:%d/_stats", getAttribute(HOSTNAME), getHttpPort()))
+                .poll(new HttpPollConfig<Integer>(REQUEST_COUNT)
+                        .onSuccess(HttpValueFunctions.jsonContents(new String[] { "httpd", "requests", "count" }, Integer.class))
+                        .onFailureOrException(Functions.constant(-1)))
+                .poll(new HttpPollConfig<Integer>(ERROR_COUNT)
+                        .onSuccess(HttpValueFunctions.jsonContents(new String[] { "httpd_status_codes", "404", "count" }, Integer.class))
+                        .onFailureOrException(Functions.constant(-1)))
+                .poll(new HttpPollConfig<Integer>(TOTAL_PROCESSING_TIME)
+                        .onSuccess(HttpValueFunctions.jsonContents(new String[] { "couchdb", "request_time", "count" }, Integer.class))
+                        .onFailureOrException(Functions.constant(-1)))
+                .poll(new HttpPollConfig<Integer>(MAX_PROCESSING_TIME)
+                        .onSuccess(HttpValueFunctions.chain(HttpValueFunctions.jsonContents(new String[] { "couchdb", "request_time", "max" }, Double.class), new Function<Double, Integer>() {
+                            @Override
+                            public Integer apply(@Nullable Double input) {
+                                // guard the @Nullable input; -1 mirrors the onFailureOrException convention
+                                return (input == null) ? -1 : Integer.valueOf(input.intValue());
+                            }
+                        }))
+                        .onFailureOrException(Functions.constant(-1)))
+                .build();
+
+        WebAppServiceMethods.connectWebAppServerPolicies(this);
+    }
+
+    @Override
+    public void disconnectSensors() {
+        super.disconnectSensors();
+        if (httpFeed != null) httpFeed.stop();
+        disconnectServiceUpIsRunning();
+    }
+
+    /** @see JavaWebAppSoftwareProcessImpl#postStop() */
+    @Override
+    protected void postStop() {
+        super.postStop();
+        // zero out the derived workrate sensors.
+        setAttribute(REQUESTS_PER_SECOND_LAST, 0D);
+        setAttribute(REQUESTS_PER_SECOND_IN_WINDOW, 0D);
+    }
+}

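A note on the poll paths above: jsonContents walks the CouchDB _stats document, so the feed assumes counters nested roughly like this (illustrative values, heavily trimmed):

    {"httpd": {"requests": {"count": 124}},
     "httpd_status_codes": {"404": {"count": 3}},
     "couchdb": {"request_time": {"count": 9501, "max": 172.0}}}
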
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeSshDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeSshDriver.java
new file mode 100644
index 0000000..5545f08
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeSshDriver.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchdb;
+
+import static brooklyn.util.ssh.BashCommands.*;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.entity.basic.AbstractSoftwareProcessSshDriver;
+import brooklyn.entity.basic.Attributes;
+import brooklyn.location.Location;
+import brooklyn.location.basic.SshMachineLocation;
+import brooklyn.util.collections.MutableMap;
+import brooklyn.util.net.Networking;
+import brooklyn.util.os.Os;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Sets;
+
+/**
+ * Start a {@link CouchDBNode} in a {@link Location} accessible over ssh.
+ */
+public class CouchDBNodeSshDriver extends AbstractSoftwareProcessSshDriver implements CouchDBNodeDriver {
+
+    private static final Logger log = LoggerFactory.getLogger(CouchDBNodeSshDriver.class);
+
+    public CouchDBNodeSshDriver(CouchDBNodeImpl entity, SshMachineLocation machine) {
+        super(entity, machine);
+
+        entity.setAttribute(Attributes.LOG_FILE_LOCATION, getLogFileLocation());
+    }
+
+    public String getLogFileLocation() { return Os.mergePathsUnix(getRunDir(), "couchdb.log"); }
+
+    @Override
+    public Integer getHttpPort() { return entity.getAttribute(CouchDBNode.HTTP_PORT); }
+
+    @Override
+    public Integer getHttpsPort() { return entity.getAttribute(CouchDBNode.HTTPS_PORT); }
+
+    @Override
+    public String getClusterName() { return entity.getAttribute(CouchDBNode.CLUSTER_NAME); }
+
+    @Override
+    public String getCouchDBConfigTemplateUrl() { return entity.getAttribute(CouchDBNode.COUCHDB_CONFIG_TEMPLATE_URL); }
+
+    @Override
+    public String getCouchDBUriTemplateUrl() { return entity.getAttribute(CouchDBNode.COUCHDB_URI_TEMPLATE_URL); }
+
+    @Override
+    public String getCouchDBConfigFileName() { return entity.getAttribute(CouchDBNode.COUCHDB_CONFIG_FILE_NAME); }
+
+    public String getErlangVersion() { return entity.getConfig(CouchDBNode.ERLANG_VERSION); }
+
+    @Override
+    public void install() {
+        log.info("Installing {}", entity);
+        List<String> commands = ImmutableList.<String>builder()
+                .add(ifExecutableElse0("zypper", chainGroup( // SLES 11 not supported, would require building from source
+                        ok(sudo("zypper --non-interactive addrepo http://download.opensuse.org/repositories/devel:/languages:/erlang/openSUSE_11.4 erlang_suse_11")),
+                        ok(sudo("zypper --non-interactive addrepo http://download.opensuse.org/repositories/devel:/languages:/erlang/openSUSE_12.3 erlang_suse_12")),
+                        ok(sudo("zypper --non-interactive addrepo http://download.opensuse.org/repositories/devel:/languages:/erlang/openSUSE_13.1 erlang_suse_13")),
+                        ok(sudo("zypper --non-interactive addrepo http://download.opensuse.org/repositories/server:/database/openSUSE_11.4 db_suse_11")),
+                        ok(sudo("zypper --non-interactive addrepo http://download.opensuse.org/repositories/server:/database/openSUSE_12.3 db_suse_12")),
+                        ok(sudo("zypper --non-interactive addrepo http://download.opensuse.org/repositories/server:/database/openSUSE_13.1 db_suse_13")))))
+                .add(installPackage( // NOTE only 'port' states the version of Erlang used, maybe remove this constraint?
+                        ImmutableMap.of(
+                                "apt", "erlang-nox erlang-dev",
+                                "port", "erlang@"+getErlangVersion()+"+ssl"),
+                        "erlang"))
+                .add(installPackage("couchdb"))
+                .add(ifExecutableElse0("service", sudo("service couchdb stop")))
+                .build();
+
+        newScript(INSTALLING)
+                .body.append(commands)
+                .execute();
+    }
+
+    @Override
+    public Set<Integer> getPortsUsed() {
+        Set<Integer> result = Sets.newLinkedHashSet(super.getPortsUsed());
+        result.addAll(getPortMap().values());
+        return result;
+    }
+
+    private Map<String, Integer> getPortMap() {
+        return ImmutableMap.<String, Integer>builder()
+                .put("httpPort", getHttpPort())
+                .build();
+    }
+
+    @Override
+    public void customize() {
+        log.info("Customizing {} (Cluster {})", entity, getClusterName());
+        Networking.checkPortsValid(getPortMap());
+
+        newScript(CUSTOMIZING).execute();
+
+        // Copy the configuration files across
+        String destinationConfigFile = Os.mergePathsUnix(getRunDir(), getCouchDBConfigFileName());
+        copyTemplate(getCouchDBConfigTemplateUrl(), destinationConfigFile);
+        String destinationUriFile = Os.mergePathsUnix(getRunDir(), "couch.uri");
+        copyTemplate(getCouchDBUriTemplateUrl(), destinationUriFile);
+    }
+
+    @Override
+    public void launch() {
+        log.info("Launching  {}", entity);
+        newScript(MutableMap.of(USE_PID_FILE, false), LAUNCHING)
+                .body.append(sudo(String.format("nohup couchdb -p %s -a %s -o couchdb-console.log -e couchdb-error.log -b &", getPidFile(), Os.mergePathsUnix(getRunDir(), getCouchDBConfigFileName()))))
+                .execute();
+    }
+
+    public String getPidFile() { return Os.mergePathsUnix(getRunDir(), "couchdb.pid"); }
+
+    @Override
+    public boolean isRunning() {
+        return newScript(MutableMap.of(USE_PID_FILE, false), CHECK_RUNNING)
+                .body.append(sudo(String.format("couchdb -p %s -s", getPidFile())))
+                .execute() == 0;
+    }
+
+    @Override
+    public void stop() {
+        newScript(MutableMap.of(USE_PID_FILE, false), STOPPING)
+                .body.append(sudo(String.format("couchdb -p %s -k", getPidFile())))
+                .failOnNonZeroResultCode()
+                .execute();
+    }
+}

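For reference, with the default run-dir layout the LAUNCHING script above boils down to a command of this shape (paths are placeholders):

    sudo nohup couchdb -p <run dir>/couchdb.pid -a <run dir>/local.ini -o couchdb-console.log -e couchdb-error.log -b &
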
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchCluster.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchCluster.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchCluster.java
new file mode 100644
index 0000000..9d5bd78
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchCluster.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.elasticsearch;
+
+import org.apache.brooklyn.catalog.Catalog;
+import brooklyn.entity.group.DynamicCluster;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
+import brooklyn.util.flags.SetFromFlag;
+
+/**
+ * A cluster of {@link ElasticSearchNode}s based on {@link DynamicCluster} which can be resized by a policy if required.
+ */
+@Catalog(name="Elastic Search Cluster", description="Elasticsearch is an open-source search server based on Lucene. "
+        + "It provides a distributed, multitenant-capable full-text search engine with a RESTful web interface and "
+        + "schema-free JSON documents.")
+@ImplementedBy(ElasticSearchClusterImpl.class)
+public interface ElasticSearchCluster extends DynamicCluster {
+    @SetFromFlag("clusterName")
+    BasicAttributeSensorAndConfigKey<String> CLUSTER_NAME = new BasicAttributeSensorAndConfigKey<String>(String.class, 
+            "elasticsearch.cluster.name", "Name of the ElasticSearch cluster", "BrooklynCluster");
+    
+    String getClusterName();
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchClusterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchClusterImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchClusterImpl.java
new file mode 100644
index 0000000..27d4e9e
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchClusterImpl.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.elasticsearch;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+import brooklyn.entity.group.DynamicClusterImpl;
+import brooklyn.entity.proxying.EntitySpec;
+
+public class ElasticSearchClusterImpl extends DynamicClusterImpl implements ElasticSearchCluster {
+    
+    private AtomicInteger nextMemberId = new AtomicInteger(0);
+
+    @Override
+    protected EntitySpec<?> getMemberSpec() {
+        EntitySpec<?> spec = EntitySpec.create(getConfig(MEMBER_SPEC, EntitySpec.create(ElasticSearchNode.class)));
+        
+        spec.configure(ElasticSearchNode.CLUSTER_NAME, getConfig(ElasticSearchClusterImpl.CLUSTER_NAME))
+            .configure(ElasticSearchNode.NODE_NAME, "elasticsearch-" + nextMemberId.incrementAndGet());
+        
+        return spec;
+    }
+    
+    @Override
+    public String getClusterName() {
+        return getConfig(CLUSTER_NAME);
+    }
+    
+}

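Because getMemberSpec() above copies CLUSTER_NAME into each member, setting the name once on the cluster is enough. A sketch (name and size are illustrative):

    EntitySpec<ElasticSearchCluster> spec = EntitySpec.create(ElasticSearchCluster.class)
            .configure(ElasticSearchCluster.CLUSTER_NAME, "es-demo")   // propagated to members
            .configure(DynamicCluster.INITIAL_SIZE, 3);
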
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchNode.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchNode.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchNode.java
new file mode 100644
index 0000000..34be8f1
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchNode.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.elasticsearch;
+
+import org.apache.brooklyn.catalog.Catalog;
+import brooklyn.config.ConfigKey;
+import brooklyn.entity.basic.ConfigKeys;
+import brooklyn.entity.basic.SoftwareProcess;
+import brooklyn.entity.database.DatastoreMixins;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.entity.webapp.WebAppServiceConstants;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
+import brooklyn.event.basic.BasicAttributeSensorAndConfigKey.StringAttributeSensorAndConfigKey;
+import brooklyn.event.basic.PortAttributeSensorAndConfigKey;
+import brooklyn.event.basic.Sensors;
+import brooklyn.location.basic.PortRanges;
+import brooklyn.util.flags.SetFromFlag;
+
+/**
+ * An {@link brooklyn.entity.Entity} that represents an ElasticSearch node
+ */
+@Catalog(name="Elastic Search Node", description="Elasticsearch is an open-source search server based on Lucene. "
+        + "It provides a distributed, multitenant-capable full-text search engine with a RESTful web interface and "
+        + "schema-free JSON documents.")
+@ImplementedBy(ElasticSearchNodeImpl.class)
+public interface ElasticSearchNode extends SoftwareProcess, DatastoreMixins.HasDatastoreUrl {
+    @SetFromFlag("version")
+    ConfigKey<String> SUGGESTED_VERSION = ConfigKeys.newConfigKeyWithDefault(SoftwareProcess.SUGGESTED_VERSION, "1.2.1");
+    
+    @SetFromFlag("downloadUrl")
+    BasicAttributeSensorAndConfigKey<String> DOWNLOAD_URL = new BasicAttributeSensorAndConfigKey<String>(
+            SoftwareProcess.DOWNLOAD_URL, "https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-${version}.tar.gz");
+    
+    @SetFromFlag("dataDir")
+    ConfigKey<String> DATA_DIR = ConfigKeys.newStringConfigKey("elasticsearch.node.data.dir", "Directory for writing data files", null);
+    
+    @SetFromFlag("logDir")
+    ConfigKey<String> LOG_DIR = ConfigKeys.newStringConfigKey("elasticsearch.node.log.dir", "Directory for writing log files", null);
+    
+    @SetFromFlag("configFileUrl")
+    ConfigKey<String> TEMPLATE_CONFIGURATION_URL = ConfigKeys.newStringConfigKey(
+            "elasticsearch.node.template.configuration.url", "URL where the elasticsearch configuration file (in freemarker format) can be found", null);
+    
+    @SetFromFlag("multicastEnabled")
+    ConfigKey<Boolean> MULTICAST_ENABLED = ConfigKeys.newBooleanConfigKey("elasticsearch.node.multicast.enabled", 
+            "Indicates whether zen discovery multicast should be enabled for a node", null);
+    
+    @SetFromFlag("multicastEnabled")
+    ConfigKey<Boolean> UNICAST_ENABLED = ConfigKeys.newBooleanConfigKey("elasticsearch.node.UNicast.enabled", 
+            "Indicates whether zen discovery unicast should be enabled for a node", null);
+    
+    @SetFromFlag("httpPort")
+    PortAttributeSensorAndConfigKey HTTP_PORT = new PortAttributeSensorAndConfigKey(WebAppServiceConstants.HTTP_PORT, PortRanges.fromString("9200+"));
+    
+    @SetFromFlag("nodeName")
+    StringAttributeSensorAndConfigKey NODE_NAME = new StringAttributeSensorAndConfigKey("elasticsearch.node.name", 
+            "Node name (or randomly selected if not set", null);
+    
+    @SetFromFlag("clusterName")
+    StringAttributeSensorAndConfigKey CLUSTER_NAME = new StringAttributeSensorAndConfigKey("elasticsearch.node.cluster.name", 
+            "Cluster name (or elasticsearch selected if not set", null);
+    
+    AttributeSensor<String> NODE_ID = Sensors.newStringSensor("elasticsearch.node.id");
+    AttributeSensor<Integer> DOCUMENT_COUNT = Sensors.newIntegerSensor("elasticsearch.node.docs.count");
+    AttributeSensor<Integer> STORE_BYTES = Sensors.newIntegerSensor("elasticsearch.node.store.bytes");
+    AttributeSensor<Integer> GET_TOTAL = Sensors.newIntegerSensor("elasticsearch.node.get.total");
+    AttributeSensor<Integer> GET_TIME_IN_MILLIS = Sensors.newIntegerSensor("elasticsearch.node.get.time.in.millis");
+    AttributeSensor<Integer> SEARCH_QUERY_TOTAL = Sensors.newIntegerSensor("elasticsearch.node.search.query.total");
+    AttributeSensor<Integer> SEARCH_QUERY_TIME_IN_MILLIS = Sensors.newIntegerSensor("elasticsearch.node.search.query.time.in.millis");
+    
+}

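Once the node is up, the stats sensors above are populated by the HTTP feed in the impl class. A hedged read-side sketch, assuming an already-deployed node held in esNode:

    Entities.waitForServiceUp(esNode, Duration.FIVE_MINUTES);
    Integer docs = esNode.getAttribute(ElasticSearchNode.DOCUMENT_COUNT);        // from _nodes/_local/stats
    Integer queries = esNode.getAttribute(ElasticSearchNode.SEARCH_QUERY_TOTAL);
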
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeDriver.java
new file mode 100644
index 0000000..bc0e57c
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeDriver.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.elasticsearch;
+
+import brooklyn.entity.basic.SoftwareProcessDriver;
+
+public interface ElasticSearchNodeDriver extends SoftwareProcessDriver {
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeImpl.java
new file mode 100644
index 0000000..9c51de5
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeImpl.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.elasticsearch;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import brooklyn.entity.basic.SoftwareProcessImpl;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.feed.http.HttpFeed;
+import brooklyn.event.feed.http.HttpPollConfig;
+import brooklyn.event.feed.http.HttpValueFunctions;
+import brooklyn.event.feed.http.JsonFunctions;
+import brooklyn.location.access.BrooklynAccessUtils;
+import brooklyn.util.guava.Functionals;
+import brooklyn.util.guava.Maybe;
+import brooklyn.util.guava.MaybeFunctions;
+import brooklyn.util.guava.TypeTokens;
+import brooklyn.util.http.HttpToolResponse;
+
+import com.google.common.base.Function;
+import com.google.common.base.Functions;
+import com.google.common.net.HostAndPort;
+import com.google.gson.JsonElement;
+
+public class ElasticSearchNodeImpl extends SoftwareProcessImpl implements ElasticSearchNode {
+    
+    protected static final Function<Maybe<JsonElement>, Maybe<JsonElement>> GET_FIRST_NODE_FROM_NODES = new Function<Maybe<JsonElement>, Maybe<JsonElement>>() {
+        @Override public Maybe<JsonElement> apply(Maybe<JsonElement> input) {
+            if (input.isAbsent()) {
+                return input;
+            }
+            return Maybe.fromNullable(input.get().getAsJsonObject().entrySet().iterator().next().getValue());
+        }
+    };
+    
+    protected static final Function<HttpToolResponse, Maybe<JsonElement>> GET_FIRST_NODE = Functionals.chain(HttpValueFunctions.jsonContents(), 
+            MaybeFunctions.<JsonElement>wrap(), JsonFunctions.walkM("nodes"), GET_FIRST_NODE_FROM_NODES);
+    
+    HttpFeed httpFeed;
+
+    @Override
+    public Class<ElasticSearchNodeDriver> getDriverInterface() {
+        return ElasticSearchNodeDriver.class;
+    }
+    
+    protected static <T> HttpPollConfig<T> getSensorFromNodeStat(AttributeSensor<T> sensor, String... jsonPath) {
+        return new HttpPollConfig<T>(sensor)
+            .onSuccess(Functionals.chain(GET_FIRST_NODE, JsonFunctions.walkM(jsonPath), JsonFunctions.castM(TypeTokens.getRawRawType(sensor.getTypeToken()), null)))
+            .onFailureOrException(Functions.<T>constant(null));
+    }
+    
+    @Override
+    protected void connectSensors() {
+        super.connectSensors();
+        Integer rawPort = getAttribute(HTTP_PORT);
+        checkNotNull(rawPort, "HTTP_PORT sensors not set for %s; is an acceptable port available?", this);
+        HostAndPort hp = BrooklynAccessUtils.getBrooklynAccessibleAddress(this, rawPort);
+        Function<Maybe<JsonElement>, String> getNodeId = new Function<Maybe<JsonElement>, String>() {
+            @Override public String apply(Maybe<JsonElement> input) {
+                if (input.isAbsent()) {
+                    return null;
+                }
+                return input.get().getAsJsonObject().entrySet().iterator().next().getKey();
+            }
+        };
+        httpFeed = HttpFeed.builder()
+            .entity(this)
+            .period(1000)
+            .baseUri(String.format("http://%s:%s/_nodes/_local/stats", hp.getHostText(), hp.getPort()))
+            .poll(new HttpPollConfig<Boolean>(SERVICE_UP)
+                .onSuccess(HttpValueFunctions.responseCodeEquals(200))
+                .onFailureOrException(Functions.constant(false)))
+            .poll(new HttpPollConfig<String>(NODE_ID)
+                .onSuccess(Functionals.chain(HttpValueFunctions.jsonContents(), MaybeFunctions.<JsonElement>wrap(), JsonFunctions.walkM("nodes"), getNodeId))
+                .onFailureOrException(Functions.constant("")))
+            .poll(getSensorFromNodeStat(NODE_NAME, "name"))
+            .poll(getSensorFromNodeStat(DOCUMENT_COUNT, "indices", "docs", "count"))
+            .poll(getSensorFromNodeStat(STORE_BYTES, "indices", "store", "size_in_bytes"))
+            .poll(getSensorFromNodeStat(GET_TOTAL, "indices", "get", "total"))
+            .poll(getSensorFromNodeStat(GET_TIME_IN_MILLIS, "indices", "get", "time_in_millis"))
+            .poll(getSensorFromNodeStat(SEARCH_QUERY_TOTAL, "indices", "search", "query_total"))
+            .poll(getSensorFromNodeStat(SEARCH_QUERY_TIME_IN_MILLIS, "indices", "search", "query_time_in_millis"))
+            .poll(new HttpPollConfig<String>(CLUSTER_NAME)
+                .onSuccess(HttpValueFunctions.jsonContents("cluster_name", String.class)))
+            .build();
+    }
+    
+    @Override
+    protected void disconnectSensors() {
+        super.disconnectSensors();
+        if (httpFeed != null) {
+            httpFeed.stop();
+        }
+    }
+}

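The getSensorFromNodeStat helper above generalizes to any path under the per-node stats object; e.g. a hypothetical extra sensor for indexing totals (the stats path is assumed from the ES 1.x node-stats layout):

    AttributeSensor<Integer> INDEXING_TOTAL = Sensors.newIntegerSensor("elasticsearch.node.indexing.index.total");
    // added to the HttpFeed builder alongside the other polls:
    //     .poll(getSensorFromNodeStat(INDEXING_TOTAL, "indices", "indexing", "index_total"))
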
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeSshDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeSshDriver.java
new file mode 100644
index 0000000..74f53e5
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeSshDriver.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.elasticsearch;
+
+import static java.lang.String.format;
+
+import java.io.Reader;
+import java.io.StringReader;
+import java.util.List;
+
+import brooklyn.config.ConfigKey;
+import brooklyn.entity.basic.AbstractSoftwareProcessSshDriver;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.basic.EntityLocal;
+import brooklyn.location.basic.SshMachineLocation;
+import brooklyn.util.collections.MutableMap;
+import brooklyn.util.net.Urls;
+import brooklyn.util.os.Os;
+import brooklyn.util.ssh.BashCommands;
+
+import com.google.common.collect.ImmutableList;
+
+public class ElasticSearchNodeSshDriver extends AbstractSoftwareProcessSshDriver implements ElasticSearchNodeDriver {
+
+    public ElasticSearchNodeSshDriver(EntityLocal entity, SshMachineLocation machine) {
+        super(entity, machine);
+    }
+
+    @Override
+    public void preInstall() {
+        resolver = Entities.newDownloader(this);
+        setExpandedInstallDir(Os.mergePaths(getInstallDir(), resolver.getUnpackedDirectoryName(format("elasticsearch-%s", getVersion()))));
+    }
+
+    @Override
+    public void install() {
+        List<String> urls = resolver.getTargets();
+        String saveAs = resolver.getFilename();
+        
+        List<String> commands = ImmutableList.<String>builder()
+            .add(BashCommands.installJavaLatestOrWarn())
+            .addAll(BashCommands.commandsToDownloadUrlsAs(urls, saveAs))
+            .add(String.format("tar zxvf %s", saveAs))
+            .build();
+        
+        newScript(INSTALLING).body.append(commands).execute();
+    }
+
+    @Override
+    public void customize() {
+        newScript(CUSTOMIZING).execute();  //create the directory
+        
+        String configFileUrl = entity.getConfig(ElasticSearchNode.TEMPLATE_CONFIGURATION_URL);
+        
+        if (configFileUrl == null) {
+            return;
+        }
+
+        String configScriptContents = processTemplate(configFileUrl);
+        Reader configContents = new StringReader(configScriptContents);
+
+        getMachine().copyTo(configContents, Urls.mergePaths(getRunDir(), getConfigFile()));
+    }
+
+    @Override
+    public void launch() {
+        String pidFile = getRunDir() + "/" + AbstractSoftwareProcessSshDriver.PID_FILENAME;
+        entity.setAttribute(ElasticSearchNode.PID_FILE, pidFile);
+        StringBuilder commandBuilder = new StringBuilder()
+            .append(String.format("%s/bin/elasticsearch -d -p %s", getExpandedInstallDir(), pidFile));
+        if (entity.getConfig(ElasticSearchNode.TEMPLATE_CONFIGURATION_URL) != null) {
+            commandBuilder.append(" -Des.config=" + Os.mergePaths(getRunDir(), getConfigFile()));
+        }
+        appendConfigIfPresent(commandBuilder, "es.path.data", ElasticSearchNode.DATA_DIR, Os.mergePaths(getRunDir(), "data"));
+        appendConfigIfPresent(commandBuilder, "es.path.logs", ElasticSearchNode.LOG_DIR, Os.mergePaths(getRunDir(), "logs"));
+        appendConfigIfPresent(commandBuilder, "es.node.name", ElasticSearchNode.NODE_NAME.getConfigKey());
+        appendConfigIfPresent(commandBuilder, "es.cluster.name", ElasticSearchNode.CLUSTER_NAME.getConfigKey());
+        appendConfigIfPresent(commandBuilder, "es.discovery.zen.ping.multicast.enabled", ElasticSearchNode.MULTICAST_ENABLED);
+        appendConfigIfPresent(commandBuilder, "es.discovery.zen.ping.unicast.enabled", ElasticSearchNode.UNICAST_ENABLED);
+        commandBuilder.append(" > out.log 2> err.log < /dev/null");
+        newScript(MutableMap.of("usePidFile", false), LAUNCHING)
+            .updateTaskAndFailOnNonZeroResultCode()
+            .body.append(commandBuilder.toString())
+            .execute();
+    }
+    
+    private void appendConfigIfPresent(StringBuilder builder, String parameter, ConfigKey<?> configKey) {
+        appendConfigIfPresent(builder, parameter, configKey, null);
+    }
+    
+    private void appendConfigIfPresent(StringBuilder builder, String parameter, ConfigKey<?> configKey, String defaultValue) {
+        String config = null;
+        if (entity.getConfig(configKey) != null) {
+            config = String.valueOf(entity.getConfig(configKey));
+        }
+        if (config == null && defaultValue != null) {
+            config = defaultValue;
+        }
+        if (config != null) {
+            builder.append(String.format(" -D%s=%s", parameter, config));
+        }
+    }
+    
+    public String getConfigFile() {
+        return "elasticsearch.yaml";
+    }
+    
+    @Override
+    public boolean isRunning() {
+        return newScript(MutableMap.of("usePidFile", true), CHECK_RUNNING).execute() == 0;
+    }
+    
+    @Override
+    public void stop() {
+        newScript(MutableMap.of("usePidFile", true), STOPPING).execute();
+    }
+    
+    @Override
+    public void kill() {
+        newScript(MutableMap.of("usePidFile", true), KILLING).execute();
+    }
+
+}

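Aside on the launch() method above: it assembles one daemonized command line, appending a "-Des.<key>=<value>" pair only for config that is actually set. A minimal standalone sketch of that composition (class name and values here are hypothetical, not part of the driver):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class EsLaunchCommandSketch {
        // Mirrors appendConfigIfPresent: emit a -D pair only for non-null values.
        static String buildCommand(String installDir, String pidFile, Map<String, String> config) {
            StringBuilder cmd = new StringBuilder(
                    String.format("%s/bin/elasticsearch -d -p %s", installDir, pidFile));
            for (Map.Entry<String, String> e : config.entrySet()) {
                if (e.getValue() != null) {
                    cmd.append(String.format(" -D%s=%s", e.getKey(), e.getValue()));
                }
            }
            return cmd.append(" > out.log 2> err.log < /dev/null").toString();
        }

        public static void main(String[] args) {
            Map<String, String> config = new LinkedHashMap<String, String>();
            config.put("es.cluster.name", "demo-cluster");
            config.put("es.node.name", null); // unset config is skipped, as in the driver
            System.out.println(buildCommand("/opt/elasticsearch", "/tmp/es.pid", config));
        }
    }
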
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/AbstractMongoDBServer.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/AbstractMongoDBServer.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/AbstractMongoDBServer.java
new file mode 100644
index 0000000..5f8cc84
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/AbstractMongoDBServer.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import brooklyn.config.ConfigKey;
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.ConfigKeys;
+import brooklyn.entity.basic.SoftwareProcess;
+import brooklyn.event.basic.AttributeSensorAndConfigKey;
+import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
+import brooklyn.event.basic.PortAttributeSensorAndConfigKey;
+import brooklyn.util.flags.SetFromFlag;
+
+public interface AbstractMongoDBServer extends SoftwareProcess, Entity {
+
+    // TODO Need to properly test v2.4.x and v2.5.x support.
+    // I think the v2.5.x were dev releases.
+    // Should update mongo.config to yaml format, but no rush for that.
+    
+    @SetFromFlag("dataDirectory")
+    ConfigKey<String> DATA_DIRECTORY = ConfigKeys.newStringConfigKey(
+            "mongodb.data.directory", "Data directory to store MongoDB journals");
+    
+    @SetFromFlag("mongodbConfTemplateUrl")
+    ConfigKey<String> MONGODB_CONF_TEMPLATE_URL = ConfigKeys.newStringConfigKey(
+            "mongodb.config.url", "Template file (in freemarker format) for a MongoDB configuration file",
+            "classpath://org/apache/brooklyn/entity/nosql/mongodb/default.conf");
+    
+    @SetFromFlag("version")
+    ConfigKey<String> SUGGESTED_VERSION =
+            ConfigKeys.newConfigKeyWithDefault(SoftwareProcess.SUGGESTED_VERSION, "2.6.5");
+
+    // TODO: Windows support
+    // e.g. http://fastdl.mongodb.org/linux/mongodb-linux-x86_64-2.2.2.tgz,
+    // http://fastdl.mongodb.org/osx/mongodb-osx-x86_64-2.2.2.tgz
+    // http://downloads.mongodb.org/win32/mongodb-win32-x86_64-1.8.5.zip
+    // Note Windows download is a zip.
+    @SetFromFlag("downloadUrl")
+    AttributeSensorAndConfigKey<String, String> DOWNLOAD_URL = new BasicAttributeSensorAndConfigKey<String>(
+            SoftwareProcess.DOWNLOAD_URL, "http://fastdl.mongodb.org/${driver.osDir}/${driver.osTag}-${version}.tgz");
+
+    @SetFromFlag("port")
+    PortAttributeSensorAndConfigKey PORT =
+            new PortAttributeSensorAndConfigKey("mongodb.server.port", "Server port", "27017+");
+}
\ No newline at end of file

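Note on DOWNLOAD_URL above: it is a template resolved against the driver, so ${driver.osDir} and ${driver.osTag} come from getOsDir()/getOsTag() in the SSH driver that follows. A rough sketch of the substitution (plain string replacement; the real resolution goes through Brooklyn's template machinery):

    public class MongoDownloadUrlSketch {
        public static void main(String[] args) {
            String template = "http://fastdl.mongodb.org/${driver.osDir}/${driver.osTag}-${version}.tgz";
            // Values as getOsDir()/getOsTag() would report for 64-bit Linux, with the default version.
            String url = template
                    .replace("${driver.osDir}", "linux")
                    .replace("${driver.osTag}", "mongodb-linux-x86_64")
                    .replace("${version}", "2.6.5");
            // Prints: http://fastdl.mongodb.org/linux/mongodb-linux-x86_64-2.6.5.tgz
            System.out.println(url);
        }
    }
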
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/AbstractMongoDBSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/AbstractMongoDBSshDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/AbstractMongoDBSshDriver.java
new file mode 100644
index 0000000..4065ec5
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/AbstractMongoDBSshDriver.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.entity.basic.AbstractSoftwareProcessSshDriver;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.basic.EntityLocal;
+import brooklyn.entity.basic.lifecycle.ScriptHelper;
+import brooklyn.location.OsDetails;
+import brooklyn.location.basic.SshMachineLocation;
+import brooklyn.util.exceptions.Exceptions;
+import brooklyn.util.net.Networking;
+import brooklyn.util.os.Os;
+import brooklyn.util.ssh.BashCommands;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+
+public abstract class AbstractMongoDBSshDriver extends AbstractSoftwareProcessSshDriver {
+
+    private static final Logger LOG = LoggerFactory.getLogger(AbstractMongoDBSshDriver.class);
+    
+    public AbstractMongoDBSshDriver(EntityLocal entity, SshMachineLocation machine) {
+        super(entity, machine);
+    }
+
+    @Override
+    public void preInstall() {
+        resolver = Entities.newDownloader(this);
+        setExpandedInstallDir(Os.mergePaths(getInstallDir(), resolver.getUnpackedDirectoryName(getBaseName())));
+    }
+
+    @Override
+    public void install() {
+        List<String> urls = resolver.getTargets();
+        String saveAs = resolver.getFilename();
+    
+        List<String> commands = new LinkedList<String>();
+        commands.addAll(BashCommands.commandsToDownloadUrlsAs(urls, saveAs));
+        commands.add(BashCommands.INSTALL_TAR);
+        commands.add("tar xzfv " + saveAs);
+    
+        newScript(INSTALLING)
+                .failOnNonZeroResultCode()
+                .body.append(commands).execute();
+    }
+    
+    @Override
+    public void customize() {
+        Map<?,?> ports = ImmutableMap.of("port", getServerPort());
+        Networking.checkPortsValid(ports);
+        String command = String.format("mkdir -p %s", getDataDirectory());
+        newScript(CUSTOMIZING)
+                .updateTaskAndFailOnNonZeroResultCode()
+                .body.append(command).execute();
+        String templateUrl = entity.getConfig(MongoDBServer.MONGODB_CONF_TEMPLATE_URL);
+        if (!Strings.isNullOrEmpty(templateUrl)) copyTemplate(templateUrl, getConfFile());
+    }
+    
+    @Override
+    public boolean isRunning() {
+        try {
+            return MongoDBClientSupport.forServer((AbstractMongoDBServer) entity).ping();
+        } catch (Exception e) {
+            Exceptions.propagateIfFatal(e);
+            return false;
+        }
+    }
+    
+    /**
+     * Kills the server with SIGINT. Sending SIGKILL is likely to result in data corruption.
+     * @see <a href="http://docs.mongodb.org/manual/tutorial/manage-mongodb-processes/#sending-a-unix-int-or-term-signal">http://docs.mongodb.org/manual/tutorial/manage-mongodb-processes/#sending-a-unix-int-or-term-signal</a>
+     */
+    @Override
+    public void stop() {
+        // TODO: Wait for process to terminate. Currently, this will send the signal and then immediately continue with next steps, 
+        // which could involve stopping VM etc.
+        
+        // We could also use SIGTERM (15)
+        new ScriptHelper(this, "Send SIGINT to MongoDB server")
+                .body.append("kill -2 $(cat " + getPidFile() + ")")
+                .execute();
+    }
+
+    protected String getBaseName() {
+        return getOsTag() + "-" + entity.getConfig(AbstractMongoDBServer.SUGGESTED_VERSION);
+    }
+
+    // IDE note: This is used by MongoDBServer.DOWNLOAD_URL
+    public String getOsDir() {
+        return (getLocation().getOsDetails().isMac()) ? "osx" : "linux";
+    }
+
+    public String getOsTag() {
+        OsDetails os = getLocation().getOsDetails();
+        if (os == null) {
+            // Default to generic linux
+            return "mongodb-linux-x86_64";
+        } else if (os.isMac()) {
+            // Mac is 64bit only
+            return "mongodb-osx-x86_64";
+        } else {
+            String arch = os.is64bit() ? "x86_64" : "i686";
+            return "mongodb-linux-" + arch;
+        }
+    }
+
+    public String getDataDirectory() {
+        String result = entity.getConfig(MongoDBServer.DATA_DIRECTORY);
+        if (result!=null) return result;
+        return getRunDir() + "/data";
+    }
+
+    protected String getLogFile() {
+        return getRunDir() + "/log.txt";
+    }
+
+    protected String getPidFile() {
+        return getRunDir() + "/pid";
+    }
+
+    protected Integer getServerPort() {
+        return entity.getAttribute(MongoDBServer.PORT);
+    }
+
+    protected String getConfFile() {
+        return getRunDir() + "/mongo.conf";
+    }
+
+    protected ImmutableList.Builder<String> getArgsBuilderWithDefaults(AbstractMongoDBServer server) {
+        Integer port = server.getAttribute(MongoDBServer.PORT);
+
+        return ImmutableList.<String>builder()
+                .add("--config", getConfFile())
+                .add("--pidfilepath", getPidFile())
+                .add("--logpath", getLogFile())
+                .add("--port", port.toString())
+                .add("--fork");
+    }
+    
+    protected void launch(ImmutableList.Builder<String> argsBuilder) {
+        String args = Joiner.on(" ").join(argsBuilder.build());
+        String command = String.format("%s/bin/mongod %s > out.log 2> err.log < /dev/null", getExpandedInstallDir(), args);
+        LOG.info(command);
+        newScript(LAUNCHING)
+                .updateTaskAndFailOnNonZeroResultCode()
+                .body.append(command).execute();
+    }
+ 
+}

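On the stop() TODO above: the driver returns as soon as SIGINT is sent, so a caller may tear down the VM while mongod is still flushing. One way to close that gap (a hypothetical helper, not part of this commit) is to poll "kill -0" on the pid until it fails or a timeout elapses:

    public class WaitForExitSketch {
        // Returns true once the process is gone, false if still alive at the deadline.
        static boolean waitForExit(long pid, long timeoutMillis) throws Exception {
            long deadline = System.currentTimeMillis() + timeoutMillis;
            while (System.currentTimeMillis() < deadline) {
                // kill -0 sends no signal; its exit code just reports whether the pid exists.
                Process p = new ProcessBuilder("kill", "-0", Long.toString(pid)).start();
                if (p.waitFor() != 0) return true;
                Thread.sleep(500);
            }
            return false;
        }

        public static void main(String[] args) throws Exception {
            System.out.println(waitForExit(Long.parseLong(args[0]), 30000L));
        }
    }
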
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBClient.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBClient.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBClient.java
new file mode 100644
index 0000000..b2eeb59
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBClient.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import java.util.List;
+import java.util.Map;
+
+import brooklyn.config.ConfigKey;
+import brooklyn.entity.annotation.Effector;
+import brooklyn.entity.annotation.EffectorParam;
+import brooklyn.entity.basic.ConfigKeys;
+import brooklyn.entity.basic.MethodEffector;
+import org.apache.brooklyn.entity.nosql.mongodb.sharding.MongoDBShardedDeployment;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.util.flags.SetFromFlag;
+
+import com.google.common.reflect.TypeToken;
+
+@ImplementedBy(MongoDBClientImpl.class)
+public interface MongoDBClient extends AbstractMongoDBServer {
+    
+    MethodEffector<Void> RUN_SCRIPT = new MethodEffector<Void>(MongoDBClient.class, "runScript");
+    
+    @SuppressWarnings("serial")
+    @SetFromFlag("startupJsScripts")
+    ConfigKey<List<String>> STARTUP_JS_SCRIPTS = ConfigKeys.newConfigKey(
+            new TypeToken<List<String>>(){}, "mongodb.client.startupJsScripts", 
+                "List of scripts defined in mongodb.client.scripts to be run on startup");
+    
+    @SuppressWarnings("serial")
+    @SetFromFlag("scripts")
+    ConfigKey<Map<String, String>> JS_SCRIPTS = ConfigKeys.newConfigKey(
+            new TypeToken<Map<String, String>>(){}, "mongodb.client.scripts", "Map of script names to javascript "
+                    + "scripts to be copied to the server. These scripts can be run using the runScript effector");
+    
+    @SetFromFlag("shardedDeployment")
+    ConfigKey<MongoDBShardedDeployment> SHARDED_DEPLOYMENT = ConfigKeys.newConfigKey(MongoDBShardedDeployment.class, 
+            "mongodb.client.shardeddeployment", "Sharded deployment that the client will use to run scripts. "
+                    + "If both SERVER and SHARDED_DEPLOYMENT are specified, SERVER will be used");
+    
+    @SetFromFlag("server")
+    ConfigKey<AbstractMongoDBServer> SERVER = ConfigKeys.newConfigKey(AbstractMongoDBServer.class, 
+            "mongodb.client.server", "MongoDBServer that the client will use to run scripts. "
+                    + "If both SERVER and SHARDED_DEPLOYMENT are specified, SERVER will be used");
+    
+    @Effector(description="Runs one of the scripts defined in mongodb.client.scripts")
+    void runScript(@EffectorParam(name="preStart", description="use this to create parameters that can be used by the script, e.g.:<p><code>var loopCount = 10</code>") String preStart,
+            @EffectorParam(name="scriptName", description="Name of the script as defined in mongodb.client.scripts") String scriptName);
+}

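Per the config descriptions above, SERVER takes precedence when both SERVER and SHARDED_DEPLOYMENT are set (the client SSH driver's getServer(), later in this commit, implements that rule). A stripped-down sketch of the precedence logic, with plain strings standing in for the entity types:

    public class TargetSelectionSketch {
        static String chooseTarget(String server, String shardedDeployment) {
            if (server != null) {
                return server; // SERVER wins even if a deployment is also configured
            }
            if (shardedDeployment == null) {
                throw new IllegalStateException("Either server or shardedDeployment must be specified");
            }
            return shardedDeployment; // the real driver picks any available router in the deployment
        }

        public static void main(String[] args) {
            System.out.println(chooseTarget("mongod-1", null));       // mongod-1
            System.out.println(chooseTarget("mongod-1", "deploy-1")); // mongod-1 (server wins)
            System.out.println(chooseTarget(null, "deploy-1"));       // deploy-1
        }
    }
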
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBClientDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBClientDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBClientDriver.java
new file mode 100644
index 0000000..4bbfabd
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBClientDriver.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import brooklyn.entity.basic.SoftwareProcessDriver;
+
+public interface MongoDBClientDriver extends SoftwareProcessDriver {
+    void runScript(String preStart, String scriptName);
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBClientImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBClientImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBClientImpl.java
new file mode 100644
index 0000000..bff69e9
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBClientImpl.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import brooklyn.entity.basic.SoftwareProcessImpl;
+import brooklyn.entity.trait.Startable;
+
+public class MongoDBClientImpl extends SoftwareProcessImpl implements MongoDBClient {
+    
+    @Override
+    protected void connectSensors() {
+        super.connectSensors();
+        setAttribute(Startable.SERVICE_UP, true);
+    }
+
+    @SuppressWarnings("rawtypes")
+    @Override
+    public Class getDriverInterface() {
+        return MongoDBClientDriver.class;
+    }
+
+    @Override
+    public void runScript(String preStart, String scriptName) {
+        ((MongoDBClientDriver)getDriver()).runScript(preStart, scriptName);
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBClientSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBClientSshDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBClientSshDriver.java
new file mode 100644
index 0000000..50ca2ad
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBClientSshDriver.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import java.util.List;
+import java.util.Map;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.entity.basic.EntityLocal;
+import org.apache.brooklyn.entity.nosql.mongodb.sharding.MongoDBRouter;
+import org.apache.brooklyn.entity.nosql.mongodb.sharding.MongoDBRouterCluster;
+import org.apache.brooklyn.entity.nosql.mongodb.sharding.MongoDBShardedDeployment;
+import brooklyn.entity.trait.Startable;
+import brooklyn.event.basic.DependentConfiguration;
+import brooklyn.location.basic.SshMachineLocation;
+import brooklyn.util.exceptions.Exceptions;
+import brooklyn.util.math.MathPredicates;
+
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicates;
+
+public class MongoDBClientSshDriver extends AbstractMongoDBSshDriver implements MongoDBClientDriver {
+    
+    private static final Logger LOG = LoggerFactory.getLogger(MongoDBClientSshDriver.class);
+
+    private boolean isRunning = false;
+
+    public MongoDBClientSshDriver(EntityLocal entity, SshMachineLocation machine) {
+        super(entity, machine);
+    }
+    
+    @Override
+    public void customize() {
+        String command = String.format("mkdir -p %s", getUserScriptDir());
+        newScript(CUSTOMIZING)
+            .updateTaskAndFailOnNonZeroResultCode()
+            .body.append(command).execute();
+        Map<String, String> scripts = entity.getConfig(MongoDBClient.JS_SCRIPTS);
+        // JS_SCRIPTS has no default, so guard against null (launch() makes the same check for STARTUP_JS_SCRIPTS).
+        if (scripts != null) {
+            for (Map.Entry<String, String> script : scripts.entrySet()) {
+                copyResource(script.getValue(), getUserScriptDir() + script.getKey() + ".js");
+            }
+        }
+    }
+
+    @Override
+    public void launch() {
+        AbstractMongoDBServer server = getServer();
+        // The scripts will be run on the server's machine via SSH, so it shouldn't
+        // matter that the externally accessible host and port may differ.
+        String host = server.getAttribute(AbstractMongoDBServer.HOSTNAME);
+        Integer port = server.getAttribute(AbstractMongoDBServer.PORT);
+
+        List<String> scripts = entity.getConfig(MongoDBClient.STARTUP_JS_SCRIPTS);
+        if (scripts!=null) {
+            for (String scriptName : scripts) {
+                try {
+                    LOG.debug("Running MongoDB script "+scriptName+" at "+getEntity());
+                    runScript("", scriptName, host, port);
+                } catch (Exception e) {
+                    LOG.warn("Error running MongoDB script "+scriptName+" at "+getEntity()+", throwing: "+e);
+                    isRunning = false;
+                    Exceptions.propagateIfFatal(e);
+                    throw new IllegalStateException("Error running MongoDB script "+scriptName+" at "+entity+": "+e, e);
+                }
+            }
+        }
+        isRunning = true;
+    }
+    
+    @Override
+    public boolean isRunning() {
+        // TODO better would be to get some confirmation
+        return isRunning;
+    }
+    
+    @Override
+    public void stop() {
+        try {
+            super.stop();
+        } finally {
+            isRunning = false;
+        }
+    }
+    
+    private String getUserScriptDir() {
+        return getRunDir() + "/userScripts/" ;
+    }
+    
+    public void runScript(String preStart, String scriptName) {
+        AbstractMongoDBServer server = getServer();
+        String host = server.getAttribute(AbstractMongoDBServer.HOSTNAME);
+        Integer port = server.getAttribute(AbstractMongoDBServer.PORT);
+        runScript(preStart, scriptName, host, port);
+    }
+    
+    private void runScript(String preStart, String scriptName, String host, Integer port) {
+        // TODO: escape preStart to prevent injection attack
+        String command = String.format("%s/bin/mongo %s:%s --eval \"%s\" %s/%s > out.log 2> err.log < /dev/null", getExpandedInstallDir(), 
+                host, port, preStart, getUserScriptDir(), scriptName + ".js");
+        newScript(LAUNCHING)
+            .updateTaskAndFailOnNonZeroResultCode()
+            .body.append(command).execute();
+    }
+    
+    private AbstractMongoDBServer getServer() {
+        AbstractMongoDBServer server = entity.getConfig(MongoDBClient.SERVER);
+        MongoDBShardedDeployment deployment = entity.getConfig(MongoDBClient.SHARDED_DEPLOYMENT);
+        if (server == null) {
+            Preconditions.checkNotNull(deployment, "Either server or shardedDeployment must be specified for %s", this);
+            server = DependentConfiguration.builder()
+                    .attributeWhenReady(deployment.getRouterCluster(), MongoDBRouterCluster.ANY_ROUTER)
+                    .blockingDetails("any available router")
+                    .runNow();
+            DependentConfiguration.builder()
+                    .attributeWhenReady(server, MongoDBRouter.SHARD_COUNT)
+                    .readiness(MathPredicates.<Integer>greaterThan(0))
+                    .runNow();
+        } else {
+            if (deployment != null) {
+                log.warn("Server and ShardedDeployment defined for {}; using server ({} instead of {})", 
+                        new Object[] {this, server, deployment});
+            }
+            DependentConfiguration.builder()
+                    .attributeWhenReady(server, Startable.SERVICE_UP)
+                    .readiness(Predicates.equalTo(true))
+                    .runNow();
+        }
+        return server;
+    }
+}

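On the injection TODO in runScript above: preStart is interpolated straight into a double-quoted --eval, so a value containing a quote could break out of the command. A minimal escaping helper of the usual single-quote form (hypothetical; the driver would also need to switch the format string to single quotes):

    public class ShellEscapeSketch {
        // Wrap in single quotes; an embedded single quote becomes '\'' (close, escaped quote, reopen).
        static String shellEscape(String s) {
            return "'" + s.replace("'", "'\\''") + "'";
        }

        public static void main(String[] args) {
            System.out.println(shellEscape("var loopCount = 10"));
            // A hostile preStart can no longer terminate the --eval argument:
            System.out.println(shellEscape("x\"; rm -rf / #"));
        }
    }
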

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBClientSupport.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBClientSupport.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBClientSupport.java
new file mode 100644
index 0000000..d9997aa
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBClientSupport.java
@@ -0,0 +1,263 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import java.net.UnknownHostException;
+
+import org.bson.BSONObject;
+import org.bson.BasicBSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+import com.google.common.net.HostAndPort;
+import com.mongodb.BasicDBObject;
+import com.mongodb.CommandResult;
+import com.mongodb.DB;
+import com.mongodb.DBObject;
+import com.mongodb.MongoClient;
+import com.mongodb.MongoClientOptions;
+import com.mongodb.MongoException;
+import com.mongodb.ServerAddress;
+
+import brooklyn.location.access.BrooklynAccessUtils;
+import brooklyn.util.BrooklynNetworkUtils;
+
+/**
+ * Manages connections to standalone MongoDB servers.
+ *
+ * @see <a href="http://docs.mongodb.org/manual/reference/command/">MongoDB database command documentation</a>
+ */
+public class MongoDBClientSupport {
+
+    private static final Logger LOG = LoggerFactory.getLogger(MongoDBClientSupport.class);
+
+    private ServerAddress address;
+
+    // Set clients to automatically reconnect to servers.
+    private static final MongoClientOptions connectionOptions = MongoClientOptions.builder()
+            .autoConnectRetry(true)
+            .socketKeepAlive(true)
+            .build();
+
+    private MongoClient client() {
+        return new MongoClient(address, connectionOptions);
+    }
+
+    private static final BasicBSONObject EMPTY_RESPONSE = new BasicBSONObject();
+
+    public MongoDBClientSupport(ServerAddress standalone) {
+        // We could also use a MongoClient to access an entire replica set. See MongoClient(List<ServerAddress>).
+        address = standalone;
+    }
+
+    /**
+     * Creates a {@link MongoDBClientSupport} instance in standalone mode.
+     */
+    public static MongoDBClientSupport forServer(AbstractMongoDBServer standalone) throws UnknownHostException {
+        HostAndPort hostAndPort = BrooklynAccessUtils.getBrooklynAccessibleAddress(standalone, standalone.getAttribute(MongoDBServer.PORT));
+        ServerAddress address = new ServerAddress(hostAndPort.getHostText(), hostAndPort.getPort());
+        return new MongoDBClientSupport(address);
+    }
+
+    private ServerAddress getServerAddress() {
+        MongoClient client = client();
+        try {
+            return client.getServerAddressList().get(0);
+        } finally {
+            client.close();
+        }
+    }
+
+    private HostAndPort getServerHostAndPort() {
+        ServerAddress address = getServerAddress();
+        return HostAndPort.fromParts(address.getHost(), address.getPort());
+    }
+
+    public Optional<CommandResult> runDBCommand(String database, String command) {
+        return runDBCommand(database, new BasicDBObject(command, Boolean.TRUE));
+    }
+
+    private Optional<CommandResult> runDBCommand(String database, DBObject command) {
+        MongoClient client = client();
+        try {
+            DB db = client.getDB(database);
+            CommandResult status;
+            try {
+                status = db.command(command);
+            } catch (MongoException e) {
+                LOG.warn("Command " + command + " on " + getServerAddress() + " failed", e);
+                return Optional.absent();
+            }
+            if (!status.ok()) {
+                LOG.debug("Unexpected result of {} on {}: {}",
+                        new Object[] { command, getServerAddress(), status.getErrorMessage() });
+            }
+            return Optional.of(status);
+        } finally {
+            client.close();
+        }
+    }
+    
+    public long getShardCount() {
+        MongoClient client = client();
+        try {
+            return client.getDB("config").getCollection("shards").getCount();
+        } finally {
+            client.close();
+        }
+    }
+
+    public BasicBSONObject getServerStatus() {
+        Optional<CommandResult> result = runDBCommand("admin", "serverStatus");
+        if (result.isPresent() && result.get().ok()) {
+            return result.get();
+        } else {
+            return EMPTY_RESPONSE;
+        }
+    }
+    
+    public boolean ping() {
+        // runDBCommand already swallows MongoException (returning absent), so the
+        // result must be inspected rather than relying on an exception propagating.
+        Optional<CommandResult> result = runDBCommand("admin", new BasicDBObject("ping", "1"));
+        return result.isPresent() && result.get().ok();
+    }
+
+    public boolean initializeReplicaSet(String replicaSetName, Integer id) {
+        HostAndPort primary = getServerHostAndPort();
+        BasicBSONObject config = ReplicaSetConfig.builder(replicaSetName)
+                .member(primary, id)
+                .build();
+
+        BasicDBObject dbObject = new BasicDBObject("replSetInitiate", config);
+        LOG.debug("Initiating replica set with: " + dbObject);
+
+        Optional<CommandResult> result = runDBCommand("admin", dbObject);
+        if (result.isPresent() && result.get().ok() && LOG.isDebugEnabled()) {
+            LOG.debug("Completed initiating MongoDB replica set {} on entity {}", replicaSetName, this);
+        }
+        return result.isPresent() && result.get().ok();
+    }
+
+    /**
+     * Java equivalent of calling rs.conf() in the console.
+     */
+    private BSONObject getReplicaSetConfig() {
+        MongoClient client = client();
+        try {
+            return client.getDB("local").getCollection("system.replset").findOne();
+        } catch (MongoException e) {
+            LOG.error("Failed to get replica set config on "+client, e);
+            return null;
+        } finally {
+            client.close();
+        }
+    }
+
+    /**
+     * Runs <code>replSetGetStatus</code> on the admin database.
+     *
+     * @return The result of <code>replSetGetStatus</code>, or
+     *         an empty {@link BasicBSONObject} if the command threw an exception (e.g. if
+     *         the connection was reset) or if the resultant {@link CommandResult#ok} was false.
+     *
+     * @see <a href="http://docs.mongodb.org/manual/reference/replica-status/">Replica set status reference</a>
+     * @see <a href="http://docs.mongodb.org/manual/reference/command/replSetGetStatus/">replSetGetStatus documentation</a>
+     */
+    public BasicBSONObject getReplicaSetStatus() {
+        Optional<CommandResult> result = runDBCommand("admin", "replSetGetStatus");
+        if (result.isPresent() && result.get().ok()) {
+            return result.get();
+        } else {
+            return EMPTY_RESPONSE;
+        }
+    }
+
+    /**
+     * Reconfigures the replica set that this client is the primary member of to include a new member.
+     * <p/>
+     * Note that this can cause long downtime (typically 10-20s, even up to a minute).
+     *
+     * @param secondary New member of the set.
+     * @param id The id for the new set member. Must be unique within the set.
+     * @return True if successful
+     */
+    public boolean addMemberToReplicaSet(MongoDBServer secondary, Integer id) {
+        // We need to:
+        // - get the existing configuration
+        // - update its version
+        // - add the new member to its list of members
+        // - run replSetReconfig with the new configuration.
+        BSONObject existingConfig = getReplicaSetConfig();
+        if (existingConfig == null) {
+            LOG.warn("Couldn't load existing config for replica set from {}. Server {} not added.",
+                    getServerAddress(), secondary);
+            return false;
+        }
+
+        BasicBSONObject newConfig = ReplicaSetConfig.fromExistingConfig(existingConfig)
+                .primary(getServerHostAndPort())
+                .member(secondary, id)
+                .build();
+        return reconfigureReplicaSet(newConfig);
+    }
+
+    /**
+     * Reconfigures the replica set that this client is the primary member of to
+     * remove the given server.
+     * @param server The server to remove
+     * @return True if successful
+     */
+    public boolean removeMemberFromReplicaSet(MongoDBServer server) {
+        BSONObject existingConfig = getReplicaSetConfig();
+        if (existingConfig == null) {
+            LOG.warn("Couldn't load existing config for replica set from {}. Server {} not removed.",
+                    getServerAddress(), server);
+            return false;
+        }
+        BasicBSONObject newConfig = ReplicaSetConfig.fromExistingConfig(existingConfig)
+                .primary(getServerHostAndPort())
+                .remove(server)
+                .build();
+        return reconfigureReplicaSet(newConfig);
+    }
+
+    /**
+     * Runs replSetReconfig with the given BasicBSONObject. Returns true if the result's
+     * status is ok.
+     */
+    private boolean reconfigureReplicaSet(BasicBSONObject newConfig) {
+        BasicDBObject command = new BasicDBObject("replSetReconfig", newConfig);
+        LOG.debug("Reconfiguring replica set to: " + command);
+        Optional<CommandResult> result = runDBCommand("admin", command);
+        return result.isPresent() && result.get().ok();
+    }
+
+    public boolean addShardToRouter(String hostAndPort) {
+        LOG.debug("Adding shard " + hostAndPort);
+        BasicDBObject command = new BasicDBObject("addShard", hostAndPort);
+        Optional<CommandResult> result = runDBCommand("admin", command);
+        return result.isPresent() && result.get().ok();
+    }
+
+}

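A short usage sketch for the support class above, assuming it is on the classpath in the same package and that a standalone mongod is reachable on localhost:27017 (both assumptions, not part of this commit):

    import java.net.UnknownHostException;

    import org.bson.BasicBSONObject;

    import com.mongodb.ServerAddress;

    public class MongoSupportUsageSketch {
        public static void main(String[] args) throws UnknownHostException {
            MongoDBClientSupport support = new MongoDBClientSupport(new ServerAddress("localhost", 27017));
            if (support.ping()) {
                // serverStatus exposes uptime, opcounters, network stats, etc.
                BasicBSONObject status = support.getServerStatus();
                System.out.println("uptime: " + status.get("uptime"));
            } else {
                System.out.println("server not reachable");
            }
        }
    }
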
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBDriver.java
new file mode 100644
index 0000000..b7d93f0
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBDriver.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import brooklyn.entity.basic.SoftwareProcessDriver;
+
+public interface MongoDBDriver extends SoftwareProcessDriver {
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBReplicaSet.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBReplicaSet.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBReplicaSet.java
new file mode 100644
index 0000000..b7d91db
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBReplicaSet.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import java.util.Collection;
+import java.util.List;
+
+import brooklyn.config.ConfigKey;
+import brooklyn.entity.basic.ConfigKeys;
+import brooklyn.entity.group.Cluster;
+import brooklyn.entity.group.DynamicCluster;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.Sensors;
+import brooklyn.util.flags.SetFromFlag;
+
+import com.google.common.reflect.TypeToken;
+
+/**
+ * A replica set of {@link MongoDBServer}s, based on {@link DynamicCluster} which can be resized by a policy
+ * if required.
+ *
+ * <p/><b>Note</b>
+ * An issue with <code>mongod</code> on Mac OS X can cause unpredictable failure of servers at start-up.
+ * See <a href="https://groups.google.com/forum/#!topic/mongodb-user/QRQYdIXOR2U">this mailing list post</a>
+ * for more information.
+ *
+ * <p/>This replica set implementation has been tested on OS X 10.6 and Ubuntu 12.04.
+ *
+ * @see <a href="http://docs.mongodb.org/manual/replication/">http://docs.mongodb.org/manual/replication/</a>
+ */
+@ImplementedBy(MongoDBReplicaSetImpl.class)
+public interface MongoDBReplicaSet extends DynamicCluster {
+
+    @SetFromFlag("replicaSetName")
+    ConfigKey<String> REPLICA_SET_NAME = ConfigKeys.newStringConfigKey(
+            "mongodb.replicaSet.name", "Name of the MongoDB replica set", "BrooklynCluster");
+
+    ConfigKey<Integer> INITIAL_SIZE = ConfigKeys.newConfigKeyWithDefault(Cluster.INITIAL_SIZE, 3);
+
+    AttributeSensor<MongoDBServer> PRIMARY_ENTITY = Sensors.newSensor(
+            MongoDBServer.class, "mongodb.replicaSet.primary.entity", "The entity acting as primary");
+
+    @SuppressWarnings("serial")
+    AttributeSensor<List<String>> REPLICA_SET_ENDPOINTS = Sensors.newSensor(new TypeToken<List<String>>() {}, 
+        "mongodb.replicaSet.endpoints", "Endpoints active for this replica set");
+
+    /**
+     * The name of the replica set.
+     */
+    String getName();
+
+    /**
+     * @return The primary MongoDB server in the replica set.
+     */
+    MongoDBServer getPrimary();
+
+    /**
+     * @return The secondary servers in the replica set.
+     */
+    Collection<MongoDBServer> getSecondaries();
+
+    /**
+     * @return All servers in the replica set.
+     */
+    Collection<MongoDBServer> getReplicas();
+
+}

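The REPLICA_SET_ENDPOINTS sensor above publishes one host:port string per member; a consumer would typically fold those into a standard MongoDB connection string. A sketch using Guava's Joiner (endpoint values and set name are illustrative only):

    import java.util.Arrays;
    import java.util.List;

    import com.google.common.base.Joiner;

    public class ReplicaSetUriSketch {
        static String connectionString(List<String> endpoints, String replicaSetName) {
            return "mongodb://" + Joiner.on(',').join(endpoints) + "/?replicaSet=" + replicaSetName;
        }

        public static void main(String[] args) {
            List<String> endpoints = Arrays.asList("10.0.0.1:27017", "10.0.0.2:27017", "10.0.0.3:27017");
            // mongodb://10.0.0.1:27017,10.0.0.2:27017,10.0.0.3:27017/?replicaSet=BrooklynCluster
            System.out.println(connectionString(endpoints, "BrooklynCluster"));
        }
    }
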
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetImpl.java
new file mode 100644
index 0000000..e5ce093
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetImpl.java
@@ -0,0 +1,404 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import javax.annotation.Nullable;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.enricher.Enrichers;
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.Lifecycle;
+import brooklyn.entity.basic.ServiceStateLogic;
+import brooklyn.entity.group.AbstractMembershipTrackingPolicy;
+import brooklyn.entity.group.DynamicClusterImpl;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.SensorEvent;
+import brooklyn.event.SensorEventListener;
+import brooklyn.location.Location;
+import brooklyn.policy.PolicySpec;
+import brooklyn.util.collections.MutableList;
+import brooklyn.util.collections.MutableSet;
+import brooklyn.util.text.Strings;
+
+import com.google.common.base.Function;
+import com.google.common.base.Predicate;
+import com.google.common.base.Predicates;
+import com.google.common.collect.FluentIterable;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
+
+/**
+ * Implementation of {@link MongoDBReplicaSet}.
+ *
+ * Replica sets have a <i>minimum</i> of three members.
+ *
+ * Removal strategy is always {@link #NON_PRIMARY_REMOVAL_STRATEGY}.
+ */
+public class MongoDBReplicaSetImpl extends DynamicClusterImpl implements MongoDBReplicaSet {
+
+    private static final Logger LOG = LoggerFactory.getLogger(MongoDBReplicaSetImpl.class);
+
+    // Provides IDs for replica set members. The first member will have ID 0.
+    private final AtomicInteger nextMemberId = new AtomicInteger(0);
+
+    private MemberTrackingPolicy policy;
+    private final AtomicBoolean mustInitialise = new AtomicBoolean(true);
+
+    @SuppressWarnings("unchecked")
+    protected static final List<AttributeSensor<Long>> SENSORS_TO_SUM = Arrays.asList(
+        MongoDBServer.OPCOUNTERS_INSERTS,
+        MongoDBServer.OPCOUNTERS_QUERIES,
+        MongoDBServer.OPCOUNTERS_UPDATES,
+        MongoDBServer.OPCOUNTERS_DELETES,
+        MongoDBServer.OPCOUNTERS_GETMORE,
+        MongoDBServer.OPCOUNTERS_COMMAND,
+        MongoDBServer.NETWORK_BYTES_IN,
+        MongoDBServer.NETWORK_BYTES_OUT,
+        MongoDBServer.NETWORK_NUM_REQUESTS);
+    
+    public MongoDBReplicaSetImpl() {
+    }
+
+    /**
+     * Manages member addition and removal.
+     *
+     * It's important that this is a single thread: the concurrent addition and removal
+     * of members from the set would almost certainly have unintended side effects,
+     * like reconfigurations using outdated ReplicaSetConfig instances.
+     */
+    private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
+
+    /** true iff input is a non-null MongoDBServer with attribute REPLICA_SET_MEMBER_STATUS PRIMARY. */
+    static final Predicate<Entity> IS_PRIMARY = new Predicate<Entity>() {
+        // getPrimary relies on instanceof check
+        @Override public boolean apply(@Nullable Entity input) {
+            return input != null
+                    && input instanceof MongoDBServer
+                    && ReplicaSetMemberStatus.PRIMARY.equals(input.getAttribute(MongoDBServer.REPLICA_SET_MEMBER_STATUS));
+        }
+    };
+
+    /** true iff input is a non-null MongoDBServer with attribute REPLICA_SET_MEMBER_STATUS SECONDARY. */
+    static final Predicate<Entity> IS_SECONDARY = new Predicate<Entity>() {
+        @Override public boolean apply(@Nullable Entity input) {
+            // getSecondaries relies on instanceof check
+            return input != null
+                    && input instanceof MongoDBServer
+                    && ReplicaSetMemberStatus.SECONDARY.equals(input.getAttribute(MongoDBServer.REPLICA_SET_MEMBER_STATUS));
+        }
+    };
+
+    /**
+     * {@link Function} for use as the cluster's removal strategy. Chooses any entity with
+     * {@link MongoDBServer#IS_PRIMARY_FOR_REPLICA_SET} true last of all.
+     */
+    private static final Function<Collection<Entity>, Entity> NON_PRIMARY_REMOVAL_STRATEGY = new Function<Collection<Entity>, Entity>() {
+        @Override
+        public Entity apply(@Nullable Collection<Entity> entities) {
+            checkArgument(entities != null && entities.size() > 0, "Expect list of MongoDBServers to have at least one entry");
+            return Iterables.tryFind(entities, Predicates.not(IS_PRIMARY)).or(Iterables.get(entities, 0));
+        }
+    };
+
+    /** @return {@link #NON_PRIMARY_REMOVAL_STRATEGY} */
+    @Override
+    public Function<Collection<Entity>, Entity> getRemovalStrategy() {
+        return NON_PRIMARY_REMOVAL_STRATEGY;
+    }
+
+    @Override
+    protected EntitySpec<?> getMemberSpec() {
+        return getConfig(MEMBER_SPEC, EntitySpec.create(MongoDBServer.class));
+    }
+
+    /**
+     * Sets {@link MongoDBServer#REPLICA_SET}.
+     */
+    @Override
+    protected Map<?,?> getCustomChildFlags() {
+        return ImmutableMap.builder()
+                .putAll(super.getCustomChildFlags())
+                .put(MongoDBServer.REPLICA_SET, getProxy())
+                .build();
+    }
+
+    @Override
+    public String getName() {
+        // FIXME: Names must be unique if the replica sets are used in a sharded cluster
+        return getConfig(REPLICA_SET_NAME) + this.getId();
+    }
+
+    @Override
+    public MongoDBServer getPrimary() {
+        return Iterables.tryFind(getReplicas(), IS_PRIMARY).orNull();
+    }
+
+    @Override
+    public Collection<MongoDBServer> getSecondaries() {
+        return FluentIterable.from(getReplicas())
+                .filter(IS_SECONDARY)
+                .toList();
+    }
+
+    @Override
+    public Collection<MongoDBServer> getReplicas() {
+        return FluentIterable.from(getMembers())
+                .transform(new Function<Entity, MongoDBServer>() {
+                    @Override public MongoDBServer apply(Entity input) {
+                        return MongoDBServer.class.cast(input);
+                    }
+                })
+                .toList();
+    }
+
+    /**
+     * Initialises the replica set with the given server as primary if {@link #mustInitialise} is true,
+     * otherwise schedules the addition of a new secondary.
+     */
+    private void serverAdded(MongoDBServer server) {
+        LOG.debug("Server added: {}. SERVICE_UP: {}", server, server.getAttribute(MongoDBServer.SERVICE_UP));
+
+        // Set the primary if the replica set hasn't been initialised.
+        if (mustInitialise.compareAndSet(true, false)) {
+            if (LOG.isInfoEnabled())
+                LOG.info("First server up in {} is: {}", getName(), server);
+            boolean replicaSetInitialised = server.initializeReplicaSet(getName(), nextMemberId.getAndIncrement());
+            if (replicaSetInitialised) {
+                setAttribute(PRIMARY_ENTITY, server);
+                setAttribute(Startable.SERVICE_UP, true);
+            } else {
+                ServiceStateLogic.setExpectedState(this, Lifecycle.ON_FIRE);
+            }
+        } else {
+            if (LOG.isDebugEnabled())
+                LOG.debug("Scheduling addition of member to {}: {}", getName(), server);
+            addSecondaryWhenPrimaryIsNonNull(server);
+        }
+    }
+
+    /**
+     * Adds a server as a secondary in the replica set.
+     * <p/>
+     * If {@link #getPrimary} returns non-null submit the secondary to the primary's
+     * {@link MongoDBClientSupport}. Otherwise, reschedule the task to run again in three
+     * seconds time (in the hope that next time the primary will be available).
+     */
+    private void addSecondaryWhenPrimaryIsNonNull(final MongoDBServer secondary) {
+        // TODO Don't use executor, use ExecutionManager
+        executor.submit(new Runnable() {
+            @Override
+            public void run() {
+                // SERVICE_UP is not guaranteed when additional members are added to the set.
+                Boolean isAvailable = secondary.getAttribute(MongoDBServer.SERVICE_UP);
+                MongoDBServer primary = getPrimary();
+                boolean reschedule;
+                if (Boolean.TRUE.equals(isAvailable) && primary != null) {
+                    boolean added = primary.addMemberToReplicaSet(secondary, nextMemberId.incrementAndGet());
+                    if (added) {
+                        LOG.info("{} added to replica set {}", secondary, getName());
+                        reschedule = false;
+                    } else {
+                        if (LOG.isDebugEnabled()) {
+                            LOG.debug("{} could not be added to replica set via {}; rescheduling", secondary, getName());
+                        }
+                        reschedule = true;
+                    }
+                } else {
+                    if (LOG.isTraceEnabled()) {
+                        LOG.trace("Rescheduling addition of member {} to replica set {}: service_up={}, primary={}",
+                            new Object[] {secondary, getName(), isAvailable, primary});
+                    }
+                    reschedule = true;
+                }
+                
+                if (reschedule) {
+                    // TODO Could limit number of retries
+                    executor.schedule(this, 3, TimeUnit.SECONDS);
+                }
+            }
+        });
+    }
+
+    /**
+     * Removes a server from the replica set.
+     * <p/>
+     * Submits a task to {@link #executor} that waits for the member to be down and for the replica
+     * set to have a primary member, then reconfigures the set to remove the member. If either of
+     * the two conditions is not met then the task reschedules itself.
+     *
+     * @param member The server to be removed from the replica set.
+     */
+    private void serverRemoved(final MongoDBServer member) {
+        if (LOG.isDebugEnabled())
+            LOG.debug("Scheduling removal of member from {}: {}", getName(), member);
+        // FIXME is there a chance of race here?
+        if (member.equals(getAttribute(PRIMARY_ENTITY)))
+            setAttribute(PRIMARY_ENTITY, null);
+        executor.submit(new Runnable() {
+            @Override
+            public void run() {
+                // Wait until the server has been stopped before reconfiguring the set. Quoth the MongoDB doc:
+                // for best results always shut down the mongod instance before removing it from a replica set.
+                Boolean isAvailable = member.getAttribute(MongoDBServer.SERVICE_UP);
+                // Wait for the replica set to elect a new primary if the set is reconfiguring itself.
+                MongoDBServer primary = getPrimary();
+                boolean reschedule;
+                
+                // isAvailable may be null here; avoid an unboxing NPE (same idiom as addSecondaryWhenPrimaryIsNonNull).
+                if (primary != null && !Boolean.TRUE.equals(isAvailable)) {
+                    boolean removed = primary.removeMemberFromReplicaSet(member);
+                    if (removed) {
+                        LOG.info("Removed {} from replica set {}", member, getName());
+                        reschedule = false;
+                    } else {
+                        if (LOG.isDebugEnabled()) {
+                            LOG.debug("{} could not be removed from replica set via {}; rescheduling", member, getName());
+                        }
+                        reschedule = true;
+                    }
+
+                } else {
+                    if (LOG.isTraceEnabled()) {
+                        LOG.trace("Rescheduling removal of member {} from replica set {}: service_up={}, primary={}",
+                            new Object[]{member, getName(), isAvailable, primary});
+                    }
+                    reschedule = true;
+                }
+                
+                if (reschedule) {
+                    // TODO Could limit number of retries
+                    executor.schedule(this, 3, TimeUnit.SECONDS);
+                }
+            }
+        });
+    }
+
+    @Override
+    public void start(Collection<? extends Location> locations) {
+        // Promises that all the cluster's members have SERVICE_UP true on returning.
+        super.start(locations);
+        policy = addPolicy(PolicySpec.create(MemberTrackingPolicy.class)
+                .displayName(getName() + " membership tracker")
+                .configure("group", this));
+
+        for (AttributeSensor<Long> sensor: SENSORS_TO_SUM)
+            addEnricher(Enrichers.builder()
+                    .aggregating(sensor)
+                    .publishing(sensor)
+                    .fromMembers()
+                    .computingSum()
+                    .valueToReportIfNoSensors(null)
+                    .defaultValueForUnreportedSensors(null)
+                    .build());
+        
+        // FIXME would it be simpler to have a *subscription* on four or five sensors on allMembers, including SERVICE_UP
+        // (which we currently don't check), rather than an enricher, and call to an "update" method?
+        addEnricher(Enrichers.builder()
+                .aggregating(MongoDBServer.REPLICA_SET_PRIMARY_ENDPOINT)
+                .publishing(MongoDBServer.REPLICA_SET_PRIMARY_ENDPOINT)
+                .fromMembers()
+                .valueToReportIfNoSensors(null)
+                .computing(new Function<Collection<String>, String>() {
+                        @Override
+                        public String apply(Collection<String> input) {
+                            if (input==null || input.isEmpty()) return null;
+                            Set<String> distinct = MutableSet.of();
+                            for (String endpoint: input)
+                                if (!Strings.isBlank(endpoint))
+                                    distinct.add(endpoint);
+                            if (distinct.isEmpty()) return null;
+                            if (distinct.size()>1)
+                                LOG.warn("Mongo replica set "+MongoDBReplicaSetImpl.this+" detected multiple masters (transitioning?): "+distinct);
+                            return distinct.iterator().next();
+                        }})
+                .build());
+
+        addEnricher(Enrichers.builder()
+                .aggregating(MongoDBServer.MONGO_SERVER_ENDPOINT)
+                .publishing(REPLICA_SET_ENDPOINTS)
+                .fromMembers()
+                .valueToReportIfNoSensors(null)
+                .computing(new Function<Collection<String>, List<String>>() {
+                        @Override
+                        public List<String> apply(Collection<String> input) {
+                            Set<String> endpoints = new TreeSet<String>();
+                            for (String endpoint: input) {
+                                if (!Strings.isBlank(endpoint)) {
+                                    endpoints.add(endpoint);
+                                }
+                            }
+                            return MutableList.copyOf(endpoints);
+                        }})
+                .build());
+
+        subscribeToMembers(this, MongoDBServer.IS_PRIMARY_FOR_REPLICA_SET, new SensorEventListener<Boolean>() {
+            @Override public void onEvent(SensorEvent<Boolean> event) {
+                if (Boolean.TRUE == event.getValue())
+                    setAttribute(PRIMARY_ENTITY, (MongoDBServer)event.getSource());
+            }
+        });
+
+    }
+
+    @Override
+    public void stop() {
+        // Do we want to remove the members from the replica set?
+        //  - if the set is being stopped forever it's irrelevant
+        //  - if the set might be restarted I think it just inconveniences us
+        // Terminate the executor immediately.
+        // TODO Note that after this the executor will not run if the set is restarted.
+        executor.shutdownNow();
+        super.stop();
+        setAttribute(Startable.SERVICE_UP, false);
+    }
+
+    @Override
+    public void onManagementStopped() {
+        super.onManagementStopped();
+        executor.shutdownNow();
+    }
+    
+    public static class MemberTrackingPolicy extends AbstractMembershipTrackingPolicy {
+        @Override protected void onEntityChange(Entity member) {
+            // Ignored
+        }
+        @Override protected void onEntityAdded(Entity member) {
+            ((MongoDBReplicaSetImpl)entity).serverAdded((MongoDBServer) member);
+        }
+        @Override protected void onEntityRemoved(Entity member) {
+            ((MongoDBReplicaSetImpl)entity).serverRemoved((MongoDBServer) member);
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBServer.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBServer.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBServer.java
new file mode 100644
index 0000000..5300684
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBServer.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import org.bson.BasicBSONObject;
+
+import org.apache.brooklyn.catalog.Catalog;
+import brooklyn.config.ConfigKey;
+import brooklyn.entity.basic.ConfigKeys;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.AttributeSensor.SensorPersistenceMode;
+import brooklyn.event.basic.BasicConfigKey;
+import brooklyn.event.basic.PortAttributeSensorAndConfigKey;
+import brooklyn.event.basic.Sensors;
+import brooklyn.util.flags.SetFromFlag;
+
+@Catalog(name="MongoDB Server",
+    description="MongoDB (from \"humongous\") is a scalable, high-performance, open source NoSQL database",
+    iconUrl="classpath:///mongodb-logo.png")
+@ImplementedBy(MongoDBServerImpl.class)
+public interface MongoDBServer extends AbstractMongoDBServer {
+
+    @SetFromFlag("mongodbConfTemplateUrl")
+    ConfigKey<String> MONGODB_CONF_TEMPLATE_URL = ConfigKeys.newConfigKeyWithDefault(
+            AbstractMongoDBServer.MONGODB_CONF_TEMPLATE_URL,
+            "classpath://org/apache/brooklyn/entity/nosql/mongodb/default-mongod.conf");
+
+    // See http://docs.mongodb.org/ecosystem/tools/http-interfaces/#http-console
+    // This is *always* 1000 more than the main server port. We disable the HTTP interface if that port is not available.
+    PortAttributeSensorAndConfigKey HTTP_PORT =
+        new PortAttributeSensorAndConfigKey("mongodb.server.httpPort", "HTTP port for the server (estimated)", "28017+");
+
+    @SetFromFlag("enableRestInterface")
+    ConfigKey<Boolean> ENABLE_REST_INTERFACE = ConfigKeys.newBooleanConfigKey(
+            "mongodb.config.enable_rest", "Adds --rest to server startup flags when true", Boolean.FALSE);
+
+    AttributeSensor<String> HTTP_INTERFACE_URL = Sensors.newStringSensor(
+            "mongodb.server.http_interface", "URL of the server's HTTP console");
+
+    AttributeSensor<BasicBSONObject> STATUS_BSON = Sensors.builder(BasicBSONObject.class, "mongodb.server.status.bson")
+            .description("Server status (BSON/JSON map object)")
+            .persistence(SensorPersistenceMode.NONE)
+            .build();
+    
+    AttributeSensor<Double> UPTIME_SECONDS = Sensors.newDoubleSensor(
+            "mongodb.server.uptime", "Server uptime in seconds");
+
+    AttributeSensor<Long> OPCOUNTERS_INSERTS = Sensors.newLongSensor(
+            "mongodb.server.opcounters.insert", "Server inserts");
+
+    AttributeSensor<Long> OPCOUNTERS_QUERIES = Sensors.newLongSensor(
+            "mongodb.server.opcounters.query", "Server queries");
+
+    AttributeSensor<Long> OPCOUNTERS_UPDATES = Sensors.newLongSensor(
+            "mongodb.server.opcounters.update", "Server updates");
+
+    AttributeSensor<Long> OPCOUNTERS_DELETES = Sensors.newLongSensor(
+            "mongodb.server.opcounters.delete", "Server deletes");
+
+    AttributeSensor<Long> OPCOUNTERS_GETMORE = Sensors.newLongSensor(
+            "mongodb.server.opcounters.getmore", "Server getmores");
+
+    AttributeSensor<Long> OPCOUNTERS_COMMAND = Sensors.newLongSensor(
+            "mongodb.server.opcounters.command", "Server commands");
+
+    AttributeSensor<Long> NETWORK_BYTES_IN = Sensors.newLongSensor(
+            "mongodb.server.network.bytesIn", "Server incoming network traffic (in bytes)");
+
+    AttributeSensor<Long> NETWORK_BYTES_OUT = Sensors.newLongSensor(
+            "mongodb.server.network.bytesOut", "Server outgoing network traffic (in bytes)");
+
+    AttributeSensor<Long> NETWORK_NUM_REQUESTS = Sensors.newLongSensor(
+            "mongodb.server.network.numRequests", "Server network requests");
+
+    /** A single server's replica set configuration. */
+    ConfigKey<MongoDBReplicaSet> REPLICA_SET = new BasicConfigKey<MongoDBReplicaSet>(MongoDBReplicaSet.class,
+            "mongodb.replicaset", "The replica set to which the server belongs. " +
+            "Users should not set this directly when creating a new replica set.");
+
+    AttributeSensor<ReplicaSetMemberStatus> REPLICA_SET_MEMBER_STATUS = Sensors.newSensor(
+            ReplicaSetMemberStatus.class, "mongodb.server.replicaSet.memberStatus", "The status of this server in the replica set");
+
+    AttributeSensor<Boolean> IS_PRIMARY_FOR_REPLICA_SET = Sensors.newBooleanSensor(
+            "mongodb.server.replicaSet.isPrimary", "True if this server is the write master for the replica set");
+
+    AttributeSensor<Boolean> IS_SECONDARY_FOR_REPLICA_SET = Sensors.newBooleanSensor(
+            "mongodb.server.replicaSet.isSecondary", "True if this server is a secondary server in the replica set");
+
+    AttributeSensor<String> REPLICA_SET_PRIMARY_ENDPOINT = Sensors.newStringSensor(
+            "mongodb.server.replicaSet.primary.endpoint", "The host:port of the server which is acting as primary (master) for the replica set");
+
+    AttributeSensor<String> MONGO_SERVER_ENDPOINT = Sensors.newStringSensor(
+            "mongodb.server.endpoint", "The host:port where this server is listening");
+
+    /**
+     * @return The replica set the server belongs to, or null if the server is a standalone instance.
+     */
+    MongoDBReplicaSet getReplicaSet();
+
+    /**
+     * @return True if the server is a child of {@link MongoDBReplicaSet}.
+     */
+    boolean isReplicaSetMember();
+
+    /**
+     * Initialises a replica set at the server the method is invoked on.
+     * @param replicaSetName The name for the replica set.
+     * @param id The id to be given to this server in the replica set configuration.
+     * @return True if initialisation is successful.
+     */
+    boolean initializeReplicaSet(String replicaSetName, Integer id);
+
+    /**
+     * Reconfigures the replica set of which the server this method is invoked on is the primary
+     * member, adding a new member.
+     * <p/>
+     * Note that this can cause long downtime (typically 10-20s, even up to a minute).
+     *
+     * @param secondary New member of the set.
+     * @param id The id for the new set member. Must be unique within the set; its validity is not checked.
+     * @return True if addition is successful. False if the server this is called on is not the primary
+     *         member of the replica set.
+     */
+    boolean addMemberToReplicaSet(MongoDBServer secondary, Integer id);
+
+    /**
+     * Reconfigures the replica set of which the server this method is invoked on is the primary
+     * member, removing the given server.
+     * @param server The server to remove.
+     * @return True if removal is successful. False if the server this is called on is not the primary
+     *         member of the replica set.
+     */
+    boolean removeMemberFromReplicaSet(MongoDBServer server);
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBServerImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBServerImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBServerImpl.java
new file mode 100644
index 0000000..346b1ee
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBServerImpl.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import java.net.UnknownHostException;
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+
+import org.bson.BasicBSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.config.render.RendererHints;
+import brooklyn.entity.basic.SoftwareProcessImpl;
+import brooklyn.event.SensorEvent;
+import brooklyn.event.SensorEventListener;
+import brooklyn.event.feed.function.FunctionFeed;
+import brooklyn.event.feed.function.FunctionPollConfig;
+import brooklyn.location.access.BrooklynAccessUtils;
+
+import com.google.common.base.Functions;
+import com.google.common.base.Objects;
+import com.google.common.net.HostAndPort;
+
+public class MongoDBServerImpl extends SoftwareProcessImpl implements MongoDBServer {
+
+    private static final Logger LOG = LoggerFactory.getLogger(MongoDBServerImpl.class);
+
+    static {
+        RendererHints.register(HTTP_INTERFACE_URL, RendererHints.namedActionWithUrl());
+    }
+
+    private FunctionFeed serviceStats;
+    private FunctionFeed replicaSetStats;
+    private MongoDBClientSupport client;
+
+    public MongoDBServerImpl() {
+    }
+
+    @Override
+    public Class<?> getDriverInterface() {
+        return MongoDBDriver.class;
+    }
+
+    @Override
+    protected void connectSensors() {
+        super.connectSensors();
+        connectServiceUpIsRunning();
+
+        int port = getAttribute(MongoDBServer.PORT);
+        HostAndPort accessibleAddress = BrooklynAccessUtils.getBrooklynAccessibleAddress(this, port);
+        setAttribute(MONGO_SERVER_ENDPOINT, String.format("http://%s:%d",
+                accessibleAddress.getHostText(), accessibleAddress.getPort()));
+
+        int httpConsolePort = BrooklynAccessUtils.getBrooklynAccessibleAddress(this, getAttribute(HTTP_PORT)).getPort();
+        setAttribute(HTTP_INTERFACE_URL, String.format("http://%s:%d",
+                accessibleAddress.getHostText(), httpConsolePort));
+
+        try {
+            client = MongoDBClientSupport.forServer(this);
+        } catch (UnknownHostException e) {
+            LOG.warn("Unable to create client connection to {}, not connecting sensors: {} ", this, e.getMessage());
+            return;
+        }
+
+        serviceStats = FunctionFeed.builder()
+                .entity(this)
+                .poll(new FunctionPollConfig<Object, BasicBSONObject>(STATUS_BSON)
+                        .period(2, TimeUnit.SECONDS)
+                        .callable(new Callable<BasicBSONObject>() {
+                            @Override
+                            public BasicBSONObject call() throws Exception {
+                                return MongoDBServerImpl.this.getAttribute(SERVICE_UP)
+                                    ? client.getServerStatus()
+                                    : null;
+                            }
+                        })
+                        .onException(Functions.<BasicBSONObject>constant(null)))
+                .build();
+
+        if (isReplicaSetMember()) {
+            replicaSetStats = FunctionFeed.builder()
+                    .entity(this)
+                    .poll(new FunctionPollConfig<Object, ReplicaSetMemberStatus>(REPLICA_SET_MEMBER_STATUS)
+                            .period(2, TimeUnit.SECONDS)
+                            .callable(new Callable<ReplicaSetMemberStatus>() {
+                                /**
+                                 * Calls {@link MongoDBClientSupport#getReplicaSetStatus} and
+                                 * extracts <code>myState</code> from the response.
+                                 * @return
+                                 *      The appropriate {@link org.apache.brooklyn.entity.nosql.mongodb.ReplicaSetMemberStatus}
+                                 *      if <code>myState</code> was non-null, {@link ReplicaSetMemberStatus#UNKNOWN} otherwise.
+                                 */
+                                @Override
+                                public ReplicaSetMemberStatus call() {
+                                    BasicBSONObject serverStatus = client.getReplicaSetStatus();
+                                    int state = serverStatus.getInt("myState", -1);
+                                    return ReplicaSetMemberStatus.fromCode(state);
+                                }
+                            })
+                            .onException(Functions.constant(ReplicaSetMemberStatus.UNKNOWN)))
+                    .build();
+        } else {
+            setAttribute(IS_PRIMARY_FOR_REPLICA_SET, false);
+            setAttribute(IS_SECONDARY_FOR_REPLICA_SET, false);
+        }
+
+        // Take interesting details from STATUS.
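+        // The map mirrors MongoDB's db.serverStatus() output; an abridged, illustrative shape:
+        //   { "uptime": 1234.0, "opcounters": { "insert": 10, ... }, "network": { "bytesIn": 512, ... },
+        //     "repl": { "ismaster": true, "secondary": false, "primary": "host:27017" } }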
+        subscribe(this, STATUS_BSON, new SensorEventListener<BasicBSONObject>() {
+                @Override public void onEvent(SensorEvent<BasicBSONObject> event) {
+                    BasicBSONObject map = event.getValue();
+                    if (map != null && !map.isEmpty()) {
+                        setAttribute(UPTIME_SECONDS, map.getDouble("uptime", 0));
+
+                        // Operations
+                        BasicBSONObject opcounters = (BasicBSONObject) map.get("opcounters");
+                        setAttribute(OPCOUNTERS_INSERTS, opcounters.getLong("insert", 0));
+                        setAttribute(OPCOUNTERS_QUERIES, opcounters.getLong("query", 0));
+                        setAttribute(OPCOUNTERS_UPDATES, opcounters.getLong("update", 0));
+                        setAttribute(OPCOUNTERS_DELETES, opcounters.getLong("delete", 0));
+                        setAttribute(OPCOUNTERS_GETMORE, opcounters.getLong("getmore", 0));
+                        setAttribute(OPCOUNTERS_COMMAND, opcounters.getLong("command", 0));
+
+                        // Network stats
+                        BasicBSONObject network = (BasicBSONObject) map.get("network");
+                        setAttribute(NETWORK_BYTES_IN, network.getLong("bytesIn", 0));
+                        setAttribute(NETWORK_BYTES_OUT, network.getLong("bytesOut", 0));
+                        setAttribute(NETWORK_NUM_REQUESTS, network.getLong("numRequests", 0));
+
+                        // Replica set stats
+                        BasicBSONObject repl = (BasicBSONObject) map.get("repl");
+                        if (isReplicaSetMember() && repl != null) {
+                            setAttribute(IS_PRIMARY_FOR_REPLICA_SET, repl.getBoolean("ismaster"));
+                            setAttribute(IS_SECONDARY_FOR_REPLICA_SET, repl.getBoolean("secondary"));
+                            setAttribute(REPLICA_SET_PRIMARY_ENDPOINT, repl.getString("primary"));
+                        }
+                    }
+                }
+        });
+    }
+
+    @Override
+    protected void disconnectSensors() {
+        super.disconnectSensors();
+        disconnectServiceUpIsRunning();
+        if (serviceStats != null) serviceStats.stop();
+        if (replicaSetStats != null) replicaSetStats.stop();
+    }
+
+    @Override
+    public MongoDBReplicaSet getReplicaSet() {
+        return getConfig(MongoDBServer.REPLICA_SET);
+    }
+
+    @Override
+    public boolean isReplicaSetMember() {
+        return getReplicaSet() != null;
+    }
+
+    @Override
+    public boolean initializeReplicaSet(String replicaSetName, Integer id) {
+        return client.initializeReplicaSet(replicaSetName, id);
+    }
+
+    @Override
+    public boolean addMemberToReplicaSet(MongoDBServer secondary, Integer id) {
+        // TODO The attributes IS_PRIMARY_FOR_REPLICA_SET and REPLICA_SET_MEMBER_STATUS can be out-of-sync.
+        // The former is obtained by an enricher that listens to STATUS_BSON (set by client.getServerStatus()).
+        // The latter is set by a different feed doing client.getReplicaSetStatus().getInt("myState").
+        // The ReplicaSet uses REPLICA_SET_MEMBER_STATUS to determine which node to call.
+        // 
+        // Relying on caller to respect the `false` result, to retry.
+        if (!getAttribute(IS_PRIMARY_FOR_REPLICA_SET)) {
+            LOG.warn("Attempted to add {} to replica set at server that is not primary: {}", secondary, this);
+            return false;
+        }
+        return client.addMemberToReplicaSet(secondary, id);
+    }
+
+    @Override
+    public boolean removeMemberFromReplicaSet(MongoDBServer server) {
+        if (!getAttribute(IS_PRIMARY_FOR_REPLICA_SET)) {
+            LOG.warn("Attempted to remove {} from replica set at server that is not primary: {}", server, this);
+            return false;
+        }
+        return client.removeMemberFromReplicaSet(server);
+    }
+
+    @Override
+    public String toString() {
+        return Objects.toStringHelper(this)
+                .add("id", getId())
+                .add("hostname", getAttribute(HOSTNAME))
+                .add("port", getAttribute(PORT))
+                .toString();
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBSshDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBSshDriver.java
new file mode 100644
index 0000000..819014d
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBSshDriver.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import static com.google.common.base.Preconditions.checkState;
+import brooklyn.location.basic.SshMachineLocation;
+
+import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableList;
+
+public class MongoDBSshDriver extends AbstractMongoDBSshDriver implements MongoDBDriver {
+
+    public MongoDBSshDriver(MongoDBServerImpl entity, SshMachineLocation machine) {
+        super(entity, machine);
+    }
+
+    @Override
+    public MongoDBServerImpl getEntity() {
+        return MongoDBServerImpl.class.cast(super.getEntity());
+    }
+
+    @Override
+    public void launch() {
+        MongoDBServer server = getEntity();
+
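+        // Assembles the mongod launch arguments; the resulting command resembles (illustrative):
+        //   mongod ... --dbpath <data-dir> [--replSet <set-name>] [--rest]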
+        ImmutableList.Builder<String> argsBuilder = getArgsBuilderWithDefaults(server)
+            .add("--dbpath", getDataDirectory());
+
+        if (server.isReplicaSetMember()) {
+            String replicaSetName = server.getReplicaSet().getName();
+            checkState(!Strings.isNullOrEmpty(replicaSetName), "Replica set name must not be null or empty");
+            argsBuilder.add("--replSet", replicaSetName);
+        }
+
+        if (Boolean.TRUE.equals(server.getConfig(MongoDBServer.ENABLE_REST_INTERFACE)))
+            argsBuilder.add("--rest");
+
+        launch(argsBuilder);
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/ReplicaSetConfig.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/ReplicaSetConfig.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/ReplicaSetConfig.java
new file mode 100644
index 0000000..a4ecebb
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/ReplicaSetConfig.java
@@ -0,0 +1,278 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.util.Iterator;
+
+import org.bson.BSONObject;
+import org.bson.BasicBSONObject;
+import org.bson.types.BasicBSONList;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+import com.google.common.net.HostAndPort;
+
+import brooklyn.location.access.BrooklynAccessUtils;
+
+/**
+ * Simplifies the creation of configuration objects for MongoDB replica sets.
+ * <p/>
+ * A configuration object is structured like this:
+ * <pre>
+ * {
+ *    "_id" : "replica-set-name",
+ *     "version" : 3,
+ *    "members" : [
+ *        { "_id" : 0, "host" : "Sams.local:27017" },
+ *        { "_id" : 1, "host" : "Sams.local:27018" },
+ *        { "_id" : 2, "host" : "Sams.local:27019" }
+ *    ]
+ * }
+ * </pre>
+ * To add servers to or remove them from a replica set you must redefine this configuration
+ * (run <code>replSetReconfig</code> on the primary) with the new <code>members</code>
+ * list and the <code>version</code> updated.
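+ * <p/>
+ * An illustrative sketch of building such a configuration with this class (hosts and ids invented):
+ * <pre>
+ * BasicBSONObject config = ReplicaSetConfig.builder("replica-set-name")
+ *     .primary(HostAndPort.fromParts("Sams.local", 27017))
+ *     .member("Sams.local", 27017, 0)
+ *     .member("Sams.local", 27018, 1)
+ *     .member("Sams.local", 27019, 2)
+ *     .build();
+ * </pre>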
+ */
+public class ReplicaSetConfig {
+
+    private static final Logger LOG = LoggerFactory.getLogger(ReplicaSetConfig.class);
+    static final int MAXIMUM_REPLICA_SET_SIZE = 12;
+    static final int MAXIMUM_VOTING_MEMBERS = 7;
+
+    private Optional<HostAndPort> primary = Optional.absent();
+
+    private String name;
+    private Integer version;
+    BasicBSONList members;
+
+    public ReplicaSetConfig(String name) {
+        this(name, new BasicBSONList());
+    }
+
+    public ReplicaSetConfig(String name, BasicBSONList existingMembers) {
+        this.name = name;
+        this.members = existingMembers;
+        this.version = 1;
+    }
+
+    /**
+     * Creates a configuration with the given name.
+     */
+    public static ReplicaSetConfig builder(String name) {
+        return new ReplicaSetConfig(name);
+    }
+
+    /**
+     * Creates a configuration from an existing configuration.
+     * <p/>
+     * Automatically increments the replica set's version number.
+     */
+    public static ReplicaSetConfig fromExistingConfig(BSONObject config) {
+        checkNotNull(config);
+        checkArgument(config.containsField("_id"), "_id missing from replica set config");
+        checkArgument(config.containsField("version"), "version missing from replica set config");
+        checkArgument(config.containsField("members"), "members missing from replica set config");
+
+        String name = (String) config.get("_id");
+        Integer version = (Integer) config.get("version");
+        BasicBSONList members = (BasicBSONList) config.get("members");
+
+        return new ReplicaSetConfig(name, members).version(++version);
+    }
+
+    /**
+     * Sets the version of the configuration. The version number must increase as the replica set changes.
+     */
+    public ReplicaSetConfig version(Integer version) {
+        this.version = version;
+        return this;
+    }
+
+    /**
+     * Notes the primary member of the replica. Primary members will always be voting members.
+     */
+    public ReplicaSetConfig primary(HostAndPort primary) {
+        this.primary = Optional.of(primary);
+        return this;
+    }
+
+    /**
+     * Adds a new member to the replica set config using the server's Brooklyn-accessible host and port
+     * (derived from {@link MongoDBServer#PORT}). Doesn't attempt to check that the id is free.
+     */
+    public ReplicaSetConfig member(MongoDBServer server, Integer id) {
+        // TODO: Switch to SUBNET_HOSTNAME and there should be no need for a Brooklyn accessible
+        // address. It will require modification to MongoDBClientSupport, though, since it sets
+        // the primary to the host/port accessible from Brooklyn.
+        HostAndPort hap = BrooklynAccessUtils.getBrooklynAccessibleAddress(server, server.getAttribute(MongoDBServer.PORT));
+        return member(hap.getHostText(), hap.getPort(), id);
+    }
+
+    /**
+     * Adds a new member to the replica set config using the given {@link HostAndPort} for hostname and port.
+     * Doesn't attempt to check that the id is free.
+     */
+    public ReplicaSetConfig member(HostAndPort address, Integer id) {
+        return member(address.getHostText(), address.getPort(), id);
+    }
+
+    /**
+     * Adds a new member to the replica set config with the given hostname, port and id. Doesn't attempt to check
+     * that the id is free.
+     */
+    public ReplicaSetConfig member(String hostname, Integer port, Integer id) {
+        if (members.size() >= MAXIMUM_REPLICA_SET_SIZE) {
+            throw new IllegalStateException(String.format(
+                    "Replica set %s would exceed maximum size of %s with addition of member at %s:%s",
+                    name, MAXIMUM_REPLICA_SET_SIZE, hostname, port));
+        }
+        BasicBSONObject member = new BasicBSONObject();
+        member.put("_id", id);
+        member.put("host", String.format("%s:%s", hostname, port));
+        members.add(member);
+        return this;
+    }
+
+    /** Removes the first member matching the server's Brooklyn-accessible host and port. */
+    public ReplicaSetConfig remove(MongoDBServer server) {
+        HostAndPort hap = BrooklynAccessUtils.getBrooklynAccessibleAddress(server, server.getAttribute(MongoDBServer.PORT));
+        return remove(hap.getHostText(), hap.getPort());
+    }
+
+    /** Removes the first member with host and port matching the given address. */
+    public ReplicaSetConfig remove(HostAndPort address) {
+        return remove(address.getHostText(), address.getPort());
+    }
+
+    /**
+     * Removes the first member with the given hostname and port from the list of members.
+     */
+    public ReplicaSetConfig remove(String hostname, Integer port) {
+        String host = String.format("%s:%s", hostname, port);
+        Iterator<Object> it = this.members.iterator();
+        while (it.hasNext()) {
+            Object next = it.next();
+            if (next instanceof BasicBSONObject) {
+                BasicBSONObject basicBSONObject = (BasicBSONObject) next;
+                if (host.equals(basicBSONObject.getString("host"))) {
+                    it.remove();
+                    break;
+                }
+            }
+        }
+        return this;
+    }
+
+    /**
+     * @return A {@link BasicBSONObject} representing the configuration that is suitable for a MongoDB server.
+     */
+    public BasicBSONObject build() {
+        setVotingMembers();
+        BasicBSONObject config = new BasicBSONObject();
+        config.put("_id", name);
+        config.put("version", version);
+        config.put("members", members);
+        return config;
+    }
+
+    /**
+     * Selects 1, 3, 5 or 7 members to have a vote. The primary member (as set by
+     * {@link #primary(com.google.common.net.HostAndPort)}) is guaranteed a vote if
+     * it is in {@link #members}.
+     * <p/>
+     *
+     * Reconfiguring servers to be voters when they previously did not have votes generally triggers
+     * a primary election. This confuses the MongoDB Java driver, which logs an error like:
+     * <pre>
+     * WARN  emptying DBPortPool to sams.home/192.168.1.64:27019 b/c of error
+     * java.io.EOFException: null
+     *    at org.bson.io.Bits.readFully(Bits.java:48) ~[mongo-java-driver-2.11.3.jar:na]
+     * WARN  Command { "replSetReconfig" : ... } on sams.home/192.168.1.64:27019 failed
+     * com.mongodb.MongoException$Network: Read operation to server sams.home/192.168.1.64:27019 failed on database admin
+     *    at com.mongodb.DBTCPConnector.innerCall(DBTCPConnector.java:253) ~[mongo-java-driver-2.11.3.jar:na]
+     * Caused by: java.io.EOFException: null
+     *    at org.bson.io.Bits.readFully(Bits.java:48) ~[mongo-java-driver-2.11.3.jar:na]
+     * </pre>
+     *
+     * The MongoDB documentation on <a href="http://docs.mongodb.org/manual/tutorial/configure-a-non-voting-replica-set-member/">
+     * non-voting members</a> says:
+     * <blockquote>
+     *     Initializes a new replica set configuration. Disconnects the shell briefly and forces a
+     *     reconnection as the replica set renegotiates which member will be primary. As a result,
+     *     the shell will display an error even if this command succeeds.
+     * </blockquote>
+     *
+     * So the problem is more that the MongoDB Java driver does not understand why the server
+     * may have disconnected and is too eager to report a problem.
+     */
+    private void setVotingMembers() {
+        if (LOG.isDebugEnabled())
+            LOG.debug("Setting voting and non-voting members of replica set: {}", name);
+        boolean seenPrimary = false;
+        String expectedPrimary = primary.isPresent()
+                ? primary.get().getHostText() + ":" + primary.get().getPort()
+                : "";
+
+        // Ensure an odd number of voters
+        int setSize = this.members.size();
+        int nonPrimaryVotingMembers = Math.min(setSize % 2 == 0 ? setSize - 1 : setSize, MAXIMUM_VOTING_MEMBERS);
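+        // e.g. 4 members -> 3 voters; 9 members -> min(9, 7) = 7 voters. When a primary is known,
+        // one of these votes is reserved for it just below.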
+        if (primary.isPresent()) {
+            if (LOG.isTraceEnabled())
+                LOG.trace("Reserving vote for primary: " + expectedPrimary);
+            nonPrimaryVotingMembers -= 1;
+        }
+
+        for (Object member : this.members) {
+            if (member instanceof BasicBSONObject) {
+                BasicBSONObject bsonObject = BasicBSONObject.class.cast(member);
+                String host = bsonObject.getString("host");
+
+                // is this member noted as the primary?
+                if (this.primary.isPresent() && expectedPrimary.equals(host)) {
+                    bsonObject.put("votes", 1);
+                    seenPrimary = true;
+                    if (LOG.isDebugEnabled())
+                        LOG.debug("Voting member (primary) of set {}: {}", name, host);
+                } else if (nonPrimaryVotingMembers-- > 0) {
+                    bsonObject.put("votes", 1);
+                    if (LOG.isDebugEnabled())
+                        LOG.debug("Voting member of set {}: {}", name, host);
+                } else {
+                    bsonObject.put("votes", 0);
+                    if (LOG.isDebugEnabled())
+                        LOG.debug("Non-voting member of set {}: {}", name, host);
+                }
+            } else {
+                LOG.error("Unexpected entry in replica set members list: " + member);
+            }
+        }
+
+        if (primary.isPresent() && !seenPrimary) {
+            LOG.warn("Cannot give replica set primary a vote in reconfigured set: " +
+                    "primary was indicated as {} but no member with that host and port was seen in the set. " +
+                    "The replica set now has an even number of voters.",
+                    this.primary);
+        }
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/ReplicaSetMemberStatus.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/ReplicaSetMemberStatus.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/ReplicaSetMemberStatus.java
new file mode 100644
index 0000000..16df3a7
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/ReplicaSetMemberStatus.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+/**
+ * @see <a href="http://docs.mongodb.org/manual/reference/replica-status/">Replica set status reference</a>
+ */
+public enum ReplicaSetMemberStatus {
+
+    STARTUP("Start up, phase 1 (parsing configuration)"),
+    PRIMARY("Primary"),
+    SECONDARY("Secondary"),
+    RECOVERING("Member is recovering (initial sync, post-rollback, stale members)"),
+    FATAL("Member has encountered an unrecoverable error"),
+    STARTUP2("Start up, phase 2 (forking threads)"),
+    UNKNOWN("Unknown (the set has never connected to the member)"),
+    ARBITER("Member is an arbiter"),
+    DOWN("Member is not accessible to the set"),
+    ROLLBACK("Member is rolling back data. See rollback"),
+    SHUNNED("Member has been removed from replica set");
+
+    private final String description;
+
+    ReplicaSetMemberStatus(String description) {
+        this.description = description;
+    }
+
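+    /** Maps the numeric state code (the {@code myState} field of {@code replSetGetStatus}) to a status; unrecognised codes map to {@link #UNKNOWN}. */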
+    public static ReplicaSetMemberStatus fromCode(int code) {
+        switch (code) {
+            case 0: return STARTUP;
+            case 1: return PRIMARY;
+            case 2: return SECONDARY;
+            case 3: return RECOVERING;
+            case 4: return FATAL;
+            case 5: return STARTUP2;
+            case 6: return UNKNOWN;
+            case 7: return ARBITER;
+            case 8: return DOWN;
+            case 9: return ROLLBACK;
+            case 10: return SHUNNED;
+            default: return UNKNOWN;
+        }
+    }
+
+    @Override
+    public String toString() {
+        return name() + ": " + description;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/CoLocatedMongoDBRouter.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/CoLocatedMongoDBRouter.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/CoLocatedMongoDBRouter.java
new file mode 100644
index 0000000..48c9c63
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/CoLocatedMongoDBRouter.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import java.util.List;
+import java.util.Map;
+
+import brooklyn.config.ConfigKey;
+import brooklyn.entity.basic.ConfigKeys;
+import brooklyn.entity.basic.SameServerEntity;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.Sensors;
+import brooklyn.util.flags.SetFromFlag;
+
+import com.google.common.reflect.TypeToken;
+
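+/**
+ * A {@link SameServerEntity} that starts a {@link MongoDBRouter} on the same machine as the
+ * entities created from {@link #SIBLING_SPECS}.
+ * <p/>
+ * An illustrative sketch of configuring one (the deployment reference and sibling spec are invented):
+ * <pre>
+ * EntitySpec.create(CoLocatedMongoDBRouter.class)
+ *     .configure(CoLocatedMongoDBRouter.SHARDED_DEPLOYMENT, shardedDeployment)
+ *     .configure(CoLocatedMongoDBRouter.SIBLING_SPECS,
+ *             ImmutableList.&lt;EntitySpec&lt;?&gt;&gt;of(EntitySpec.create(MongoDBClient.class)));
+ * </pre>
+ */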
+@ImplementedBy(CoLocatedMongoDBRouterImpl.class)
+public interface CoLocatedMongoDBRouter extends SameServerEntity {
+    @SuppressWarnings("serial")
+    @SetFromFlag("siblingSpecs")
+    ConfigKey<Iterable<EntitySpec<?>>> SIBLING_SPECS = ConfigKeys.newConfigKey(new TypeToken<Iterable<EntitySpec<?>>>(){}, 
+            "mongodb.colocatedrouter.sibling.specs", "Collection of (configured) specs for entities to be co-located with the router");
+    
+    @SetFromFlag("shardedDeployment")
+    ConfigKey<MongoDBShardedDeployment> SHARDED_DEPLOYMENT = ConfigKeys.newConfigKey(MongoDBShardedDeployment.class, 
+            "mongodb.colocatedrouter.shardeddeployment", "Sharded deployment to which the router should report");
+
+    /** @deprecated since 0.7.0; use {@link #PROPAGATING_SENSORS} instead. */
+    @Deprecated
+    @SuppressWarnings("serial")
+    @SetFromFlag("propogatingSensors")
+    ConfigKey<List<Map<String, ?>>> PROPOGATING_SENSORS = ConfigKeys.newConfigKey(new TypeToken<List<Map<String, ?>>>(){}, 
+            "mongodb.colocatedrouter.propogating.sensors", "List of sensors to be propagated from child members");
+
+    @SetFromFlag("propagatingSensors")
+    ConfigKey<List<Map<String, ?>>> PROPAGATING_SENSORS = ConfigKeys.newConfigKey(new TypeToken<List<Map<String, ?>>>(){},
+            "mongodb.colocatedrouter.propagating.sensors", "List of sensors to be propagated from child members");
+
+    AttributeSensor<MongoDBRouter> ROUTER = Sensors.newSensor(MongoDBRouter.class,
+            "mongodb.colocatedrouter.router", "Router");
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/CoLocatedMongoDBRouterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/CoLocatedMongoDBRouterImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/CoLocatedMongoDBRouterImpl.java
new file mode 100644
index 0000000..35252ae
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/CoLocatedMongoDBRouterImpl.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import java.util.Collection;
+
+import brooklyn.enricher.Enrichers;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.basic.SameServerEntityImpl;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.event.basic.DependentConfiguration;
+import brooklyn.location.Location;
+
+import com.google.common.base.Predicates;
+import com.google.common.collect.Iterables;
+
+public class CoLocatedMongoDBRouterImpl extends SameServerEntityImpl implements CoLocatedMongoDBRouter {
+    @Override
+    public void init() {
+        super.init();
+        
+        for (EntitySpec<?> siblingSpec : getConfig(CoLocatedMongoDBRouter.SIBLING_SPECS)) {
+            addChild(siblingSpec);
+        }
+    }
+
+    @Override
+    protected void doStart(Collection<? extends Location> locations) {
+        // TODO Changed to create the router child after init as a workaround.
+        // When we use `mongo-sharded.yaml`, and we call 
+        // `getConfig(CoLocatedMongoDBRouter.SHARDED_DEPLOYMENT)`,
+        // the value is `$brooklyn:component("shardeddeployment")`.
+        // To look up the component, it tries to do `entity().getApplication()` to
+        // search the entities for one with the correct id. However if being done
+        // during `init()`, then this (which is returned by `entity()`) has not had its parent
+        // set, so `entity().getApplication()` returns null.
+        //
+        // We should move this code back to `init()` once we have a solution for that.
+        // We can also remove the call to Entities.manage() once this is in init() again.
+        
+        MongoDBRouter router = addChild(EntitySpec.create(MongoDBRouter.class)
+                .configure(MongoDBRouter.CONFIG_SERVERS,
+                        DependentConfiguration.attributeWhenReady(
+                                getConfig(CoLocatedMongoDBRouter.SHARDED_DEPLOYMENT), 
+                                MongoDBConfigServerCluster.CONFIG_SERVER_ADDRESSES)));
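+        // attributeWhenReady blocks resolution of the router's config-server list until the
+        // sharded deployment publishes CONFIG_SERVER_ADDRESSES.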
+        Entities.manage(router);
+        setAttribute(ROUTER, (MongoDBRouter) Iterables.tryFind(getChildren(), Predicates.instanceOf(MongoDBRouter.class)).get());
+        addEnricher(Enrichers.builder().propagating(MongoDBRouter.PORT).from(router).build());
+        
+        super.doStart(locations);
+        setAttribute(Startable.SERVICE_UP, true);
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServer.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServer.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServer.java
new file mode 100644
index 0000000..acecbc4
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServer.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import org.apache.brooklyn.entity.nosql.mongodb.AbstractMongoDBServer;
+
+import brooklyn.entity.proxying.ImplementedBy;
+
+@ImplementedBy(MongoDBConfigServerImpl.class)
+public interface MongoDBConfigServer extends AbstractMongoDBServer {
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerCluster.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerCluster.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerCluster.java
new file mode 100644
index 0000000..79f78ac
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerCluster.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import brooklyn.entity.group.DynamicCluster;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.Sensors;
+
+import com.google.common.reflect.TypeToken;
+
+@ImplementedBy(MongoDBConfigServerClusterImpl.class)
+public interface MongoDBConfigServerCluster extends DynamicCluster {
+
+    @SuppressWarnings("serial")
+    AttributeSensor<Iterable<String>> CONFIG_SERVER_ADDRESSES = Sensors.newSensor(new TypeToken<Iterable<String>>() {}, 
+            "mongodb.config.server.addresses", "List of config server hostnames and ports");
+    
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerClusterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerClusterImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerClusterImpl.java
new file mode 100644
index 0000000..34651bb
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerClusterImpl.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import java.util.Collection;
+
+import brooklyn.entity.Entity;
+import brooklyn.entity.group.DynamicClusterImpl;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.location.Location;
+import brooklyn.location.access.BrooklynAccessUtils;
+
+import com.google.common.base.Function;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+import com.google.common.net.HostAndPort;
+
+public class MongoDBConfigServerClusterImpl extends DynamicClusterImpl implements MongoDBConfigServerCluster {
+    
+    @Override
+    protected EntitySpec<?> getMemberSpec() {
+        if (super.getMemberSpec() != null)
+            return super.getMemberSpec();
+        return EntitySpec.create(MongoDBConfigServer.class);
+    }
+    
+    @Override
+    public void start(Collection<? extends Location> locs) {
+        super.start(locs);
+        
+        // TODO this should be an enricher
+        Iterable<String> memberHostNamesAndPorts = Iterables.transform(getMembers(), new Function<Entity, String>() {
+            @Override
+            public String apply(Entity entity) {
+                return entity.getAttribute(MongoDBConfigServer.SUBNET_HOSTNAME) + ":" + entity.getAttribute(MongoDBConfigServer.PORT);
+            }
+        });
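+        // Publishes e.g. ["ip-10-0-0-1:27019", "ip-10-0-0-2:27019"] (illustrative addresses).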
+        setAttribute(MongoDBConfigServerCluster.CONFIG_SERVER_ADDRESSES, ImmutableList.copyOf(memberHostNamesAndPorts));
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerDriver.java
new file mode 100644
index 0000000..7963b22
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerDriver.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import brooklyn.entity.basic.SoftwareProcessDriver;
+
+public interface MongoDBConfigServerDriver extends SoftwareProcessDriver {
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerImpl.java
new file mode 100644
index 0000000..b8ce2b8
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerImpl.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import brooklyn.entity.basic.SoftwareProcessImpl;
+
+public class MongoDBConfigServerImpl extends SoftwareProcessImpl implements MongoDBConfigServer {
+
+    @Override
+    public Class<?> getDriverInterface() {
+        return MongoDBConfigServerDriver.class;
+    }
+    
+    @Override
+    protected void connectSensors() {
+        super.connectSensors();
+        connectServiceUpIsRunning();
+    }
+
+}



[20/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBClientSupport.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBClientSupport.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBClientSupport.java
deleted file mode 100644
index 57a8dee..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBClientSupport.java
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import java.net.UnknownHostException;
-
-import org.bson.BSONObject;
-import org.bson.BasicBSONObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Optional;
-import com.google.common.net.HostAndPort;
-import com.mongodb.BasicDBObject;
-import com.mongodb.CommandResult;
-import com.mongodb.DB;
-import com.mongodb.DBObject;
-import com.mongodb.MongoClient;
-import com.mongodb.MongoClientOptions;
-import com.mongodb.MongoException;
-import com.mongodb.ServerAddress;
-
-import brooklyn.location.access.BrooklynAccessUtils;
-import brooklyn.util.BrooklynNetworkUtils;
-
-/**
- * Manages connections to standalone MongoDB servers.
- *
- * @see <a href="http://docs.mongodb.org/manual/reference/command/">MongoDB database command documentation</a>
- */
-public class MongoDBClientSupport {
-
-    private static final Logger LOG = LoggerFactory.getLogger(MongoDBClientSupport.class);
-
-    private ServerAddress address;
-    
-    private MongoClient client() {
-        return new MongoClient(address, connectionOptions);
-    }
-
-    // Set client to automatically reconnect to servers.
-    private static final MongoClientOptions connectionOptions = MongoClientOptions.builder()
-            .autoConnectRetry(true)
-            .socketKeepAlive(true)
-            .build();
-
-    private static final BasicBSONObject EMPTY_RESPONSE = new BasicBSONObject();
-
-    public MongoDBClientSupport(ServerAddress standalone) {
-        // We could also use a MongoClient to access an entire replica set. See MongoClient(List<ServerAddress>).
-        address = standalone;
-    }
-
-    /**
-     * Creates a {@link MongoDBClientSupport} instance in standalone mode.
-     */
-    public static MongoDBClientSupport forServer(AbstractMongoDBServer standalone) throws UnknownHostException {
-        HostAndPort hostAndPort = BrooklynAccessUtils.getBrooklynAccessibleAddress(standalone, standalone.getAttribute(MongoDBServer.PORT));
-        ServerAddress address = new ServerAddress(hostAndPort.getHostText(), hostAndPort.getPort());
-        return new MongoDBClientSupport(address);
-    }
-
-    private ServerAddress getServerAddress() {
-        MongoClient client = client();
-        try {
-            return client.getServerAddressList().get(0);
-        } finally {
-            client.close();
-        }
-    }
-
-    private HostAndPort getServerHostAndPort() {
-        ServerAddress address = getServerAddress();
-        return HostAndPort.fromParts(address.getHost(), address.getPort());
-    }
-
-    public Optional<CommandResult> runDBCommand(String database, String command) {
-        return runDBCommand(database, new BasicDBObject(command, Boolean.TRUE));
-    }
-
-    private Optional<CommandResult> runDBCommand(String database, DBObject command) {
-        MongoClient client = client();
-        try {
-            DB db = client.getDB(database);
-            CommandResult status;
-            try {
-                status = db.command(command);
-            } catch (MongoException e) {
-                LOG.warn("Command " + command + " on " + getServerAddress() + " failed", e);
-                return Optional.absent();
-            }
-            if (!status.ok()) {
-                LOG.debug("Unexpected result of {} on {}: {}",
-                        new Object[] { command, getServerAddress(), status.getErrorMessage() });
-            }
-            return Optional.of(status);
-        } finally {
-            client.close();
-        }
-    }
-    
-    public long getShardCount() {
-        MongoClient client = client();
-        try {
-            return client.getDB("config").getCollection("shards").getCount();
-        } finally {
-            client.close();
-        }
-    }
-
-    public BasicBSONObject getServerStatus() {
-        Optional<CommandResult> result = runDBCommand("admin", "serverStatus");
-        if (result.isPresent() && result.get().ok()) {
-            return result.get();
-        } else {
-            return EMPTY_RESPONSE;
-        }
-    }
-    
-    public boolean ping() {
-        DBObject ping = new BasicDBObject("ping", "1");
-        try {
-            runDBCommand("admin", ping);
-        } catch (MongoException e) {
-            return false;
-        }
-        return true;
-    }
-
-    public boolean initializeReplicaSet(String replicaSetName, Integer id) {
-        HostAndPort primary = getServerHostAndPort();
-        BasicBSONObject config = ReplicaSetConfig.builder(replicaSetName)
-                .member(primary, id)
-                .build();
-
-        BasicDBObject dbObject = new BasicDBObject("replSetInitiate", config);
-        LOG.debug("Initiating replica set with: " + dbObject);
-
-        Optional<CommandResult> result = runDBCommand("admin", dbObject);
-        if (result.isPresent() && result.get().ok() && LOG.isDebugEnabled()) {
-            LOG.debug("Completed initiating MongoDB replica set {} on entity {}", replicaSetName, this);
-        }
-        return result.isPresent() && result.get().ok();
-    }
-
-    /**
-     * Java equivalent of calling rs.conf() in the console.
-     */
-    private BSONObject getReplicaSetConfig() {
-        MongoClient client = client();
-        try {
-            return client.getDB("local").getCollection("system.replset").findOne();
-        } catch (MongoException e) {
-            LOG.error("Failed to get replica set config on "+client, e);
-            return null;
-        } finally {
-            client.close();
-        }
-    }
-
-    /**
-     * Runs <code>replSetGetStatus</code> on the admin database.
-     *
-     * @return The result of <code>replSetGetStatus</code>, or
-     *         an empty {@link BasicBSONObject} if the command threw an exception (e.g. if
-     *         the connection was reset) or if the resultant {@link CommandResult#ok} was false.
-     *
-     * @see <a href="http://docs.mongodb.org/manual/reference/replica-status/">Replica set status reference</a>
-     * @see <a href="http://docs.mongodb.org/manual/reference/command/replSetGetStatus/">replSetGetStatus documentation</a>
-     */
-    public BasicBSONObject getReplicaSetStatus() {
-        Optional<CommandResult> result = runDBCommand("admin", "replSetGetStatus");
-        if (result.isPresent() && result.get().ok()) {
-            return result.get();
-        } else {
-            return EMPTY_RESPONSE;
-        }
-    }
-
-    /**
-     * Reconfigures the replica set that this client is the primary member of to include a new member.
-     * <p/>
-     * Note that this can cause long downtime (typically 10-20s, even up to a minute).
-     *
-     * @param secondary New member of the set.
-     * @param id The id for the new set member. Must be unique within the set.
-     * @return True if successful
-     */
-    public boolean addMemberToReplicaSet(MongoDBServer secondary, Integer id) {
-        // We need to:
-        // - get the existing configuration
-        // - update its version
-        // - add the new member to its list of members
-        // - run replSetReconfig with the new configuration.
-        BSONObject existingConfig = getReplicaSetConfig();
-        if (existingConfig == null) {
-            LOG.warn("Couldn't load existing config for replica set from {}. Server {} not added.",
-                    getServerAddress(), secondary);
-            return false;
-        }
-
-        BasicBSONObject newConfig = ReplicaSetConfig.fromExistingConfig(existingConfig)
-                .primary(getServerHostAndPort())
-                .member(secondary, id)
-                .build();
-        return reconfigureReplicaSet(newConfig);
-    }
-
-    /**
-     * Reconfigures the replica set that this client is the primary member of to
-     * remove the given server.
-     * @param server The server to remove
-     * @return True if successful
-     */
-    public boolean removeMemberFromReplicaSet(MongoDBServer server) {
-        BSONObject existingConfig = getReplicaSetConfig();
-        if (existingConfig == null) {
-            LOG.warn("Couldn't load existing config for replica set from {}. Server {} not removed.",
-                    getServerAddress(), server);
-            return false;
-        }
-        BasicBSONObject newConfig = ReplicaSetConfig.fromExistingConfig(existingConfig)
-                .primary(getServerHostAndPort())
-                .remove(server)
-                .build();
-        return reconfigureReplicaSet(newConfig);
-    }
-
-    /**
-     * Runs replSetReconfig with the given BasicBSONObject. Returns true if the result's
-     * status is ok.
-     */
-    private boolean reconfigureReplicaSet(BasicBSONObject newConfig) {
-        BasicDBObject command = new BasicDBObject("replSetReconfig", newConfig);
-        LOG.debug("Reconfiguring replica set to: " + command);
-        Optional<CommandResult> result = runDBCommand("admin", command);
-        return result.isPresent() && result.get().ok();
-    }
-
-    public boolean addShardToRouter(String hostAndPort) {
-        LOG.debug("Adding shard " + hostAndPort);
-        BasicDBObject command = new BasicDBObject("addShard", hostAndPort);
-        Optional<CommandResult> result = runDBCommand("admin", command);
-        return result.isPresent() && result.get().ok();
-    }
-
-}
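
For reference, typical use of the helper deleted above (it is re-added under the new
package as part of this rename) looks roughly like this; "server" stands for any
managed AbstractMongoDBServer:

    // Sketch: connect to a standalone server and inspect it.
    // forServer declares UnknownHostException, elided here for brevity.
    MongoDBClientSupport client = MongoDBClientSupport.forServer(server);
    boolean reachable = client.ping();
    BasicBSONObject status = client.getServerStatus();   // empty object if the call failed
    boolean initialised = client.initializeReplicaSet("BrooklynCluster", 0);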

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBDriver.java
deleted file mode 100644
index 3f484f2..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBDriver.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import brooklyn.entity.basic.SoftwareProcessDriver;
-
-public interface MongoDBDriver extends SoftwareProcessDriver {
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBReplicaSet.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBReplicaSet.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBReplicaSet.java
deleted file mode 100644
index d2448c0..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBReplicaSet.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import java.util.Collection;
-import java.util.List;
-
-import brooklyn.config.ConfigKey;
-import brooklyn.entity.basic.ConfigKeys;
-import brooklyn.entity.group.Cluster;
-import brooklyn.entity.group.DynamicCluster;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.Sensors;
-import brooklyn.util.flags.SetFromFlag;
-
-import com.google.common.reflect.TypeToken;
-
-/**
- * A replica set of {@link MongoDBServer}s, based on {@link DynamicCluster} which can be resized by a policy
- * if required.
- *
- * <p/><b>Note</b>
- * An issue with <code>mongod</code> on Mac OS X can cause unpredictable failure of servers at start-up.
- * See <a href="https://groups.google.com/forum/#!topic/mongodb-user/QRQYdIXOR2U">this mailing list post</a>
- * for more information.
- *
- * <p/>This replica set implementation has been tested on OS X 10.6 and Ubuntu 12.04.
- *
- * @see <a href="http://docs.mongodb.org/manual/replication/">http://docs.mongodb.org/manual/replication/</a>
- */
-@ImplementedBy(MongoDBReplicaSetImpl.class)
-public interface MongoDBReplicaSet extends DynamicCluster {
-
-    @SetFromFlag("replicaSetName")
-    ConfigKey<String> REPLICA_SET_NAME = ConfigKeys.newStringConfigKey(
-            "mongodb.replicaSet.name", "Name of the MongoDB replica set", "BrooklynCluster");
-
-    ConfigKey<Integer> INITIAL_SIZE = ConfigKeys.newConfigKeyWithDefault(Cluster.INITIAL_SIZE, 3);
-
-    AttributeSensor<MongoDBServer> PRIMARY_ENTITY = Sensors.newSensor(
-            MongoDBServer.class, "mongodb.replicaSet.primary.entity", "The entity acting as primary");
-
-    @SuppressWarnings("serial")
-    AttributeSensor<List<String>> REPLICA_SET_ENDPOINTS = Sensors.newSensor(new TypeToken<List<String>>() {}, 
-        "mongodb.replicaSet.endpoints", "Endpoints active for this replica set");
-
-    /**
-     * The name of the replica set.
-     */
-    String getName();
-
-    /**
-     * @return The primary MongoDB server in the replica set.
-     */
-    MongoDBServer getPrimary();
-
-    /**
-     * @return The secondary servers in the replica set.
-     */
-    Collection<MongoDBServer> getSecondaries();
-
-    /**
-     * @return All servers in the replica set.
-     */
-    Collection<MongoDBServer> getReplicas();
-
-}
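
As a usage sketch for the interface above (standard Brooklyn EntitySpec calls;
MEMBER_SPEC is inherited from DynamicCluster):

    // Sketch: a three-member replica set of MongoDBServer instances.
    EntitySpec<MongoDBReplicaSet> spec = EntitySpec.create(MongoDBReplicaSet.class)
            .configure(MongoDBReplicaSet.REPLICA_SET_NAME, "my-set")
            .configure(MongoDBReplicaSet.INITIAL_SIZE, 3)
            .configure(MongoDBReplicaSet.MEMBER_SPEC, EntitySpec.create(MongoDBServer.class));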

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetImpl.java
deleted file mode 100644
index a106ec1..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetImpl.java
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import static com.google.common.base.Preconditions.checkArgument;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import javax.annotation.Nullable;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.enricher.Enrichers;
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.Lifecycle;
-import brooklyn.entity.basic.ServiceStateLogic;
-import brooklyn.entity.group.AbstractMembershipTrackingPolicy;
-import brooklyn.entity.group.DynamicClusterImpl;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.SensorEvent;
-import brooklyn.event.SensorEventListener;
-import brooklyn.location.Location;
-import brooklyn.policy.PolicySpec;
-import brooklyn.util.collections.MutableList;
-import brooklyn.util.collections.MutableSet;
-import brooklyn.util.text.Strings;
-
-import com.google.common.base.Function;
-import com.google.common.base.Predicate;
-import com.google.common.base.Predicates;
-import com.google.common.collect.FluentIterable;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Iterables;
-
-/**
- * Implementation of {@link MongoDBReplicaSet}.
- *
- * Replica sets have a <i>minimum</i> of three members.
- *
- * Removal strategy is always {@link #NON_PRIMARY_REMOVAL_STRATEGY}.
- */
-public class MongoDBReplicaSetImpl extends DynamicClusterImpl implements MongoDBReplicaSet {
-
-    private static final Logger LOG = LoggerFactory.getLogger(MongoDBReplicaSetImpl.class);
-
-    // Provides IDs for replica set members. The first member will have ID 0.
-    private final AtomicInteger nextMemberId = new AtomicInteger(0);
-
-    private MemberTrackingPolicy policy;
-    private final AtomicBoolean mustInitialise = new AtomicBoolean(true);
-
-    @SuppressWarnings("unchecked")
-    protected static final List<AttributeSensor<Long>> SENSORS_TO_SUM = Arrays.asList(
-        MongoDBServer.OPCOUNTERS_INSERTS,
-        MongoDBServer.OPCOUNTERS_QUERIES,
-        MongoDBServer.OPCOUNTERS_UPDATES,
-        MongoDBServer.OPCOUNTERS_DELETES,
-        MongoDBServer.OPCOUNTERS_GETMORE,
-        MongoDBServer.OPCOUNTERS_COMMAND,
-        MongoDBServer.NETWORK_BYTES_IN,
-        MongoDBServer.NETWORK_BYTES_OUT,
-        MongoDBServer.NETWORK_NUM_REQUESTS);
-    
-    public MongoDBReplicaSetImpl() {
-    }
-
-    /**
-     * Manages member addition and removal.
-     *
-     * It's important that this is a single thread: the concurrent addition and removal
-     * of members from the set would almost certainly have unintended side effects,
-     * like reconfigurations using outdated ReplicaSetConfig instances.
-     */
-    private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
-
-    /** true iff input is a non-null MongoDBServer with attribute REPLICA_SET_MEMBER_STATUS PRIMARY. */
-    static final Predicate<Entity> IS_PRIMARY = new Predicate<Entity>() {
-        // getPrimary relies on instanceof check
-        @Override public boolean apply(@Nullable Entity input) {
-            return input != null
-                    && input instanceof MongoDBServer
-                    && ReplicaSetMemberStatus.PRIMARY.equals(input.getAttribute(MongoDBServer.REPLICA_SET_MEMBER_STATUS));
-        }
-    };
-
-    /** true iff input is a non-null MongoDBServer with attribute REPLICA_SET_MEMBER_STATUS SECONDARY. */
-    static final Predicate<Entity> IS_SECONDARY = new Predicate<Entity>() {
-        @Override public boolean apply(@Nullable Entity input) {
-            // getSecondaries relies on instanceof check
-            return input != null
-                    && input instanceof MongoDBServer
-                    && ReplicaSetMemberStatus.SECONDARY.equals(input.getAttribute(MongoDBServer.REPLICA_SET_MEMBER_STATUS));
-        }
-    };
-
-    /**
-     * {@link Function} for use as the cluster's removal strategy. Prefers any member whose
-     * {@link MongoDBServer#IS_PRIMARY_FOR_REPLICA_SET} is not true; the primary is returned
-     * only when no other member is available.
-     */
-    private static final Function<Collection<Entity>, Entity> NON_PRIMARY_REMOVAL_STRATEGY = new Function<Collection<Entity>, Entity>() {
-        @Override
-        public Entity apply(@Nullable Collection<Entity> entities) {
-            checkArgument(entities != null && entities.size() > 0, "Expect list of MongoDBServers to have at least one entry");
-            return Iterables.tryFind(entities, Predicates.not(IS_PRIMARY)).or(Iterables.get(entities, 0));
-        }
-    };
-
-    /** @return {@link #NON_PRIMARY_REMOVAL_STRATEGY} */
-    @Override
-    public Function<Collection<Entity>, Entity> getRemovalStrategy() {
-        return NON_PRIMARY_REMOVAL_STRATEGY;
-    }
-
-    @Override
-    protected EntitySpec<?> getMemberSpec() {
-        return getConfig(MEMBER_SPEC, EntitySpec.create(MongoDBServer.class));
-    }
-
-    /**
-     * Sets {@link MongoDBServer#REPLICA_SET}.
-     */
-    @Override
-    protected Map<?,?> getCustomChildFlags() {
-        return ImmutableMap.builder()
-                .putAll(super.getCustomChildFlags())
-                .put(MongoDBServer.REPLICA_SET, getProxy())
-                .build();
-    }
-
-    @Override
-    public String getName() {
-        // FIXME: Names must be unique if the replica sets are used in a sharded cluster
-        return getConfig(REPLICA_SET_NAME) + this.getId();
-    }
-
-    @Override
-    public MongoDBServer getPrimary() {
-        return Iterables.tryFind(getReplicas(), IS_PRIMARY).orNull();
-    }
-
-    @Override
-    public Collection<MongoDBServer> getSecondaries() {
-        return FluentIterable.from(getReplicas())
-                .filter(IS_SECONDARY)
-                .toList();
-    }
-
-    @Override
-    public Collection<MongoDBServer> getReplicas() {
-        return FluentIterable.from(getMembers())
-                .transform(new Function<Entity, MongoDBServer>() {
-                    @Override public MongoDBServer apply(Entity input) {
-                        return MongoDBServer.class.cast(input);
-                    }
-                })
-                .toList();
-    }
-
-    /**
-     * Initialises the replica set with the given server as primary if {@link #mustInitialise} is true,
-     * otherwise schedules the addition of a new secondary.
-     */
-    private void serverAdded(MongoDBServer server) {
-        LOG.debug("Server added: {}. SERVICE_UP: {}", server, server.getAttribute(MongoDBServer.SERVICE_UP));
-
-        // Set the primary if the replica set hasn't been initialised.
-        if (mustInitialise.compareAndSet(true, false)) {
-            if (LOG.isInfoEnabled())
-                LOG.info("First server up in {} is: {}", getName(), server);
-            boolean replicaSetInitialised = server.initializeReplicaSet(getName(), nextMemberId.getAndIncrement());
-            if (replicaSetInitialised) {
-                setAttribute(PRIMARY_ENTITY, server);
-                setAttribute(Startable.SERVICE_UP, true);
-            } else {
-                ServiceStateLogic.setExpectedState(this, Lifecycle.ON_FIRE);
-            }
-        } else {
-            if (LOG.isDebugEnabled())
-                LOG.debug("Scheduling addition of member to {}: {}", getName(), server);
-            addSecondaryWhenPrimaryIsNonNull(server);
-        }
-    }
-
-    /**
-     * Adds a server as a secondary in the replica set.
-     * <p/>
-     * If {@link #getPrimary} returns non-null submit the secondary to the primary's
-     * {@link MongoDBClientSupport}. Otherwise, reschedule the task to run again in three
-     * seconds' time (in the hope that the primary will be available next time).
-     */
-    private void addSecondaryWhenPrimaryIsNonNull(final MongoDBServer secondary) {
-        // TODO Don't use executor, use ExecutionManager
-        executor.submit(new Runnable() {
-            @Override
-            public void run() {
-                // SERVICE_UP is not guaranteed when additional members are added to the set.
-                Boolean isAvailable = secondary.getAttribute(MongoDBServer.SERVICE_UP);
-                MongoDBServer primary = getPrimary();
-                boolean reschedule;
-                if (Boolean.TRUE.equals(isAvailable) && primary != null) {
-                    boolean added = primary.addMemberToReplicaSet(secondary, nextMemberId.incrementAndGet());
-                    if (added) {
-                        LOG.info("{} added to replica set {}", secondary, getName());
-                        reschedule = false;
-                    } else {
-                        if (LOG.isDebugEnabled()) {
-                            LOG.debug("{} could not be added to replica set via {}; rescheduling", secondary, getName());
-                        }
-                        reschedule = true;
-                    }
-                } else {
-                    if (LOG.isTraceEnabled()) {
-                        LOG.trace("Rescheduling addition of member {} to replica set {}: service_up={}, primary={}",
-                            new Object[] {secondary, getName(), isAvailable, primary});
-                    }
-                    reschedule = true;
-                }
-                
-                if (reschedule) {
-                    // TODO Could limit number of retries
-                    executor.schedule(this, 3, TimeUnit.SECONDS);
-                }
-            }
-        });
-    }
-
-    /**
-     * Removes a server from the replica set.
-     * <p/>
-     * Submits a task to {@link #executor} that waits for the member to be down and for the replica
-     * set to have a primary member, then reconfigures the set to remove the member. If either of
-     * the two conditions is not met then the task reschedules itself.
-     *
-     * @param member The server to be removed from the replica set.
-     */
-    private void serverRemoved(final MongoDBServer member) {
-        if (LOG.isDebugEnabled())
-            LOG.debug("Scheduling removal of member from {}: {}", getName(), member);
-        // FIXME is there a chance of race here?
-        if (member.equals(getAttribute(PRIMARY_ENTITY)))
-            setAttribute(PRIMARY_ENTITY, null);
-        executor.submit(new Runnable() {
-            @Override
-            public void run() {
-                // Wait until the server has been stopped before reconfiguring the set. Quoth the MongoDB doc:
-                // for best results always shut down the mongod instance before removing it from a replica set.
-                Boolean isAvailable = member.getAttribute(MongoDBServer.SERVICE_UP);
-                // Wait for the replica set to elect a new primary if the set is reconfiguring itself.
-                MongoDBServer primary = getPrimary();
-                boolean reschedule;
-                
-                if (primary != null && !Boolean.TRUE.equals(isAvailable)) {
-                    boolean removed = primary.removeMemberFromReplicaSet(member);
-                    if (removed) {
-                        LOG.info("Removed {} from replica set {}", member, getName());
-                        reschedule = false;
-                    } else {
-                        if (LOG.isDebugEnabled()) {
-                            LOG.debug("{} could not be removed from replica set via {}; rescheduling", member, getName());
-                        }
-                        reschedule = true;
-                    }
-
-                } else {
-                    if (LOG.isTraceEnabled()) {
-                        LOG.trace("Rescheduling removal of member {} from replica set {}: service_up={}, primary={}",
-                            new Object[]{member, getName(), isAvailable, primary});
-                    }
-                    reschedule = true;
-                }
-                
-                if (reschedule) {
-                    // TODO Could limit number of retries
-                    executor.schedule(this, 3, TimeUnit.SECONDS);
-                }
-            }
-        });
-    }
-
-    @Override
-    public void start(Collection<? extends Location> locations) {
-        // Promises that all the cluster's members have SERVICE_UP true on returning.
-        super.start(locations);
-        policy = addPolicy(PolicySpec.create(MemberTrackingPolicy.class)
-                .displayName(getName() + " membership tracker")
-                .configure("group", this));
-
-        for (AttributeSensor<Long> sensor: SENSORS_TO_SUM)
-            addEnricher(Enrichers.builder()
-                    .aggregating(sensor)
-                    .publishing(sensor)
-                    .fromMembers()
-                    .computingSum()
-                    .valueToReportIfNoSensors(null)
-                    .defaultValueForUnreportedSensors(null)
-                    .build());
-        
-        // FIXME would it be simpler to have a *subscription* on four or five sensors on allMembers, including SERVICE_UP
-        // (which we currently don't check), rather than an enricher, and call to an "update" method?
-        addEnricher(Enrichers.builder()
-                .aggregating(MongoDBServer.REPLICA_SET_PRIMARY_ENDPOINT)
-                .publishing(MongoDBServer.REPLICA_SET_PRIMARY_ENDPOINT)
-                .fromMembers()
-                .valueToReportIfNoSensors(null)
-                .computing(new Function<Collection<String>, String>() {
-                        @Override
-                        public String apply(Collection<String> input) {
-                            if (input==null || input.isEmpty()) return null;
-                            Set<String> distinct = MutableSet.of();
-                            for (String endpoint: input)
-                                if (!Strings.isBlank(endpoint))
-                                    distinct.add(endpoint);
-                            if (distinct.size()>1)
-                                LOG.warn("Mongo replica set "+MongoDBReplicaSetImpl.this+" detetcted multiple masters (transitioning?): "+distinct);
-                            return input.iterator().next();
-                        }})
-                .build());
-
-        addEnricher(Enrichers.builder()
-                .aggregating(MongoDBServer.MONGO_SERVER_ENDPOINT)
-                .publishing(REPLICA_SET_ENDPOINTS)
-                .fromMembers()
-                .valueToReportIfNoSensors(null)
-                .computing(new Function<Collection<String>, List<String>>() {
-                        @Override
-                        public List<String> apply(Collection<String> input) {
-                            Set<String> endpoints = new TreeSet<String>();
-                            for (String endpoint: input) {
-                                if (!Strings.isBlank(endpoint)) {
-                                    endpoints.add(endpoint);
-                                }
-                            }
-                            return MutableList.copyOf(endpoints);
-                        }})
-                .build());
-
-        subscribeToMembers(this, MongoDBServer.IS_PRIMARY_FOR_REPLICA_SET, new SensorEventListener<Boolean>() {
-            @Override public void onEvent(SensorEvent<Boolean> event) {
-                if (Boolean.TRUE == event.getValue())
-                    setAttribute(PRIMARY_ENTITY, (MongoDBServer)event.getSource());
-            }
-        });
-
-    }
-
-    @Override
-    public void stop() {
-        // Do we want to remove the members from the replica set?
-        //  - if the set is being stopped forever it's irrelevant
-        //  - if the set might be restarted I think it just inconveniences us
-        // Terminate the executor immediately.
-        // TODO Note that after this the executor will not run if the set is restarted.
-        executor.shutdownNow();
-        super.stop();
-        setAttribute(Startable.SERVICE_UP, false);
-    }
-
-    @Override
-    public void onManagementStopped() {
-        super.onManagementStopped();
-        executor.shutdownNow();
-    }
-    
-    public static class MemberTrackingPolicy extends AbstractMembershipTrackingPolicy {
-        @Override protected void onEntityChange(Entity member) {
-            // Ignored
-        }
-        @Override protected void onEntityAdded(Entity member) {
-            ((MongoDBReplicaSetImpl)entity).serverAdded((MongoDBServer) member);
-        }
-        @Override protected void onEntityRemoved(Entity member) {
-            ((MongoDBReplicaSetImpl)entity).serverRemoved((MongoDBServer) member);
-        }
-    };
-}
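
Note the practical effect of NON_PRIMARY_REMOVAL_STRATEGY above: resizing the cluster
down removes secondaries first, so the primary survives unless it is the last member.
A sketch, using the resize effector inherited from DynamicCluster:

    // Sketch: grow then shrink the set; secondaries are chosen for removal first.
    replicaSet.resize(5);
    replicaSet.resize(3);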

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBServer.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBServer.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBServer.java
deleted file mode 100644
index c829b3a..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBServer.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import org.bson.BasicBSONObject;
-
-import org.apache.brooklyn.catalog.Catalog;
-import brooklyn.config.ConfigKey;
-import brooklyn.entity.basic.ConfigKeys;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.AttributeSensor.SensorPersistenceMode;
-import brooklyn.event.basic.BasicConfigKey;
-import brooklyn.event.basic.PortAttributeSensorAndConfigKey;
-import brooklyn.event.basic.Sensors;
-import brooklyn.util.flags.SetFromFlag;
-
-@Catalog(name="MongoDB Server",
-    description="MongoDB (from \"humongous\") is a scalable, high-performance, open source NoSQL database",
-    iconUrl="classpath:///mongodb-logo.png")
-@ImplementedBy(MongoDBServerImpl.class)
-public interface MongoDBServer extends AbstractMongoDBServer {
-
-    @SetFromFlag("mongodbConfTemplateUrl")
-    ConfigKey<String> MONGODB_CONF_TEMPLATE_URL = ConfigKeys.newConfigKeyWithDefault(
-            AbstractMongoDBServer.MONGODB_CONF_TEMPLATE_URL,
-            "classpath://brooklyn/entity/nosql/mongodb/default-mongod.conf");
-
-    // See http://docs.mongodb.org/ecosystem/tools/http-interfaces/#http-console
-    // This is *always* 1000 more than port. We disable if it is not available.
-    PortAttributeSensorAndConfigKey HTTP_PORT =
-        new PortAttributeSensorAndConfigKey("mongodb.server.httpPort", "HTTP port for the server (estimated)", "28017+");
-
-    @SetFromFlag("enableRestInterface")
-    ConfigKey<Boolean> ENABLE_REST_INTERFACE = ConfigKeys.newBooleanConfigKey(
-            "mongodb.config.enable_rest", "Adds --rest to server startup flags when true", Boolean.FALSE);
-
-    AttributeSensor<String> HTTP_INTERFACE_URL = Sensors.newStringSensor(
-            "mongodb.server.http_interface", "URL of the server's HTTP console");
-
-    AttributeSensor<BasicBSONObject> STATUS_BSON = Sensors.builder(BasicBSONObject.class, "mongodb.server.status.bson")
-            .description("Server status (BSON/JSON map ojbect)")
-            .persistence(SensorPersistenceMode.NONE)
-            .build();
-    
-    AttributeSensor<Double> UPTIME_SECONDS = Sensors.newDoubleSensor(
-            "mongodb.server.uptime", "Server uptime in seconds");
-
-    AttributeSensor<Long> OPCOUNTERS_INSERTS = Sensors.newLongSensor(
-            "mongodb.server.opcounters.insert", "Server inserts");
-
-    AttributeSensor<Long> OPCOUNTERS_QUERIES = Sensors.newLongSensor(
-            "mongodb.server.opcounters.query", "Server queries");
-
-    AttributeSensor<Long> OPCOUNTERS_UPDATES = Sensors.newLongSensor(
-            "mongodb.server.opcounters.update", "Server updates");
-
-    AttributeSensor<Long> OPCOUNTERS_DELETES = Sensors.newLongSensor(
-            "mongodb.server.opcounters.delete", "Server deletes");
-
-    AttributeSensor<Long> OPCOUNTERS_GETMORE = Sensors.newLongSensor(
-            "mongodb.server.opcounters.getmore", "Server getmores");
-
-    AttributeSensor<Long> OPCOUNTERS_COMMAND = Sensors.newLongSensor(
-            "mongodb.server.opcounters.command", "Server commands");
-
-    AttributeSensor<Long> NETWORK_BYTES_IN = Sensors.newLongSensor(
-            "mongodb.server.network.bytesIn", "Server incoming network traffic (in bytes)");
-
-    AttributeSensor<Long> NETWORK_BYTES_OUT = Sensors.newLongSensor(
-            "mongodb.server.network.bytesOut", "Server outgoing network traffic (in bytes)");
-
-    AttributeSensor<Long> NETWORK_NUM_REQUESTS = Sensors.newLongSensor(
-            "mongodb.server.network.numRequests", "Server network requests");
-
-    /** The replica set to which this server belongs, if any. */
-    ConfigKey<MongoDBReplicaSet> REPLICA_SET = new BasicConfigKey<MongoDBReplicaSet>(MongoDBReplicaSet.class,
-            "mongodb.replicaset", "The replica set to which the server belongs. " +
-            "Users should not set this directly when creating a new replica set.");
-
-    AttributeSensor<ReplicaSetMemberStatus> REPLICA_SET_MEMBER_STATUS = Sensors.newSensor(
-            ReplicaSetMemberStatus.class, "mongodb.server.replicaSet.memberStatus", "The status of this server in the replica set");
-
-    AttributeSensor<Boolean> IS_PRIMARY_FOR_REPLICA_SET = Sensors.newBooleanSensor(
-            "mongodb.server.replicaSet.isPrimary", "True if this server is the write master for the replica set");
-
-    AttributeSensor<Boolean> IS_SECONDARY_FOR_REPLICA_SET = Sensors.newBooleanSensor(
-            "mongodb.server.replicaSet.isSecondary", "True if this server is a secondary server in the replica set");
-
-    AttributeSensor<String> REPLICA_SET_PRIMARY_ENDPOINT = Sensors.newStringSensor(
-            "mongodb.server.replicaSet.primary.endpoint", "The host:port of the server which is acting as primary (master) for the replica set");
-
-    AttributeSensor<String> MONGO_SERVER_ENDPOINT = Sensors.newStringSensor(
-            "mongodb.server.endpoint", "The host:port where this server is listening");
-
-    /**
-     * @return The replica set the server belongs to, or null if the server is a standalone instance.
-     */
-    MongoDBReplicaSet getReplicaSet();
-
-    /**
-     * @return True if the server is a child of {@link MongoDBReplicaSet}.
-     */
-    boolean isReplicaSetMember();
-
-    /**
-     * Initialises a replica set at the server the method is invoked on.
-     * @param replicaSetName The name for the replica set.
-     * @param id The id to be given to this server in the replica set configuration.
-     * @return True if initialisation is successful.
-     */
-    boolean initializeReplicaSet(String replicaSetName, Integer id);
-
-    /**
-     * Reconfigures the replica set that the server the method is invoked on is the primary member of
-     * to include a new member.
-     * <p/>
-     * Note that this can cause long downtime (typically 10-20s, even up to a minute).
-     *
-     * @param secondary New member of the set.
-     * @param id The id for the new set member. Must be unique within the set; its validity is not checked.
-     * @return True if addition is successful. False if the server this is called on is not the primary
-     *         member of the replica set.
-     */
-    boolean addMemberToReplicaSet(MongoDBServer secondary, Integer id);
-
-    /**
-     * Reconfigures the replica set that the server the method is invoked on is the primary member of
-     * to remove the given server.
-     * @param server The server to remove.
-     * @return True if removal is successful. False if the server this is called on is not the primary
-     *         member of the replica set.
-     */
-    boolean removeMemberFromReplicaSet(MongoDBServer server);
-
-}
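
A short sketch of working with the keys and sensors above ("server" stands for a
managed MongoDBServer instance):

    // Sketch: enable the HTTP REST console at deploy time, read sensors later.
    EntitySpec<MongoDBServer> spec = EntitySpec.create(MongoDBServer.class)
            .configure(MongoDBServer.ENABLE_REST_INTERFACE, true);

    Long inserts = server.getAttribute(MongoDBServer.OPCOUNTERS_INSERTS);
    String primary = server.getAttribute(MongoDBServer.REPLICA_SET_PRIMARY_ENDPOINT);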

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBServerImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBServerImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBServerImpl.java
deleted file mode 100644
index a662508..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBServerImpl.java
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import java.net.UnknownHostException;
-import java.util.concurrent.Callable;
-import java.util.concurrent.TimeUnit;
-
-import org.bson.BasicBSONObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.config.render.RendererHints;
-import brooklyn.entity.basic.SoftwareProcessImpl;
-import brooklyn.event.SensorEvent;
-import brooklyn.event.SensorEventListener;
-import brooklyn.event.feed.function.FunctionFeed;
-import brooklyn.event.feed.function.FunctionPollConfig;
-import brooklyn.location.access.BrooklynAccessUtils;
-
-import com.google.common.base.Functions;
-import com.google.common.base.Objects;
-import com.google.common.net.HostAndPort;
-
-public class MongoDBServerImpl extends SoftwareProcessImpl implements MongoDBServer {
-
-    private static final Logger LOG = LoggerFactory.getLogger(MongoDBServerImpl.class);
-
-    static {
-        RendererHints.register(HTTP_INTERFACE_URL, RendererHints.namedActionWithUrl());
-    }
-
-    private FunctionFeed serviceStats;
-    private FunctionFeed replicaSetStats;
-    private MongoDBClientSupport client;
-
-    public MongoDBServerImpl() {
-    }
-
-    @Override
-    public Class<?> getDriverInterface() {
-        return MongoDBDriver.class;
-    }
-
-    @Override
-    protected void connectSensors() {
-        super.connectSensors();
-        connectServiceUpIsRunning();
-
-        int port = getAttribute(MongoDBServer.PORT);
-        HostAndPort accessibleAddress = BrooklynAccessUtils.getBrooklynAccessibleAddress(this, port);
-        setAttribute(MONGO_SERVER_ENDPOINT, String.format("http://%s:%d",
-                accessibleAddress.getHostText(), accessibleAddress.getPort()));
-
-        int httpConsolePort = BrooklynAccessUtils.getBrooklynAccessibleAddress(this, getAttribute(HTTP_PORT)).getPort();
-        setAttribute(HTTP_INTERFACE_URL, String.format("http://%s:%d",
-                accessibleAddress.getHostText(), httpConsolePort));
-
-        try {
-            client = MongoDBClientSupport.forServer(this);
-        } catch (UnknownHostException e) {
-            LOG.warn("Unable to create client connection to {}, not connecting sensors: {} ", this, e.getMessage());
-            return;
-        }
-
-        serviceStats = FunctionFeed.builder()
-                .entity(this)
-                .poll(new FunctionPollConfig<Object, BasicBSONObject>(STATUS_BSON)
-                        .period(2, TimeUnit.SECONDS)
-                        .callable(new Callable<BasicBSONObject>() {
-                            @Override
-                            public BasicBSONObject call() throws Exception {
-                                return Boolean.TRUE.equals(MongoDBServerImpl.this.getAttribute(SERVICE_UP))
-                                    ? client.getServerStatus()
-                                    : null;
-                            }
-                        })
-                        .onException(Functions.<BasicBSONObject>constant(null)))
-                .build();
-
-        if (isReplicaSetMember()) {
-            replicaSetStats = FunctionFeed.builder()
-                    .entity(this)
-                    .poll(new FunctionPollConfig<Object, ReplicaSetMemberStatus>(REPLICA_SET_MEMBER_STATUS)
-                            .period(2, TimeUnit.SECONDS)
-                            .callable(new Callable<ReplicaSetMemberStatus>() {
-                                /**
-                                 * Calls {@link MongoDBClientSupport#getReplicaSetStatus} and
-                                 * extracts <code>myState</code> from the response.
-                                 * @return
-                                 *      The appropriate {@link brooklyn.entity.nosql.mongodb.ReplicaSetMemberStatus}
-                                 *      if <code>myState</code> was non-null, {@link ReplicaSetMemberStatus#UNKNOWN} otherwise.
-                                 */
-                                @Override
-                                public ReplicaSetMemberStatus call() {
-                                    BasicBSONObject serverStatus = client.getReplicaSetStatus();
-                                    int state = serverStatus.getInt("myState", -1);
-                                    return ReplicaSetMemberStatus.fromCode(state);
-                                }
-                            })
-                            .onException(Functions.constant(ReplicaSetMemberStatus.UNKNOWN)))
-                    .build();
-        } else {
-            setAttribute(IS_PRIMARY_FOR_REPLICA_SET, false);
-            setAttribute(IS_SECONDARY_FOR_REPLICA_SET, false);
-        }
-
-        // Take interesting details from STATUS.
-        subscribe(this, STATUS_BSON, new SensorEventListener<BasicBSONObject>() {
-                @Override public void onEvent(SensorEvent<BasicBSONObject> event) {
-                    BasicBSONObject map = event.getValue();
-                    if (map != null && !map.isEmpty()) {
-                        setAttribute(UPTIME_SECONDS, map.getDouble("uptime", 0));
-
-                        // Operations
-                        BasicBSONObject opcounters = (BasicBSONObject) map.get("opcounters");
-                        setAttribute(OPCOUNTERS_INSERTS, opcounters.getLong("insert", 0));
-                        setAttribute(OPCOUNTERS_QUERIES, opcounters.getLong("query", 0));
-                        setAttribute(OPCOUNTERS_UPDATES, opcounters.getLong("update", 0));
-                        setAttribute(OPCOUNTERS_DELETES, opcounters.getLong("delete", 0));
-                        setAttribute(OPCOUNTERS_GETMORE, opcounters.getLong("getmore", 0));
-                        setAttribute(OPCOUNTERS_COMMAND, opcounters.getLong("command", 0));
-
-                        // Network stats
-                        BasicBSONObject network = (BasicBSONObject) map.get("network");
-                        setAttribute(NETWORK_BYTES_IN, network.getLong("bytesIn", 0));
-                        setAttribute(NETWORK_BYTES_OUT, network.getLong("bytesOut", 0));
-                        setAttribute(NETWORK_NUM_REQUESTS, network.getLong("numRequests", 0));
-
-                        // Replica set stats
-                        BasicBSONObject repl = (BasicBSONObject) map.get("repl");
-                        if (isReplicaSetMember() && repl != null) {
-                            setAttribute(IS_PRIMARY_FOR_REPLICA_SET, repl.getBoolean("ismaster"));
-                            setAttribute(IS_SECONDARY_FOR_REPLICA_SET, repl.getBoolean("secondary"));
-                            setAttribute(REPLICA_SET_PRIMARY_ENDPOINT, repl.getString("primary"));
-                        }
-                    }
-                }
-        });
-    }
-
-    @Override
-    protected void disconnectSensors() {
-        super.disconnectSensors();
-        disconnectServiceUpIsRunning();
-        if (serviceStats != null) serviceStats.stop();
-        if (replicaSetStats != null) replicaSetStats.stop();
-    }
-
-    @Override
-    public MongoDBReplicaSet getReplicaSet() {
-        return getConfig(MongoDBServer.REPLICA_SET);
-    }
-
-    @Override
-    public boolean isReplicaSetMember() {
-        return getReplicaSet() != null;
-    }
-
-    @Override
-    public boolean initializeReplicaSet(String replicaSetName, Integer id) {
-        return client.initializeReplicaSet(replicaSetName, id);
-    }
-
-    @Override
-    public boolean addMemberToReplicaSet(MongoDBServer secondary, Integer id) {
-        // TODO The attributes IS_PRIMARY_FOR_REPLICA_SET and REPLICA_SET_MEMBER_STATUS can be out-of-sync.
-        // The former is obtained by an enricher that listens to STATUS_BSON (set by client.getServerStatus()).
-        // The latter is set by a different feed doing client.getReplicaSetStatus().getInt("myState").
-        // The ReplicaSet uses REPLICA_SET_MEMBER_STATUS to determine which node to call.
-        // 
-        // Relying on caller to respect the `false` result, to retry.
-        if (!getAttribute(IS_PRIMARY_FOR_REPLICA_SET)) {
-            LOG.warn("Attempted to add {} to replica set at server that is not primary: {}", secondary, this);
-            return false;
-        }
-        return client.addMemberToReplicaSet(secondary, id);
-    }
-
-    @Override
-    public boolean removeMemberFromReplicaSet(MongoDBServer server) {
-        if (!getAttribute(IS_PRIMARY_FOR_REPLICA_SET)) {
-            LOG.warn("Attempted to remove {} from replica set at server that is not primary: {}", server, this);
-            return false;
-        }
-        return client.removeMemberFromReplicaSet(server);
-    }
-
-    @Override
-    public String toString() {
-        return Objects.toStringHelper(this)
-                .add("id", getId())
-                .add("hostname", getAttribute(HOSTNAME))
-                .add("port", getAttribute(PORT))
-                .toString();
-    }
-
-}
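
The FunctionFeed pattern in connectSensors() above generalises to any polled sensor.
A minimal sketch (the sensor choice and error fallback are illustrative only):

    // Sketch: poll server uptime every 2 seconds, publishing null on error.
    FunctionFeed uptimeFeed = FunctionFeed.builder()
            .entity(this)
            .poll(new FunctionPollConfig<Object, Double>(UPTIME_SECONDS)
                    .period(2, TimeUnit.SECONDS)
                    .callable(new Callable<Double>() {
                        @Override
                        public Double call() throws Exception {
                            return client.getServerStatus().getDouble("uptime", 0);
                        }
                    })
                    .onException(Functions.<Double>constant(null)))
            .build();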

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBSshDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBSshDriver.java
deleted file mode 100644
index ed20639..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBSshDriver.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import static com.google.common.base.Preconditions.checkState;
-import brooklyn.location.basic.SshMachineLocation;
-
-import com.google.common.base.Strings;
-import com.google.common.collect.ImmutableList;
-
-public class MongoDBSshDriver extends AbstractMongoDBSshDriver implements MongoDBDriver {
-
-    public MongoDBSshDriver(MongoDBServerImpl entity, SshMachineLocation machine) {
-        super(entity, machine);
-    }
-
-    @Override
-    public MongoDBServerImpl getEntity() {
-        return MongoDBServerImpl.class.cast(super.getEntity());
-    }
-
-    @Override
-    public void launch() {
-        MongoDBServer server = getEntity();
-
-        ImmutableList.Builder<String> argsBuilder = getArgsBuilderWithDefaults(server)
-            .add("--dbpath", getDataDirectory());
-
-        if (server.isReplicaSetMember()) {
-            String replicaSetName = server.getReplicaSet().getName();
-            checkState(!Strings.isNullOrEmpty(replicaSetName), "Replica set name must not be null or empty");
-            argsBuilder.add("--replSet", replicaSetName);
-        }
-
-        if (Boolean.TRUE.equals(server.getConfig(MongoDBServer.ENABLE_REST_INTERFACE)))
-            argsBuilder.add("--rest");
-
-        launch(argsBuilder);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/ReplicaSetConfig.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/ReplicaSetConfig.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/ReplicaSetConfig.java
deleted file mode 100644
index ab28930..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/ReplicaSetConfig.java
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkNotNull;
-
-import java.util.Iterator;
-
-import org.bson.BSONObject;
-import org.bson.BasicBSONObject;
-import org.bson.types.BasicBSONList;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Optional;
-import com.google.common.net.HostAndPort;
-
-import brooklyn.location.access.BrooklynAccessUtils;
-
-/**
- * Simplifies the creation of configuration objects for MongoDB replica sets.
- * <p/>
- * A configuration object is structured like this:
- * <pre>
- * {
- *    "_id" : "replica-set-name",
- *     "version" : 3,
- *    "members" : [
- *        { "_id" : 0, "host" : "Sams.local:27017" },
- *        { "_id" : 1, "host" : "Sams.local:27018" },
- *        { "_id" : 2, "host" : "Sams.local:27019" }
- *    ]
- * }
- * </pre>
- * To add or remove servers to a replica set you must redefine this configuration
- * (run <code>replSetReconfig</code> on the primary) with the new <code>members</code>
- * list and the <code>version</code> updated.
- */
-public class ReplicaSetConfig {
-
-    private static final Logger LOG = LoggerFactory.getLogger(ReplicaSetConfig.class);
-    static final int MAXIMUM_REPLICA_SET_SIZE = 12;
-    static final int MAXIMUM_VOTING_MEMBERS = 7;
-
-    private Optional<HostAndPort> primary = Optional.absent();
-
-    private String name;
-    private Integer version;
-    BasicBSONList members;
-
-    public ReplicaSetConfig(String name) {
-        this(name, new BasicBSONList());
-    }
-
-    public ReplicaSetConfig(String name, BasicBSONList existingMembers) {
-        this.name = name;
-        this.members = existingMembers;
-        this.version = 1;
-    }
-
-    /**
-     * Creates a configuration with the given name.
-     */
-    public static ReplicaSetConfig builder(String name) {
-        return new ReplicaSetConfig(name);
-    }
-
-    /**
-     * Creates a configuration from an existing configuration.
-     * <p/>
-     * Automatically increments the replica set's version number.
-     */
-    public static ReplicaSetConfig fromExistingConfig(BSONObject config) {
-        checkNotNull(config);
-        checkArgument(config.containsField("_id"), "_id missing from replica set config");
-        checkArgument(config.containsField("version"), "version missing from replica set config");
-        checkArgument(config.containsField("members"), "members missing from replica set config");
-
-        String name = (String) config.get("_id");
-        Integer version = (Integer) config.get("version");
-        BasicBSONList members = (BasicBSONList) config.get("members");
-
-        return new ReplicaSetConfig(name, members).version(++version);
-    }
-
-    /**
-     * Sets the version of the configuration. The version number must increase as the replica set changes.
-     */
-    public ReplicaSetConfig version(Integer version) {
-        this.version = version;
-        return this;
-    }
-
-    /**
-     * Notes the primary member of the replica. Primary members will always be voting members.
-     */
-    public ReplicaSetConfig primary(HostAndPort primary) {
-        this.primary = Optional.of(primary);
-        return this;
-    }
-
-    /**
-     * Adds a new member to the replica set config using {@link MongoDBServer#HOSTNAME} and {@link MongoDBServer#PORT}
-     * for hostname and port. Doesn't attempt to check that the id is free.
-     */
-    public ReplicaSetConfig member(MongoDBServer server, Integer id) {
-        // TODO: Switch to SUBNET_HOSTNAME and there should be no need for a Brooklyn accessible
-        // address. It will require modification to MongoDBClientSupport, though, since it sets
-        // the primary to the host/port accessible from Brooklyn.
-        HostAndPort hap = BrooklynAccessUtils.getBrooklynAccessibleAddress(server, server.getAttribute(MongoDBServer.PORT));
-        return member(hap.getHostText(), hap.getPort(), id);
-    }
-
-    /**
-     * Adds a new member to the replica set config using the given {@link HostAndPort} for hostname and port.
-     * Doesn't attempt to check that the id is free.
-     */
-    public ReplicaSetConfig member(HostAndPort address, Integer id) {
-        return member(address.getHostText(), address.getPort(), id);
-    }
-
-    /**
-     * Adds a new member to the replica set config with the given hostname, port and id. Doesn't attempt to check
-     * that the id is free.
-     */
-    public ReplicaSetConfig member(String hostname, Integer port, Integer id) {
-        if (members.size() == MAXIMUM_REPLICA_SET_SIZE) {
-            throw new IllegalStateException(String.format(
-                    "Replica set {} exceeds maximum size of {} with addition of member at {}:{}",
-                    new Object[]{name, MAXIMUM_REPLICA_SET_SIZE, hostname, port}));
-        }
-        BasicBSONObject member = new BasicBSONObject();
-        member.put("_id", id);
-        member.put("host", String.format("%s:%s", hostname, port));
-        members.add(member);
-        return this;
-    }
-
-    /** Removes the first entity using {@link MongoDBServer#HOSTNAME} and {@link MongoDBServer#PORT}. */
-    public ReplicaSetConfig remove(MongoDBServer server) {
-        HostAndPort hap = BrooklynAccessUtils.getBrooklynAccessibleAddress(server, server.getAttribute(MongoDBServer.PORT));
-        return remove(hap.getHostText(), hap.getPort());
-    }
-
-    /** Removes the first entity with host and port matching the given address. */
-    public ReplicaSetConfig remove(HostAndPort address) {
-        return remove(address.getHostText(), address.getPort());
-    }
-
-    /**
-     * Removes the first entity with the given hostname and port from the list of members
-     */
-    public ReplicaSetConfig remove(String hostname, Integer port) {
-        String host = String.format("%s:%s", hostname, port);
-        Iterator<Object> it = this.members.iterator();
-        while (it.hasNext()) {
-            Object next = it.next();
-            if (next instanceof BasicBSONObject) {
-                BasicBSONObject basicBSONObject = (BasicBSONObject) next;
-                if (host.equals(basicBSONObject.getString("host"))) {
-                    it.remove();
-                    break;
-                }
-            }
-        }
-        return this;
-    }
-
-    /**
-     * @return A {@link BasicBSONObject} representing the configuration that is suitable for a MongoDB server.
-     */
-    public BasicBSONObject build() {
-        setVotingMembers();
-        BasicBSONObject config = new BasicBSONObject();
-        config.put("_id", name);
-        config.put("version", version);
-        config.put("members", members);
-        return config;
-    }
-
-    /**
-     * Selects 1, 3, 5 or 7 members to have a vote. The primary member (as set by
-     * {@link #primary(com.google.common.net.HostAndPort)}) is guaranteed a vote if
-     * it is in {@link #members}.
-     * <p/>
-     *
- *     Reconfiguring servers to be voters when they previously did not have votes generally triggers
-     * a primary election. This confuses the MongoDB Java driver, which logs an error like:
-     * <pre>
-     * WARN  emptying DBPortPool to sams.home/192.168.1.64:27019 b/c of error
-     * java.io.EOFException: null
-     *    at org.bson.io.Bits.readFully(Bits.java:48) ~[mongo-java-driver-2.11.3.jar:na]
-     * WARN  Command { "replSetReconfig" : ... } on sams.home/192.168.1.64:27019 failed
-     * com.mongodb.MongoException$Network: Read operation to server sams.home/192.168.1.64:27019 failed on database admin
-     *    at com.mongodb.DBTCPConnector.innerCall(DBTCPConnector.java:253) ~[mongo-java-driver-2.11.3.jar:na]
-     * Caused by: java.io.EOFException: null
-     *    at org.bson.io.Bits.readFully(Bits.java:48) ~[mongo-java-driver-2.11.3.jar:na]
-     * </pre>
-     *
- *     The MongoDB documentation on <a href="http://docs.mongodb.org/manual/tutorial/configure-a-non-voting-replica-set-member/">
-     * non-voting members</a> says:
-     * <blockquote>
-     *     Initializes a new replica set configuration. Disconnects the shell briefly and forces a
-     *     reconnection as the replica set renegotiates which member will be primary. As a result,
-     *     the shell will display an error even if this command succeeds.
-     * </blockquote>
-     *
-     * So the problem is more that the MongoDB Java driver does not understand why the server
- *     may have disconnected and is too eager to report a problem.
-     */
-    private void setVotingMembers() {
-        if (LOG.isDebugEnabled())
-            LOG.debug("Setting voting and non-voting members of replica set: {}", name);
-        boolean seenPrimary = false;
-        String expectedPrimary = primary.isPresent()
-                ? primary.get().getHostText() + ":" + primary.get().getPort()
-                : "";
-
-        // Ensure an odd number of voters
-        int setSize = this.members.size();
-        int nonPrimaryVotingMembers = Math.min(setSize % 2 == 0 ? setSize - 1 : setSize, MAXIMUM_VOTING_MEMBERS);
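-        // e.g. setSize 4 -> 3 votes, setSize 9 -> 7 votes (kept odd, capped at MAXIMUM_VOTING_MEMBERS)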
-        if (primary.isPresent()) {
-            if (LOG.isTraceEnabled())
-                LOG.trace("Reserving vote for primary: " + expectedPrimary);
-            nonPrimaryVotingMembers -= 1;
-        }
-
-        for (Object member : this.members) {
-            if (member instanceof BasicBSONObject) {
-                BasicBSONObject bsonObject = BasicBSONObject.class.cast(member);
-                String host = bsonObject.getString("host");
-
-                // is this member noted as the primary?
-                if (this.primary.isPresent() && expectedPrimary.equals(host)) {
-                    bsonObject.put("votes", 1);
-                    seenPrimary = true;
-                    if (LOG.isDebugEnabled())
-                        LOG.debug("Voting member (primary) of set {}: {}", name, host);
-                } else if (nonPrimaryVotingMembers-- > 0) {
-                    bsonObject.put("votes", 1);
-                    if (LOG.isDebugEnabled())
-                        LOG.debug("Voting member of set {}: {}", name, host);
-                } else {
-                    bsonObject.put("votes", 0);
-                    if (LOG.isDebugEnabled())
-                        LOG.debug("Non-voting member of set {}: {}", name, host);
-                }
-            } else {
-                LOG.error("Unexpected entry in replica set members list: " + member);
-            }
-        }
-
-        if (primary.isPresent() && !seenPrimary) {
-            LOG.warn("Cannot give replica set primary a vote in reconfigured set: " +
-                    "primary was indicated as {} but no member with that host and port was seen in the set. " +
-                    "The replica set now has an even number of voters.",
-                    this.primary);
-        }
-    }
-
-}

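For reference, the builder above composes like this; the hostnames, ports and ids are illustrative, and HostAndPort is the Guava type the class already imports:

    // Initial config: three members, the first noted as primary so it is guaranteed a vote.
    BasicBSONObject initial = ReplicaSetConfig.builder("replica-set-name")
            .member("sams.local", 27017, 0)
            .member("sams.local", 27018, 1)
            .member("sams.local", 27019, 2)
            .primary(HostAndPort.fromParts("sams.local", 27017))
            .build();

    // Reconfiguration: fromExistingConfig increments the version automatically.
    BasicBSONObject reconfig = ReplicaSetConfig.fromExistingConfig(initial)
            .member("sams.local", 27020, 3)
            .build();
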
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/ReplicaSetMemberStatus.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/ReplicaSetMemberStatus.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/ReplicaSetMemberStatus.java
deleted file mode 100644
index 170f097..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/ReplicaSetMemberStatus.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-/**
- * @see <a href="http://docs.mongodb.org/manual/reference/replica-status/">Replica set status reference</a>
- */
-public enum ReplicaSetMemberStatus {
-
-    STARTUP("Start up, phase 1 (parsing configuration)"),
-    PRIMARY("Primary"),
-    SECONDARY("Secondary"),
-    RECOVERING("Member is recovering (initial sync, post-rollback, stale members)"),
-    FATAL("Member has encountered an unrecoverable error"),
-    STARTUP2("Start up, phase 2 (forking threads)"),
-    UNKNOWN("Unknown (the set has never connected to the member)"),
-    ARBITER("Member is an arbiter"),
-    DOWN("Member is not accessible to the set"),
-    ROLLBACK("Member is rolling back data. See rollback"),
-    SHUNNED("Member has been removed from replica set");
-
-    private final String description;
-
-    ReplicaSetMemberStatus(String description) {
-        this.description = description;
-    }
-
-    public static ReplicaSetMemberStatus fromCode(int code) {
-        switch (code) {
-            case 0: return STARTUP;
-            case 1: return PRIMARY;
-            case 2: return SECONDARY;
-            case 3: return RECOVERING;
-            case 4: return FATAL;
-            case 5: return STARTUP2;
-            case 6: return UNKNOWN;
-            case 7: return ARBITER;
-            case 8: return DOWN;
-            case 9: return ROLLBACK;
-            case 10: return SHUNNED;
-            default: return UNKNOWN;
-        }
-    }
-
-    @Override
-    public String toString() {
-        return name() + ": " + description;
-    }
-
-}

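This enum decodes the integer `myState` field that the feeds read via replSetGetStatus (see MongoDBServerImpl earlier in this commit). A small sketch of the mapping, assuming the status arrives as a BasicBSONObject:

    // Decode a member's state from the BSON result of replSetGetStatus.
    static boolean isPrimary(BasicBSONObject replSetStatus) {
        int code = replSetStatus.getInt("myState");
        return ReplicaSetMemberStatus.fromCode(code) == ReplicaSetMemberStatus.PRIMARY;
    }
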
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/CoLocatedMongoDBRouter.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/CoLocatedMongoDBRouter.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/CoLocatedMongoDBRouter.java
deleted file mode 100644
index eed05d2..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/CoLocatedMongoDBRouter.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import java.util.List;
-import java.util.Map;
-
-import brooklyn.config.ConfigKey;
-import brooklyn.entity.basic.ConfigKeys;
-import brooklyn.entity.basic.SameServerEntity;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.Sensors;
-import brooklyn.util.flags.SetFromFlag;
-
-import com.google.common.reflect.TypeToken;
-
-@ImplementedBy(CoLocatedMongoDBRouterImpl.class)
-public interface CoLocatedMongoDBRouter extends SameServerEntity {
-    @SuppressWarnings("serial")
-    @SetFromFlag("siblingSpecs")
-    ConfigKey<Iterable<EntitySpec<?>>> SIBLING_SPECS = ConfigKeys.newConfigKey(new TypeToken<Iterable<EntitySpec<?>>>(){}, 
-            "mongodb.colocatedrouter.sibling.specs", "Collection of (configured) specs for entities to be co-located with the router");
-    
-    @SetFromFlag("shardedDeployment")
-    ConfigKey<MongoDBShardedDeployment> SHARDED_DEPLOYMENT = ConfigKeys.newConfigKey(MongoDBShardedDeployment.class, 
-            "mongodb.colocatedrouter.shardeddeployment", "Sharded deployment to which the router should report");
-
-    /** Deprecated since 0.7.0; use {@link #PROPAGATING_SENSORS} instead. */
-    @Deprecated
-    @SuppressWarnings("serial")
-    @SetFromFlag("propogatingSensors")
-    ConfigKey<List<Map<String, ?>>> PROPOGATING_SENSORS = ConfigKeys.newConfigKey(new TypeToken<List<Map<String, ?>>>(){}, 
-            "mongodb.colocatedrouter.propogating.sensors", "List of sensors to be propogated from child members");
-
-    @SetFromFlag("propagatingSensors")
-    ConfigKey<List<Map<String, ?>>> PROPAGATING_SENSORS = ConfigKeys.newConfigKey(new TypeToken<List<Map<String, ?>>>(){},
-            "mongodb.colocatedrouter.propagating.sensors", "List of sensors to be propogated from child members");
-
-    public static AttributeSensor<MongoDBRouter> ROUTER = Sensors.newSensor(MongoDBRouter.class,
-            "mongodb.colocatedrouter.router", "Router");
-}

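In use, the keys above are set on an EntitySpec along these lines; the in-scope `deployment` and `siblingSpec` variables are illustrative assumptions:

    // Hedged sketch: a router co-located with one sibling entity, reporting to a sharded deployment.
    EntitySpec<CoLocatedMongoDBRouter> routerSpec = EntitySpec.create(CoLocatedMongoDBRouter.class)
            .configure(CoLocatedMongoDBRouter.SHARDED_DEPLOYMENT, deployment)
            .configure(CoLocatedMongoDBRouter.SIBLING_SPECS,
                    ImmutableList.<EntitySpec<?>>of(siblingSpec));
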
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/CoLocatedMongoDBRouterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/CoLocatedMongoDBRouterImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/CoLocatedMongoDBRouterImpl.java
deleted file mode 100644
index a8bec4e..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/CoLocatedMongoDBRouterImpl.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import java.util.Collection;
-
-import brooklyn.enricher.Enrichers;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.basic.SameServerEntityImpl;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.event.basic.DependentConfiguration;
-import brooklyn.location.Location;
-
-import com.google.common.base.Predicates;
-import com.google.common.collect.Iterables;
-
-public class CoLocatedMongoDBRouterImpl extends SameServerEntityImpl implements CoLocatedMongoDBRouter {
-    @Override
-    public void init() {
-        super.init();
-        
-        for (EntitySpec<?> siblingSpec : getConfig(CoLocatedMongoDBRouter.SIBLING_SPECS)) {
-            addChild(siblingSpec);
-        }
-    }
-
-    @Override
-    protected void doStart(Collection<? extends Location> locations) {
-        // TODO Changed to create the router child after init as a workaround.
-        // When we use `mongo-sharded.yaml`, and we call 
-        // `getConfig(CoLocatedMongoDBRouter.SHARDED_DEPLOYMENT)`,
-        // the value is `$brooklyn:component("shardeddeployment")`.
-        // To look up the component, it tries to do `entity().getApplication()` to
-        // search the entities for one with the correct id. However if being done
-        // during `init()`, then this (which is returned by `entity()`) has not had its parent
-        // set, so `entity().getApplication()` returns null.
-        //
-        // We should move this code back to `init()` once we have a solution for that.
-        // We can also remove the call to Entities.manage() once this is in init() again.
-        
-        MongoDBRouter router = addChild(EntitySpec.create(MongoDBRouter.class)
-                .configure(MongoDBRouter.CONFIG_SERVERS,
-                        DependentConfiguration.attributeWhenReady(
-                                getConfig(CoLocatedMongoDBRouter.SHARDED_DEPLOYMENT), 
-                                MongoDBConfigServerCluster.CONFIG_SERVER_ADDRESSES)));
-        Entities.manage(router);
-        setAttribute(ROUTER, (MongoDBRouter) Iterables.tryFind(getChildren(), Predicates.instanceOf(MongoDBRouter.class)).get());
-        addEnricher(Enrichers.builder().propagating(MongoDBRouter.PORT).from(router).build());
-        
-        super.doStart(locations);
-        setAttribute(Startable.SERVICE_UP, true);
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServer.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServer.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServer.java
deleted file mode 100644
index 8fd2560..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServer.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import brooklyn.entity.nosql.mongodb.AbstractMongoDBServer;
-import brooklyn.entity.proxying.ImplementedBy;
-
-@ImplementedBy(MongoDBConfigServerImpl.class)
-public interface MongoDBConfigServer extends AbstractMongoDBServer {
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerCluster.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerCluster.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerCluster.java
deleted file mode 100644
index ae4ea2c..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerCluster.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import brooklyn.entity.group.DynamicCluster;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.Sensors;
-
-import com.google.common.reflect.TypeToken;
-
-@ImplementedBy(MongoDBConfigServerClusterImpl.class)
-public interface MongoDBConfigServerCluster extends DynamicCluster {
-
-    @SuppressWarnings("serial")
-    AttributeSensor<Iterable<String>> CONFIG_SERVER_ADDRESSES = Sensors.newSensor(new TypeToken<Iterable<String>>() {}, 
-            "mongodb.config.server.addresses", "List of config server hostnames and ports");
-    
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerClusterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerClusterImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerClusterImpl.java
deleted file mode 100644
index c556d05..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerClusterImpl.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import java.util.Collection;
-
-import brooklyn.entity.Entity;
-import brooklyn.entity.group.DynamicClusterImpl;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.location.Location;
-import brooklyn.location.access.BrooklynAccessUtils;
-
-import com.google.common.base.Function;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-import com.google.common.net.HostAndPort;
-
-public class MongoDBConfigServerClusterImpl extends DynamicClusterImpl implements MongoDBConfigServerCluster {
-    
-    @Override
-    protected EntitySpec<?> getMemberSpec() {
-        if (super.getMemberSpec() != null)
-            return super.getMemberSpec();
-        return EntitySpec.create(MongoDBConfigServer.class);
-    }
-    
-    @Override
-    public void start(Collection<? extends Location> locs) {
-        super.start(locs);
-        
-        // TODO this should be an enricher
-        Iterable<String> memberHostNamesAndPorts = Iterables.transform(getMembers(), new Function<Entity, String>() {
-            @Override
-            public String apply(Entity entity) {
-                return entity.getAttribute(MongoDBConfigServer.SUBNET_HOSTNAME) + ":" + entity.getAttribute(MongoDBConfigServer.PORT);
-            }
-        });
-        setAttribute(MongoDBConfigServerCluster.CONFIG_SERVER_ADDRESSES, ImmutableList.copyOf(memberHostNamesAndPorts));
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerDriver.java
deleted file mode 100644
index 10122f3..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerDriver.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import brooklyn.entity.basic.SoftwareProcessDriver;
-
-public interface MongoDBConfigServerDriver extends SoftwareProcessDriver {
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerImpl.java
deleted file mode 100644
index 2414340..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerImpl.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import brooklyn.entity.basic.SoftwareProcessImpl;
-
-public class MongoDBConfigServerImpl extends SoftwareProcessImpl implements MongoDBConfigServer {
-
-    @Override
-    public Class<?> getDriverInterface() {
-        return MongoDBConfigServerDriver.class;
-    }
-    
-    @Override
-    protected void connectSensors() {
-        super.connectSensors();
-        connectServiceUpIsRunning();
-    }
-
-}



[15/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseClusterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseClusterImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseClusterImpl.java
new file mode 100644
index 0000000..f279987
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseClusterImpl.java
@@ -0,0 +1,597 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchbase;
+
+import static brooklyn.util.JavaGroovyEquivalents.groovyTruth;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import javax.annotation.Nonnull;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.config.render.RendererHints;
+import brooklyn.enricher.Enrichers;
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.Attributes;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.basic.EntityInternal;
+import brooklyn.entity.basic.ServiceStateLogic;
+import brooklyn.entity.basic.SoftwareProcess;
+import brooklyn.entity.effector.Effectors;
+import brooklyn.entity.group.AbstractMembershipTrackingPolicy;
+import brooklyn.entity.group.DynamicClusterImpl;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.DependentConfiguration;
+import brooklyn.event.feed.http.HttpFeed;
+import brooklyn.event.feed.http.HttpPollConfig;
+import brooklyn.event.feed.http.HttpValueFunctions;
+import brooklyn.event.feed.http.JsonFunctions;
+import brooklyn.location.access.BrooklynAccessUtils;
+import brooklyn.policy.PolicySpec;
+import brooklyn.util.collections.CollectionFunctionals;
+import brooklyn.util.collections.MutableSet;
+import brooklyn.util.collections.QuorumCheck;
+import brooklyn.util.exceptions.Exceptions;
+import brooklyn.util.guava.Functionals;
+import brooklyn.util.guava.IfFunctions;
+import brooklyn.util.math.MathPredicates;
+import brooklyn.util.task.DynamicTasks;
+import brooklyn.util.task.TaskBuilder;
+import brooklyn.util.task.Tasks;
+import brooklyn.util.text.ByteSizeStrings;
+import brooklyn.util.text.StringFunctions;
+import brooklyn.util.text.Strings;
+import brooklyn.util.time.Duration;
+import brooklyn.util.time.Time;
+
+import com.google.common.base.Function;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicates;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import com.google.common.net.HostAndPort;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonElement;
+
+public class CouchbaseClusterImpl extends DynamicClusterImpl implements CouchbaseCluster {
+    
+    /*
+     * Refactoring required:
+     * 
+     * Currently, on start() the cluster waits for an arbitrary SERVICE_UP_TIME_OUT (3 minutes) before assuming that a quorate 
+     * number of servers are available. The servers are then added to the cluster, and a further wait period of  
+     * DELAY_BEFORE_ADVERTISING_CLUSTER (30 seconds) is used before advertising the cluster
+     * 
+     * DELAY_BEFORE_ADVERTISING_CLUSTER: It should be possible to refactor this away by adding a repeater that will poll
+     * the REST API of the primary node (once established) until the API indicates that the cluster is available
+     * 
+     * SERVICE_UP_TIME_OUT: The refactoring of this would be more substantial. One method would be to remove the bulk of the 
+     * logic from the start() method, and rely entirely on the membership tracking policy and the onServerPoolMemberChanged()
+     * method. The addition of a RUNNING sensor on the nodes would allow the cluster to determine that a node is up and
+     * running but has not yet been added to the cluster. The IS_CLUSTER_INITIALIZED key could be used to determine whether
+     * or not the cluster should be initialized, or a node simply added to an existing cluster. A repeater could be used
+     * in the driver to ensure that the method does not return until the node has been fully added
+     * 
+     * There is an (incomplete) first-pass at this here: https://github.com/Nakomis/incubator-brooklyn/compare/couchbase-running-sensor
+     * however, there have been significant changes to the cluster initialization since that work was done so it will probably
+     * need to be re-done
+     * 
+     * Additionally, during bucket creation, an HttpPoll is used to check that the bucket has been created. This should be 
+     * refactored to use a Repeater in CouchbaseNodeSshDriver.bucketCreate() in a similar way to the one employed in
+     * CouchbaseNodeSshDriver.rebalance(). Were this done, this class could simply queue the bucket creation tasks
+     * 
+     */
+    
+    private static final Logger log = LoggerFactory.getLogger(CouchbaseClusterImpl.class);
+    private final Object mutex = new Object[0];
+    // Used to serialize bucket creation as only one bucket can be created at a time,
+    // so a feed is used to determine when a bucket has finished being created
+    private final AtomicReference<HttpFeed> resetBucketCreation = new AtomicReference<HttpFeed>();
+
+    @Override
+    public void init() {
+        log.info("Initializing the Couchbase cluster...");
+        super.init();
+        
+        addEnricher(
+            Enrichers.builder()
+                .transforming(COUCHBASE_CLUSTER_UP_NODES)
+                .from(this)
+                .publishing(COUCHBASE_CLUSTER_UP_NODE_ADDRESSES)
+                .computing(new ListOfHostAndPort()).build() );
+        addEnricher(
+            Enrichers.builder()
+                .transforming(COUCHBASE_CLUSTER_UP_NODE_ADDRESSES)
+                .from(this)
+                .publishing(COUCHBASE_CLUSTER_CONNECTION_URL)
+                .computing(
+                    IfFunctions.<List<String>>ifPredicate(
+                        Predicates.compose(MathPredicates.lessThan(getConfig(CouchbaseCluster.INITIAL_QUORUM_SIZE)), 
+                            CollectionFunctionals.sizeFunction(0)) )
+                    .value((String)null)
+                    .defaultApply(
+                        Functionals.chain(
+                            CollectionFunctionals.<String,List<String>>limit(4), 
+                            StringFunctions.joiner(","),
+                            StringFunctions.formatter("http://%s/"))) )
+                .build() );
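+        // e.g. publishes "http://10.0.0.1:8091,10.0.0.2:8091/" (first four up nodes) once quorate, or null below quorum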
+        
+        Map<? extends AttributeSensor<? extends Number>, ? extends AttributeSensor<? extends Number>> enricherSetup = 
+            ImmutableMap.<AttributeSensor<? extends Number>, AttributeSensor<? extends Number>>builder()
+                .put(CouchbaseNode.OPS, CouchbaseCluster.OPS_PER_NODE)
+                .put(CouchbaseNode.COUCH_DOCS_DATA_SIZE, CouchbaseCluster.COUCH_DOCS_DATA_SIZE_PER_NODE)
+                .put(CouchbaseNode.COUCH_DOCS_ACTUAL_DISK_SIZE, CouchbaseCluster.COUCH_DOCS_ACTUAL_DISK_SIZE_PER_NODE)
+                .put(CouchbaseNode.EP_BG_FETCHED, CouchbaseCluster.EP_BG_FETCHED_PER_NODE)
+                .put(CouchbaseNode.MEM_USED, CouchbaseCluster.MEM_USED_PER_NODE)
+                .put(CouchbaseNode.COUCH_VIEWS_ACTUAL_DISK_SIZE, CouchbaseCluster.COUCH_VIEWS_ACTUAL_DISK_SIZE_PER_NODE)
+                .put(CouchbaseNode.CURR_ITEMS, CouchbaseCluster.CURR_ITEMS_PER_NODE)
+                .put(CouchbaseNode.VB_REPLICA_CURR_ITEMS, CouchbaseCluster.VB_REPLICA_CURR_ITEMS_PER_NODE)
+                .put(CouchbaseNode.COUCH_VIEWS_DATA_SIZE, CouchbaseCluster.COUCH_VIEWS_DATA_SIZE_PER_NODE)
+                .put(CouchbaseNode.GET_HITS, CouchbaseCluster.GET_HITS_PER_NODE)
+                .put(CouchbaseNode.CMD_GET, CouchbaseCluster.CMD_GET_PER_NODE)
+                .put(CouchbaseNode.CURR_ITEMS_TOT, CouchbaseCluster.CURR_ITEMS_TOT_PER_NODE)
+            .build();
+        
+        for (AttributeSensor<? extends Number> nodeSensor : enricherSetup.keySet()) {
+            addSummingMemberEnricher(nodeSensor);
+            addAveragingMemberEnricher(nodeSensor, enricherSetup.get(nodeSensor));
+        }
+        
+        addEnricher(Enrichers.builder().updatingMap(Attributes.SERVICE_NOT_UP_INDICATORS)
+            .from(IS_CLUSTER_INITIALIZED).computing(
+                IfFunctions.ifNotEquals(true).value("The cluster is not yet completely initialized")
+                    .defaultValue(null).build()).build() );
+    }
+    
+    private void addAveragingMemberEnricher(AttributeSensor<? extends Number> fromSensor, AttributeSensor<? extends Number> toSensor) {
+        addEnricher(Enrichers.builder()
+            .aggregating(fromSensor)
+            .publishing(toSensor)
+            .fromMembers()
+            .computingAverage()
+            .build()
+        );
+    }
+
+    private void addSummingMemberEnricher(AttributeSensor<? extends Number> source) {
+        addEnricher(Enrichers.builder()
+            .aggregating(source)
+            .publishing(source)
+            .fromMembers()
+            .computingSum()
+            .build()
+        );
+    }
+
+    @Override
+    protected void doStart() {
+        setAttribute(IS_CLUSTER_INITIALIZED, false);
+        
+        super.doStart();
+
+        connectSensors();
+        
+        setAttribute(BUCKET_CREATION_IN_PROGRESS, false);
+
+        //start timeout before adding the servers
+        Tasks.setBlockingDetails("Pausing while Couchbase stabilizes");
+        Time.sleep(getConfig(NODES_STARTED_STABILIZATION_DELAY));
+
+        Optional<Set<Entity>> upNodes = Optional.<Set<Entity>>fromNullable(getAttribute(COUCHBASE_CLUSTER_UP_NODES));
+        if (upNodes.isPresent() && !upNodes.get().isEmpty()) {
+
+            Tasks.setBlockingDetails("Adding servers to Couchbase");
+            
+            //TODO: select a new primary node if this one fails
+            Entity primaryNode = upNodes.get().iterator().next();
+            ((EntityInternal) primaryNode).setAttribute(CouchbaseNode.IS_PRIMARY_NODE, true);
+            setAttribute(COUCHBASE_PRIMARY_NODE, primaryNode);
+
+            Set<Entity> serversToAdd = MutableSet.<Entity>copyOf(getUpNodes());
+
+            if (serversToAdd.size() >= getQuorumSize() && serversToAdd.size() > 1) {
+                log.info("Number of SERVICE_UP nodes:{} in cluster:{} reached Quorum:{}, adding the servers", new Object[]{serversToAdd.size(), getId(), getQuorumSize()});
+                addServers(serversToAdd);
+
+                //wait for servers to be added to the couchbase server
+                try {
+                    Tasks.setBlockingDetails("Delaying before advertising cluster up");
+                    Time.sleep(getConfig(DELAY_BEFORE_ADVERTISING_CLUSTER));
+                } finally {
+                    Tasks.resetBlockingDetails();
+                }
+                
+                ((CouchbaseNode)getPrimaryNode()).rebalance();
+            } else {
+                if (getQuorumSize()>1) {
+                    log.warn(this+" is not quorate; will likely fail later, but proceeding for now");
+                }
+                for (Entity server: serversToAdd) {
+                    ((EntityInternal) server).setAttribute(CouchbaseNode.IS_IN_CLUSTER, true);
+                }
+            }
+                
+            if (getConfig(CREATE_BUCKETS)!=null) {
+                try {
+                    Tasks.setBlockingDetails("Creating buckets in Couchbase");
+
+                    createBuckets();
+                    DependentConfiguration.waitInTaskForAttributeReady(this, CouchbaseCluster.BUCKET_CREATION_IN_PROGRESS, Predicates.equalTo(false));
+
+                } finally {
+                    Tasks.resetBlockingDetails();
+                }
+            }
+
+            if (getConfig(REPLICATION)!=null) {
+                try {
+                    Tasks.setBlockingDetails("Configuring replication rules");
+
+                    List<Map<String, Object>> replRules = getConfig(REPLICATION);
+                    for (Map<String, Object> replRule: replRules) {
+                        DynamicTasks.queue(Effectors.invocation(getPrimaryNode(), CouchbaseNode.ADD_REPLICATION_RULE, replRule));
+                    }
+                    DynamicTasks.waitForLast();
+
+                } finally {
+                    Tasks.resetBlockingDetails();
+                }
+            }
+
+            setAttribute(IS_CLUSTER_INITIALIZED, true);
+            
+        } else {
+            throw new IllegalStateException("No up nodes available after starting");
+        }
+    }
+
+    @Override
+    public void stop() {
+        if (resetBucketCreation.get() != null) {
+            resetBucketCreation.get().stop();
+        }
+        super.stop();
+    }
+
+    protected void connectSensors() {
+        addPolicy(PolicySpec.create(MemberTrackingPolicy.class)
+                .displayName("Controller targets tracker")
+                .configure("group", this));
+    }
+    
+    private final static class ListOfHostAndPort implements Function<Set<Entity>, List<String>> {
+        @Override public List<String> apply(Set<Entity> input) {
+            List<String> addresses = Lists.newArrayList();
+            for (Entity entity : input) {
+                addresses.add(String.format("%s",
+                        BrooklynAccessUtils.getBrooklynAccessibleAddress(entity, entity.getAttribute(CouchbaseNode.COUCHBASE_WEB_ADMIN_PORT))));
+            }
+            return addresses;
+        }
+    }
+
+    public static class MemberTrackingPolicy extends AbstractMembershipTrackingPolicy {
+        @Override protected void onEntityChange(Entity member) {
+            ((CouchbaseClusterImpl)entity).onServerPoolMemberChanged(member);
+        }
+
+        @Override protected void onEntityAdded(Entity member) {
+            ((CouchbaseClusterImpl)entity).onServerPoolMemberChanged(member);
+        }
+
+        @Override protected void onEntityRemoved(Entity member) {
+            ((CouchbaseClusterImpl)entity).onServerPoolMemberChanged(member);
+        }
+    }
+
+    protected synchronized void onServerPoolMemberChanged(Entity member) {
+        if (log.isTraceEnabled()) log.trace("For {}, considering membership of {} which is in locations {}",
+                new Object[]{this, member, member.getLocations()});
+
+        //FIXME: make use of servers to be added after cluster initialization.
+        synchronized (mutex) {
+            if (belongsInServerPool(member)) {
+
+                Optional<Set<Entity>> upNodes = Optional.fromNullable(getUpNodes());
+                if (upNodes.isPresent()) {
+
+                    if (!upNodes.get().contains(member)) {
+                        Set<Entity> newNodes = Sets.newHashSet(getUpNodes());
+                        newNodes.add(member);
+                        setAttribute(COUCHBASE_CLUSTER_UP_NODES, newNodes);
+
+                        //add to set of servers to be added.
+                        if (isClusterInitialized()) {
+                            addServer(member);
+                        }
+                    }
+                } else {
+                    Set<Entity> newNodes = Sets.newHashSet();
+                    newNodes.add(member);
+                    setAttribute(COUCHBASE_CLUSTER_UP_NODES, newNodes);
+
+                    if (isClusterInitialized()) {
+                        addServer(member);
+                    }
+                }
+            } else {
+                Set<Entity> upNodes = getUpNodes();
+                if (upNodes != null && upNodes.contains(member)) {
+                    upNodes.remove(member);
+                    setAttribute(COUCHBASE_CLUSTER_UP_NODES, upNodes);
+                    log.info("Removing couchbase node {}: {}; from cluster", new Object[]{this, member});
+                }
+            }
+            if (log.isTraceEnabled()) log.trace("Done {} checkEntity {}", this, member);
+        }
+    }
+
+    protected boolean belongsInServerPool(Entity member) {
+        if (!groovyTruth(member.getAttribute(Startable.SERVICE_UP))) {
+            if (log.isTraceEnabled()) log.trace("Members of {}, checking {}, eliminating because not up", this, member);
+            return false;
+        }
+        if (!getMembers().contains(member)) {
+            if (log.isTraceEnabled())
+                log.trace("Members of {}, checking {}, eliminating because not member", this, member);
+
+            return false;
+        }
+        if (log.isTraceEnabled()) log.trace("Members of {}, checking {}, approving", this, member);
+
+        return true;
+    }
+
+    @Override
+    protected EntitySpec<?> getMemberSpec() {
+        EntitySpec<?> result = super.getMemberSpec();
+        if (result != null) return result;
+        return EntitySpec.create(CouchbaseNode.class);
+    }
+
+    @Override
+    public int getQuorumSize() {
+        Integer quorumSize = getConfig(CouchbaseCluster.INITIAL_QUORUM_SIZE);
+        if (quorumSize != null && quorumSize > 0)
+            return quorumSize;
+        // by default the quorum would be floor(initial_cluster_size/2) + 1
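+        // e.g. INITIAL_SIZE 5 => floor(5/2) + 1 = 3; INITIAL_SIZE 4 => 3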
+        return (int) Math.floor(getConfig(INITIAL_SIZE) / 2) + 1;
+    }
+
+    protected int getActualSize() {
+        return Optional.fromNullable(getAttribute(CouchbaseCluster.ACTUAL_CLUSTER_SIZE)).or(-1);
+    }
+
+    private Set<Entity> getUpNodes() {
+        return getAttribute(COUCHBASE_CLUSTER_UP_NODES);
+    }
+
+    private CouchbaseNode getPrimaryNode() {
+        return (CouchbaseNode) getAttribute(COUCHBASE_PRIMARY_NODE);
+    }
+
+    @Override
+    protected void initEnrichers() {
+        addEnricher(Enrichers.builder().updatingMap(ServiceStateLogic.SERVICE_NOT_UP_INDICATORS)
+            .from(COUCHBASE_CLUSTER_UP_NODES)
+            .computing(new Function<Set<Entity>, Object>() {
+                @Override
+                public Object apply(Set<Entity> input) {
+                    if (input==null) return "Couchbase up nodes not set";
+                    if (input.isEmpty()) return "No Couchbase up nodes";
+                    if (input.size() < getQuorumSize()) return "Couchbase up nodes not quorate";
+                    return null;
+                }
+            }).build());
+        
+        if (config().getLocalRaw(UP_QUORUM_CHECK).isAbsent()) {
+            // TODO Only leaving CouchbaseQuorumCheck here in case it is contained in persisted state.
+            // If so, a transformer is needed before it can be deleted
+            @SuppressWarnings({ "unused", "hiding" })
+            @Deprecated
+            class CouchbaseQuorumCheck implements QuorumCheck {
+                @Override
+                public boolean isQuorate(int sizeHealthy, int totalSize) {
+                    // check members count passed in AND the sensor  
+                    if (sizeHealthy < getQuorumSize()) return false;
+                    return true;
+                }
+            }
+            config().set(UP_QUORUM_CHECK, new CouchbaseClusterImpl.CouchbaseQuorumCheck(this));
+        }
+        super.initEnrichers();
+    }
+    
+    static class CouchbaseQuorumCheck implements QuorumCheck {
+        private final CouchbaseCluster cluster;
+        CouchbaseQuorumCheck(CouchbaseCluster cluster) {
+            this.cluster = cluster;
+        }
+        @Override
+        public boolean isQuorate(int sizeHealthy, int totalSize) {
+            // check members count passed in AND the sensor  
+            if (sizeHealthy < cluster.getQuorumSize()) return false;
+            return true;
+        }
+    }
+    protected void addServers(Set<Entity> serversToAdd) {
+        Preconditions.checkNotNull(serversToAdd);
+        for (Entity s : serversToAdd) {
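+            // up to 12 attempts, 10s apart: roughly two minutes per server before giving up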
+            addServerSeveralTimes(s, 12, Duration.TEN_SECONDS);
+        }
+    }
+
+    /** try adding in a loop because we are seeing spurious port failures in AWS */
+    protected void addServerSeveralTimes(Entity s, int numRetries, Duration delayOnFailure) {
+        try {
+            addServer(s);
+        } catch (Exception e) {
+            Exceptions.propagateIfFatal(e);
+            if (numRetries<=0) throw Exceptions.propagate(e);
+            // retry after a delay because we are getting some odd primary-change events
+            log.warn("Error adding "+s+" to "+this+", "+numRetries+" retries remaining, will retry after delay ("+e+")");
+            Time.sleep(delayOnFailure);
+            addServerSeveralTimes(s, numRetries-1, delayOnFailure);
+        }
+    }
+
+    protected void addServer(Entity serverToAdd) {
+        Preconditions.checkNotNull(serverToAdd);
+        if (serverToAdd.equals(getPrimaryNode())) {
+            // no need to add; but we pass it in anyway because it makes the calling logic easier
+            return;
+        }
+        if (!isMemberInCluster(serverToAdd)) {
+            HostAndPort webAdmin = HostAndPort.fromParts(serverToAdd.getAttribute(SoftwareProcess.SUBNET_HOSTNAME),
+                    serverToAdd.getAttribute(CouchbaseNode.COUCHBASE_WEB_ADMIN_PORT));
+            String username = serverToAdd.getConfig(CouchbaseNode.COUCHBASE_ADMIN_USERNAME);
+            String password = serverToAdd.getConfig(CouchbaseNode.COUCHBASE_ADMIN_PASSWORD);
+
+            if (isClusterInitialized()) {
+                Entities.invokeEffectorWithArgs(this, getPrimaryNode(), CouchbaseNode.SERVER_ADD_AND_REBALANCE, webAdmin.toString(), username, password).getUnchecked();
+            } else {
+                Entities.invokeEffectorWithArgs(this, getPrimaryNode(), CouchbaseNode.SERVER_ADD, webAdmin.toString(), username, password).getUnchecked();
+            }
+            //FIXME check feedback of whether the server was added.
+            ((EntityInternal) serverToAdd).setAttribute(CouchbaseNode.IS_IN_CLUSTER, true);
+        }
+    }
+
+    /** finds the cluster name specified for a node or a cluster, 
+     * using {@link CouchbaseCluster#CLUSTER_NAME} or falling back to the cluster (or node) ID. */
+    public static String getClusterName(Entity node) {
+        String name = node.getConfig(CLUSTER_NAME);
+        if (!Strings.isBlank(name)) return Strings.makeValidFilename(name);
+        return getClusterOrNode(node).getId();
+    }
+    
+    /** returns Couchbase cluster in ancestry, defaulting to the given node if none */
+    @Nonnull public static Entity getClusterOrNode(Entity node) {
+        Iterable<CouchbaseCluster> clusterNodes = Iterables.filter(Entities.ancestors(node), CouchbaseCluster.class);
+        return Iterables.getFirst(clusterNodes, node);
+    }
+    
+    public boolean isClusterInitialized() {
+        return Optional.fromNullable(getAttribute(IS_CLUSTER_INITIALIZED)).or(false);
+    }
+
+    public boolean isMemberInCluster(Entity e) {
+        return Optional.fromNullable(e.getAttribute(CouchbaseNode.IS_IN_CLUSTER)).or(false);
+    }
+    
+    public void createBuckets() {
+        //TODO: check for port conflicts if buckets are being created with a port
+        List<Map<String, Object>> bucketsToCreate = getConfig(CREATE_BUCKETS);
+        if (bucketsToCreate==null) return;
+        
+        Entity primaryNode = getPrimaryNode();
+
+        for (Map<String, Object> bucketMap : bucketsToCreate) {
+            String bucketName = bucketMap.containsKey("bucket") ? (String) bucketMap.get("bucket") : "default";
+            String bucketType = bucketMap.containsKey("bucket-type") ? (String) bucketMap.get("bucket-type") : "couchbase";
+            // the default bucket must be on this port; any other bucket must specify its own unique port
+            Integer bucketPort = bucketMap.containsKey("bucket-port") ? (Integer) bucketMap.get("bucket-port") : 11211;
+            Integer bucketRamSize = bucketMap.containsKey("bucket-ramsize") ? (Integer) bucketMap.get("bucket-ramsize") : 100;
+            Integer bucketReplica = bucketMap.containsKey("bucket-replica") ? (Integer) bucketMap.get("bucket-replica") : 1;
+
+            createBucket(primaryNode, bucketName, bucketType, bucketPort, bucketRamSize, bucketReplica);
+        }
+    }
+
+    public void createBucket(final Entity primaryNode, final String bucketName, final String bucketType, final Integer bucketPort, final Integer bucketRamSize, final Integer bucketReplica) {
+        DynamicTasks.queueIfPossible(TaskBuilder.<Void>builder().name("Creating bucket " + bucketName).body(
+                new Callable<Void>() {
+                    @Override
+                    public Void call() throws Exception {
+                        DependentConfiguration.waitInTaskForAttributeReady(CouchbaseClusterImpl.this, CouchbaseCluster.BUCKET_CREATION_IN_PROGRESS, Predicates.equalTo(false));
+                        if (CouchbaseClusterImpl.this.resetBucketCreation.get() != null) {
+                            CouchbaseClusterImpl.this.resetBucketCreation.get().stop();
+                        }
+                        setAttribute(CouchbaseCluster.BUCKET_CREATION_IN_PROGRESS, true);
+                        HostAndPort hostAndPort = BrooklynAccessUtils.getBrooklynAccessibleAddress(primaryNode, primaryNode.getAttribute(CouchbaseNode.COUCHBASE_WEB_ADMIN_PORT));
+
+                        CouchbaseClusterImpl.this.resetBucketCreation.set(HttpFeed.builder()
+                                .entity(CouchbaseClusterImpl.this)
+                                .period(500, TimeUnit.MILLISECONDS)
+                                .baseUri(String.format("http://%s/pools/default/buckets/%s", hostAndPort, bucketName))
+                                .credentials(primaryNode.getConfig(CouchbaseNode.COUCHBASE_ADMIN_USERNAME), primaryNode.getConfig(CouchbaseNode.COUCHBASE_ADMIN_PASSWORD))
+                                .poll(new HttpPollConfig<Boolean>(BUCKET_CREATION_IN_PROGRESS)
+                                        .onSuccess(Functionals.chain(HttpValueFunctions.jsonContents(), JsonFunctions.walkN("nodes"), new Function<JsonElement, Boolean>() {
+                                            @Override
+                                            public Boolean apply(JsonElement input) {
+                                                // Wait until the bucket has been created on all nodes and the couchApiBase element has been published (indicating that the bucket is usable)
+                                                JsonArray servers = input.getAsJsonArray();
+                                                if (servers.size() != CouchbaseClusterImpl.this.getMembers().size()) {
+                                                    return true;
+                                                }
+                                                for (JsonElement server : servers) {
+                                                    Object api = server.getAsJsonObject().get("couchApiBase");
+                                                    if (api == null || Strings.isEmpty(String.valueOf(api))) {
+                                                        return true;
+                                                    }
+                                                }
+                                                return false;
+                                            }
+                                        }))
+                                        .onFailureOrException(new Function<Object, Boolean>() {
+                                            @Override
+                                            public Boolean apply(Object input) {
+                                                if (input instanceof brooklyn.util.http.HttpToolResponse) {
+                                                    if (((brooklyn.util.http.HttpToolResponse) input).getResponseCode() == 404) {
+                                                        return true;
+                                                    }
+                                                }
+                                                if (input instanceof Throwable)
+                                                    Exceptions.propagate((Throwable) input);
+                                                throw new IllegalStateException("Unexpected response when creating bucket: " + input);
+                                            }
+                                        }))
+                                .build());
+
+                        // TODO: Bail out if bucket creation fails, to allow next bucket to proceed
+                        Entities.invokeEffectorWithArgs(CouchbaseClusterImpl.this, primaryNode, CouchbaseNode.BUCKET_CREATE, bucketName, bucketType, bucketPort, bucketRamSize, bucketReplica);
+                        DependentConfiguration.waitInTaskForAttributeReady(CouchbaseClusterImpl.this, CouchbaseCluster.BUCKET_CREATION_IN_PROGRESS, Predicates.equalTo(false));
+                        if (CouchbaseClusterImpl.this.resetBucketCreation.get() != null) {
+                            CouchbaseClusterImpl.this.resetBucketCreation.get().stop();
+                        }
+                        return null;
+                    }
+                }
+        ).build()).orSubmitAndBlock();
+    }
+    
+    static {
+        RendererHints.register(COUCH_DOCS_DATA_SIZE_PER_NODE, RendererHints.displayValue(ByteSizeStrings.metric()));
+        RendererHints.register(COUCH_DOCS_ACTUAL_DISK_SIZE_PER_NODE, RendererHints.displayValue(ByteSizeStrings.metric()));
+        RendererHints.register(MEM_USED_PER_NODE, RendererHints.displayValue(ByteSizeStrings.metric()));
+        RendererHints.register(COUCH_VIEWS_ACTUAL_DISK_SIZE_PER_NODE, RendererHints.displayValue(ByteSizeStrings.metric()));
+        RendererHints.register(COUCH_VIEWS_DATA_SIZE_PER_NODE, RendererHints.displayValue(ByteSizeStrings.metric()));
+    }
+}
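
For illustration, a minimal standalone sketch of the bucket-readiness check that the
HttpFeed in createBucket() above performs: poll /pools/default/buckets/<bucket> and treat
the bucket as ready once every node publishes a couchApiBase. This is a hedged sketch
using plain Gson and java.net (Java 8); it is not part of this commit, and the class and
method names are hypothetical.

    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    import com.google.gson.JsonElement;
    import com.google.gson.JsonObject;
    import com.google.gson.JsonParser;

    public class BucketReadinessSketch {
        /** True once the bucket exists and every node reports a couchApiBase for it. */
        public static boolean isBucketReady(String host, int port, String bucket,
                String user, String pass, int expectedNodes) throws Exception {
            URL url = new URL(String.format("http://%s:%d/pools/default/buckets/%s", host, port, bucket));
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            String auth = Base64.getEncoder().encodeToString(
                    (user + ":" + pass).getBytes(StandardCharsets.UTF_8));
            conn.setRequestProperty("Authorization", "Basic " + auth);
            if (conn.getResponseCode() == 404) return false;  // bucket not created yet
            JsonObject body = new JsonParser().parse(
                    new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8)).getAsJsonObject();
            JsonElement nodes = body.get("nodes");
            if (nodes == null || nodes.getAsJsonArray().size() != expectedNodes) return false;  // not yet on every node
            for (JsonElement node : nodes.getAsJsonArray()) {
                JsonElement api = node.getAsJsonObject().get("couchApiBase");
                if (api == null || api.getAsString().isEmpty()) return false;  // not yet usable on this node
            }
            return true;
        }
    }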

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseNode.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseNode.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseNode.java
new file mode 100644
index 0000000..16434fa
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseNode.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchbase;
+
+import java.net.URI;
+
+import org.apache.brooklyn.catalog.Catalog;
+import brooklyn.config.ConfigKey;
+import brooklyn.config.render.RendererHints;
+import brooklyn.entity.annotation.Effector;
+import brooklyn.entity.annotation.EffectorParam;
+import brooklyn.entity.basic.Attributes;
+import brooklyn.entity.basic.ConfigKeys;
+import brooklyn.entity.basic.MethodEffector;
+import brooklyn.entity.basic.SoftwareProcess;
+import brooklyn.entity.effector.Effectors;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
+import brooklyn.event.basic.PortAttributeSensorAndConfigKey;
+import brooklyn.event.basic.Sensors;
+import brooklyn.util.flags.SetFromFlag;
+import brooklyn.util.text.ByteSizeStrings;
+
+@Catalog(name="CouchBase Node", description="Couchbase Server is an open source, distributed (shared-nothing architecture) "
+        + "NoSQL document-oriented database that is optimized for interactive applications.")
+@ImplementedBy(CouchbaseNodeImpl.class)
+public interface CouchbaseNode extends SoftwareProcess {
+
+    @SetFromFlag("adminUsername")
+    ConfigKey<String> COUCHBASE_ADMIN_USERNAME = ConfigKeys.newStringConfigKey("couchbase.adminUsername", "Username for the admin user on the node", "Administrator");
+
+    @SetFromFlag("adminPassword")
+    ConfigKey<String> COUCHBASE_ADMIN_PASSWORD = ConfigKeys.newStringConfigKey("couchbase.adminPassword", "Password for the admin user on the node", "Password");
+
+    @SetFromFlag("version")
+    ConfigKey<String> SUGGESTED_VERSION = ConfigKeys.newConfigKeyWithDefault(SoftwareProcess.SUGGESTED_VERSION,
+            "3.0.0");
+
+    @SetFromFlag("enterprise")
+    ConfigKey<Boolean> USE_ENTERPRISE = ConfigKeys.newBooleanConfigKey("couchbase.enterprise.enabled",
+        "Whether to use Couchbase Enterprise; if false uses the community version. Defaults to true.", true);
+
+    @SetFromFlag("downloadUrl")
+    BasicAttributeSensorAndConfigKey<String> DOWNLOAD_URL = new BasicAttributeSensorAndConfigKey<String>(
+            SoftwareProcess.DOWNLOAD_URL, "http://packages.couchbase.com/releases/${version}/"
+                + "couchbase-server-${driver.communityOrEnterprise}${driver.downloadLinkPreVersionSeparator}${version}${driver.downloadLinkOsTagWithPrefix}");
+
+    @SetFromFlag("clusterInitRamSize")
+    BasicAttributeSensorAndConfigKey<Integer> COUCHBASE_CLUSTER_INIT_RAM_SIZE = new BasicAttributeSensorAndConfigKey<Integer>(
+            Integer.class, "couchbase.clusterInitRamSize", "initial ram size of the cluster", 300);
+
+    PortAttributeSensorAndConfigKey COUCHBASE_WEB_ADMIN_PORT = new PortAttributeSensorAndConfigKey("couchbase.webAdminPort", "Web Administration Port", "8091+");
+    PortAttributeSensorAndConfigKey COUCHBASE_API_PORT = new PortAttributeSensorAndConfigKey("couchbase.apiPort", "Couchbase API Port", "8092+");
+    PortAttributeSensorAndConfigKey COUCHBASE_INTERNAL_BUCKET_PORT = new PortAttributeSensorAndConfigKey("couchbase.internalBucketPort", "Internal Bucket Port", "11209");
+    PortAttributeSensorAndConfigKey COUCHBASE_INTERNAL_EXTERNAL_BUCKET_PORT = new PortAttributeSensorAndConfigKey("couchbase.internalExternalBucketPort", "Internal/External Bucket Port", "11210");
+    PortAttributeSensorAndConfigKey COUCHBASE_CLIENT_INTERFACE_PROXY = new PortAttributeSensorAndConfigKey("couchbase.clientInterfaceProxy", "Client interface (proxy)", "11211");
+    PortAttributeSensorAndConfigKey COUCHBASE_INCOMING_SSL_PROXY = new PortAttributeSensorAndConfigKey("couchbase.incomingSslProxy", "Incoming SSL Proxy", "11214");
+    PortAttributeSensorAndConfigKey COUCHBASE_INTERNAL_OUTGOING_SSL_PROXY = new PortAttributeSensorAndConfigKey("couchbase.internalOutgoingSslProxy", "Internal Outgoing SSL Proxy", "11215");
+    PortAttributeSensorAndConfigKey COUCHBASE_REST_HTTPS_FOR_SSL = new PortAttributeSensorAndConfigKey("couchbase.internalRestHttpsForSsl", "Internal REST HTTPS for SSL", "18091");
+    PortAttributeSensorAndConfigKey COUCHBASE_CAPI_HTTPS_FOR_SSL = new PortAttributeSensorAndConfigKey("couchbase.internalCapiHttpsForSsl", "Internal CAPI HTTPS for SSL", "18092");
+    PortAttributeSensorAndConfigKey ERLANG_PORT_MAPPER = new PortAttributeSensorAndConfigKey("couchbase.erlangPortMapper", "Erlang Port Mapper Daemon Listener Port (epmd)", "4369");
+    PortAttributeSensorAndConfigKey NODE_DATA_EXCHANGE_PORT_RANGE_START = new PortAttributeSensorAndConfigKey("couchbase.nodeDataExchangePortRangeStart", "Node data exchange Port Range Start", "21100+");
+    PortAttributeSensorAndConfigKey NODE_DATA_EXCHANGE_PORT_RANGE_END = new PortAttributeSensorAndConfigKey("couchbase.nodeDataExchangePortRangeEnd", "Node data exchange Port Range End", "21199+");
+
+    AttributeSensor<Boolean> IS_PRIMARY_NODE = Sensors.newBooleanSensor("couchbase.isPrimaryNode", "Flag indicating whether this Couchbase node is the primary node for the cluster");
+    AttributeSensor<Boolean> IS_IN_CLUSTER = Sensors.newBooleanSensor("couchbase.isInCluster", "Flag indicating whether this Couchbase node has been added to a cluster, "
+        + "including being the first / primary node");
+    AttributeSensor<URI> COUCHBASE_WEB_ADMIN_URL = Attributes.MAIN_URI;
+    
+    // Interesting stats
+    AttributeSensor<Double> OPS = Sensors.newDoubleSensor("couchbase.stats.ops", 
+            "Retrieved from pools/nodes/<current node>/interestingStats/ops");
+    AttributeSensor<Long> COUCH_DOCS_DATA_SIZE = Sensors.newLongSensor("couchbase.stats.couch.docs.data.size", 
+            "Retrieved from pools/nodes/<current node>/interestingStats/couch_docs_data_size");
+    AttributeSensor<Long> COUCH_DOCS_ACTUAL_DISK_SIZE = Sensors.newLongSensor("couchbase.stats.couch.docs.actual.disk.size", 
+            "Retrieved from pools/nodes/<current node>/interestingStats/couch_docs_actual_disk_size");
+    AttributeSensor<Long> EP_BG_FETCHED = Sensors.newLongSensor("couchbase.stats.ep.bg.fetched", 
+            "Retrieved from pools/nodes/<current node>/interestingStats/ep_bg_fetched");
+    AttributeSensor<Long> MEM_USED = Sensors.newLongSensor("couchbase.stats.mem.used", 
+            "Retrieved from pools/nodes/<current node>/interestingStats/mem_used");
+    AttributeSensor<Long> COUCH_VIEWS_ACTUAL_DISK_SIZE = Sensors.newLongSensor("couchbase.stats.couch.views.actual.disk.size", 
+            "Retrieved from pools/nodes/<current node>/interestingStats/couch_views_actual_disk_size");
+    AttributeSensor<Long> CURR_ITEMS = Sensors.newLongSensor("couchbase.stats.curr.items", 
+            "Retrieved from pools/nodes/<current node>/interestingStats/curr_items");
+    AttributeSensor<Long> VB_REPLICA_CURR_ITEMS = Sensors.newLongSensor("couchbase.stats.vb.replica.curr.items", 
+            "Retrieved from pools/nodes/<current node>/interestingStats/vb_replica_curr_items");
+    AttributeSensor<Long> COUCH_VIEWS_DATA_SIZE = Sensors.newLongSensor("couchbase.stats.couch.views.data.size", 
+            "Retrieved from pools/nodes/<current node>/interestingStats/couch_views_data_size");
+    AttributeSensor<Long> GET_HITS = Sensors.newLongSensor("couchbase.stats.get.hits", 
+            "Retrieved from pools/nodes/<current node>/interestingStats/get_hits");
+    AttributeSensor<Double> CMD_GET = Sensors.newDoubleSensor("couchbase.stats.cmd.get", 
+            "Retrieved from pools/nodes/<current node>/interestingStats/cmd_get");
+    AttributeSensor<Long> CURR_ITEMS_TOT = Sensors.newLongSensor("couchbase.stats.curr.items.tot", 
+            "Retrieved from pools/nodes/<current node>/interestingStats/curr_items_tot");
+    AttributeSensor<String> REBALANCE_STATUS = Sensors.newStringSensor("couchbase.rebalance.status", 
+            "Displays the current rebalance status from pools/nodes/rebalanceStatus");
+    
+    class MainUri {
+        public static final AttributeSensor<URI> MAIN_URI = Attributes.MAIN_URI;
+        
+        static {
+            // ROOT_URL does not need init because it refers to something already initialized
+            RendererHints.register(COUCHBASE_WEB_ADMIN_URL, RendererHints.namedActionWithUrl());
+
+            RendererHints.register(COUCH_DOCS_DATA_SIZE, RendererHints.displayValue(ByteSizeStrings.metric()));
+            RendererHints.register(COUCH_DOCS_ACTUAL_DISK_SIZE, RendererHints.displayValue(ByteSizeStrings.metric()));
+            RendererHints.register(MEM_USED, RendererHints.displayValue(ByteSizeStrings.metric()));
+            RendererHints.register(COUCH_VIEWS_ACTUAL_DISK_SIZE, RendererHints.displayValue(ByteSizeStrings.metric()));
+            RendererHints.register(COUCH_VIEWS_DATA_SIZE, RendererHints.displayValue(ByteSizeStrings.metric()));
+        }
+    }
+    
+    // this long-winded reference is done just to trigger the initialization above
+    AttributeSensor<URI> MAIN_URI = MainUri.MAIN_URI;
+
+    MethodEffector<Void> SERVER_ADD = new MethodEffector<Void>(CouchbaseNode.class, "serverAdd");
+    MethodEffector<Void> SERVER_ADD_AND_REBALANCE = new MethodEffector<Void>(CouchbaseNode.class, "serverAddAndRebalance");
+    MethodEffector<Void> REBALANCE = new MethodEffector<Void>(CouchbaseNode.class, "rebalance");
+    MethodEffector<Void> BUCKET_CREATE = new MethodEffector<Void>(CouchbaseNode.class, "bucketCreate");
+    brooklyn.entity.Effector<Void> ADD_REPLICATION_RULE = Effectors.effector(Void.class, "addReplicationRule")
+        .description("Adds a replication rule from the indicated bucket on the cluster where this node is located "
+            + "to the indicated cluster and optional destination bucket")
+        .parameter(String.class, "fromBucket", "Bucket to be replicated")
+        .parameter(Object.class, "toCluster", "Entity (or ID) of the cluster to which this should replicate")
+        .parameter(String.class, "toBucket", "Destination bucket for replication in the toCluster, defaulting to the same as the fromBucket")
+        .buildAbstract();
+
+    @Effector(description = "add a server to a cluster")
+    public void serverAdd(@EffectorParam(name = "serverHostname") String serverToAdd, @EffectorParam(name = "username") String username, @EffectorParam(name = "password") String password);
+    
+    @Effector(description = "add a server to a cluster, and immediately rebalances")
+    public void serverAddAndRebalance(@EffectorParam(name = "serverHostname") String serverToAdd, @EffectorParam(name = "username") String username, @EffectorParam(name = "password") String password);
+
+    @Effector(description = "rebalance the couchbase cluster")
+    public void rebalance();
+    
+    @Effector(description = "create a new bucket")
+    public void bucketCreate(@EffectorParam(name = "bucketName") String bucketName, @EffectorParam(name = "bucketType") String bucketType, 
+            @EffectorParam(name = "bucketPort") Integer bucketPort, @EffectorParam(name = "bucketRamSize") Integer bucketRamSize, 
+            @EffectorParam(name = "bucketReplica") Integer bucketReplica);
+
+}
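
For illustration, the MethodEffector constants above are what callers invoke;
CouchbaseClusterImpl.createBucket() does exactly this via Entities.invokeEffectorWithArgs.
A hedged sketch of a caller follows (the surrounding entity wiring is elided; the class
name and the `caller`/`node` placeholders are hypothetical, and the argument values
mirror the defaults used in CouchbaseClusterImpl.createBuckets()):

    import brooklyn.entity.Entity;
    import brooklyn.entity.basic.Entities;
    import brooklyn.entity.basic.EntityLocal;
    import org.apache.brooklyn.entity.nosql.couchbase.CouchbaseNode;

    class BucketCreateCallerSketch {
        /** Sketch only: create the default bucket on a managed CouchbaseNode. */
        static void createDefaultBucket(EntityLocal caller, Entity node) {
            Entities.invokeEffectorWithArgs(caller, node, CouchbaseNode.BUCKET_CREATE,
                    "default",    // bucketName
                    "couchbase",  // bucketType
                    11211,        // bucketPort (the default bucket's fixed port)
                    100,          // bucketRamSize
                    1)            // bucketReplica
                .getUnchecked();
        }
    }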

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseNodeDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseNodeDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseNodeDriver.java
new file mode 100644
index 0000000..1ff28fd
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseNodeDriver.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchbase;
+
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.SoftwareProcessDriver;
+
+public interface CouchbaseNodeDriver extends SoftwareProcessDriver {
+    public String getOsTag();
+    public String getDownloadLinkPreVersionSeparator();
+    public String getDownloadLinkOsTagWithPrefix();
+    
+    public String getCommunityOrEnterprise();
+
+    public void serverAdd(String serverToAdd, String username, String password);
+
+    public void rebalance();
+    
+    public void bucketCreate(String bucketName, String bucketType, Integer bucketPort, Integer bucketRamSize, Integer bucketReplica);
+
+    public void serverAddAndRebalance(String serverToAdd, String username, String password);
+
+    public void addReplicationRule(Entity toCluster, String fromBucket, String toBucket);
+
+}
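
For context, getCommunityOrEnterprise, getDownloadLinkPreVersionSeparator and getOsTag
exist to fill the DOWNLOAD_URL template declared on CouchbaseNode. A hedged sketch of how
the pieces compose for two of the example links listed later in this commit (the template
string is reproduced by hand here, purely for illustration):

    class DownloadLinkSketch {
        public static void main(String[] args) {
            // couchbase-server-${communityOrEnterprise}${preVersionSeparator}${version}${osTagWithPrefix}
            String base = "http://packages.couchbase.com/releases/%s/couchbase-server-%s%s%s%s";

            // 3.0.0 community .deb for Ubuntu 12.04: pre-version separator "_":
            System.out.println(String.format(base, "3.0.0", "community", "_", "3.0.0", "-ubuntu12.04_amd64.deb"));
            // -> .../3.0.0/couchbase-server-community_3.0.0-ubuntu12.04_amd64.deb

            // 3.0.0 community .rpm for CentOS 6: RPM uses "-" before the version:
            System.out.println(String.format(base, "3.0.0", "community", "-", "3.0.0", "-centos6.x86_64.rpm"));
            // -> .../3.0.0/couchbase-server-community-3.0.0-centos6.x86_64.rpm
        }
    }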

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseNodeImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseNodeImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseNodeImpl.java
new file mode 100644
index 0000000..a0654db
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseNodeImpl.java
@@ -0,0 +1,269 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchbase;
+
+import static java.lang.String.format;
+
+import java.net.URI;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.http.auth.UsernamePasswordCredentials;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.Attributes;
+import brooklyn.entity.basic.SoftwareProcessImpl;
+import brooklyn.entity.effector.EffectorBody;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.SensorEvent;
+import brooklyn.event.SensorEventListener;
+import brooklyn.event.feed.http.HttpFeed;
+import brooklyn.event.feed.http.HttpPollConfig;
+import brooklyn.event.feed.http.HttpValueFunctions;
+import brooklyn.event.feed.http.JsonFunctions;
+import brooklyn.location.MachineProvisioningLocation;
+import brooklyn.location.access.BrooklynAccessUtils;
+import brooklyn.location.cloud.CloudLocationConfig;
+import brooklyn.util.collections.MutableMap;
+import brooklyn.util.collections.MutableSet;
+import brooklyn.util.config.ConfigBag;
+import brooklyn.util.exceptions.Exceptions;
+import brooklyn.util.guava.Functionals;
+import brooklyn.util.guava.MaybeFunctions;
+import brooklyn.util.guava.TypeTokens;
+import brooklyn.util.http.HttpTool;
+import brooklyn.util.http.HttpToolResponse;
+import brooklyn.util.net.Urls;
+import brooklyn.util.task.Tasks;
+import brooklyn.util.text.Strings;
+import brooklyn.util.time.Duration;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Function;
+import com.google.common.base.Functions;
+import com.google.common.base.Preconditions;
+import com.google.common.net.HostAndPort;
+import com.google.common.net.HttpHeaders;
+import com.google.common.net.MediaType;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonElement;
+
+public class CouchbaseNodeImpl extends SoftwareProcessImpl implements CouchbaseNode {
+
+    private static final Logger log = LoggerFactory.getLogger(CouchbaseNodeImpl.class);
+
+    private volatile HttpFeed httpFeed;
+
+    @Override
+    public Class<CouchbaseNodeDriver> getDriverInterface() {
+        return CouchbaseNodeDriver.class;
+    }
+
+    @Override
+    public CouchbaseNodeDriver getDriver() {
+        return (CouchbaseNodeDriver) super.getDriver();
+    }
+
+    @Override
+    public void init() {
+        super.init();
+
+        subscribe(this, Attributes.SERVICE_UP, new SensorEventListener<Boolean>() {
+            @Override
+            public void onEvent(SensorEvent<Boolean> booleanSensorEvent) {
+                if (Boolean.TRUE.equals(booleanSensorEvent.getValue())) {
+                    Integer webPort = getAttribute(CouchbaseNode.COUCHBASE_WEB_ADMIN_PORT);
+                    Preconditions.checkNotNull(webPort, CouchbaseNode.COUCHBASE_WEB_ADMIN_PORT+" not set for %s; is an acceptable port available?", this);
+                    String hostAndPort = BrooklynAccessUtils.getBrooklynAccessibleAddress(CouchbaseNodeImpl.this, webPort).toString();
+                    setAttribute(CouchbaseNode.COUCHBASE_WEB_ADMIN_URL, URI.create(format("http://%s", hostAndPort)));
+                }
+            }
+        });
+
+        getMutableEntityType().addEffector(ADD_REPLICATION_RULE, new EffectorBody<Void>() {
+            @Override
+            public Void call(ConfigBag parameters) {
+                addReplicationRule(parameters);
+                return null;
+            }
+        });
+    }
+
+    protected Map<String, Object> obtainProvisioningFlags(@SuppressWarnings("rawtypes") MachineProvisioningLocation location) {
+        ConfigBag result = ConfigBag.newInstance(super.obtainProvisioningFlags(location));
+        result.configure(CloudLocationConfig.OS_64_BIT, true);
+        return result.getAllConfig();
+    }
+
+    @Override
+    protected Collection<Integer> getRequiredOpenPorts() {
+        // TODO this creates a huge list of inbound ports; much better to define on a security group using range syntax!
+        int erlangRangeStart = getConfig(NODE_DATA_EXCHANGE_PORT_RANGE_START).iterator().next();
+        int erlangRangeEnd = getConfig(NODE_DATA_EXCHANGE_PORT_RANGE_END).iterator().next();
+
+        Set<Integer> newPorts = MutableSet.<Integer>copyOf(super.getRequiredOpenPorts());
+        newPorts.remove(erlangRangeStart);
+        newPorts.remove(erlangRangeEnd);
+        for (int i = erlangRangeStart; i <= erlangRangeEnd; i++)
+            newPorts.add(i);
+        return newPorts;
+    }
+
+    @Override
+    public void serverAdd(String serverToAdd, String username, String password) {
+        getDriver().serverAdd(serverToAdd, username, password);
+    }
+
+    @Override
+    public void serverAddAndRebalance(String serverToAdd, String username, String password) {
+        getDriver().serverAddAndRebalance(serverToAdd, username, password);
+    }
+
+    @Override
+    public void rebalance() {
+        getDriver().rebalance();
+    }
+
+    protected final static Function<HttpToolResponse, JsonElement> GET_THIS_NODE_STATS = Functionals.chain(
+        HttpValueFunctions.jsonContents(),
+        JsonFunctions.walk("nodes"),
+        new Function<JsonElement, JsonElement>() {
+            @Override public JsonElement apply(JsonElement input) {
+                JsonArray nodes = input.getAsJsonArray();
+                for (JsonElement element : nodes) {
+                    JsonElement thisNode = element.getAsJsonObject().get("thisNode");
+                    if (thisNode!=null && Boolean.TRUE.equals(thisNode.getAsBoolean())) {
+                        return element.getAsJsonObject().get("interestingStats");
+                    }
+                }
+                return null;
+            }
+        }
+    );
+
+    protected final static <T> HttpPollConfig<T> getSensorFromNodeStat(AttributeSensor<T> sensor, String ...jsonPath) {
+        return new HttpPollConfig<T>(sensor)
+            .onSuccess(Functionals.chain(GET_THIS_NODE_STATS,
+                MaybeFunctions.<JsonElement>wrap(),
+                JsonFunctions.walkM(jsonPath),
+                JsonFunctions.castM(TypeTokens.getRawRawType(sensor.getTypeToken()), null)))
+            .onFailureOrException(Functions.<T>constant(null));
+    }
+
+    @Override
+    protected void postStart() {
+        super.postStart();
+        renameServerToPublicHostname();
+    }
+
+    protected void renameServerToPublicHostname() {
+        // http://docs.couchbase.com/couchbase-manual-2.5/cb-install/#couchbase-getting-started-hostnames
+        URI apiUri = null;
+        try {
+            HostAndPort accessible = BrooklynAccessUtils.getBrooklynAccessibleAddress(this, getAttribute(COUCHBASE_WEB_ADMIN_PORT));
+            apiUri = URI.create(String.format("http://%s:%d/node/controller/rename", accessible.getHostText(), accessible.getPort()));
+            UsernamePasswordCredentials credentials = new UsernamePasswordCredentials(getConfig(COUCHBASE_ADMIN_USERNAME), getConfig(COUCHBASE_ADMIN_PASSWORD));
+            HttpToolResponse response = HttpTool.httpPost(
+                    // the uri is required by the HttpClientBuilder in order to set the AuthScope of the credentials
+                    HttpTool.httpClientBuilder().uri(apiUri).credentials(credentials).build(),
+                    apiUri,
+                    MutableMap.of(
+                            HttpHeaders.CONTENT_TYPE, MediaType.FORM_DATA.toString(),
+                            HttpHeaders.ACCEPT, "*/*",
+                            // this appears needed; without it we get org.apache.http.NoHttpResponseException !?
+                            HttpHeaders.AUTHORIZATION, HttpTool.toBasicAuthorizationValue(credentials)),
+                    Charsets.UTF_8.encode("hostname="+Urls.encode(accessible.getHostText())).array());
+            log.debug("Renamed Couchbase server "+this+" via "+apiUri+": "+response);
+            if (!HttpTool.isStatusCodeHealthy(response.getResponseCode())) {
+                log.warn("Invalid response code, renaming "+apiUri+": "+response);
+            }
+        } catch (Exception e) {
+            Exceptions.propagateIfFatal(e);
+            log.warn("Error renaming server, using "+apiUri+": "+e, e);
+        }
+    }
+
+    public void connectSensors() {
+        super.connectSensors();
+        connectServiceUpIsRunning();
+
+        HostAndPort hostAndPort = BrooklynAccessUtils.getBrooklynAccessibleAddress(this, this.getAttribute(CouchbaseNode.COUCHBASE_WEB_ADMIN_PORT));
+        httpFeed = HttpFeed.builder()
+            .entity(this)
+            .period(Duration.seconds(3))
+            .baseUri("http://" + hostAndPort + "/pools/nodes/")
+            .credentialsIfNotNull(getConfig(CouchbaseNode.COUCHBASE_ADMIN_USERNAME), getConfig(CouchbaseNode.COUCHBASE_ADMIN_PASSWORD))
+            .poll(getSensorFromNodeStat(CouchbaseNode.OPS, "ops"))
+            .poll(getSensorFromNodeStat(CouchbaseNode.COUCH_DOCS_DATA_SIZE, "couch_docs_data_size"))
+            .poll(getSensorFromNodeStat(CouchbaseNode.COUCH_DOCS_ACTUAL_DISK_SIZE, "couch_docs_actual_disk_size"))
+            .poll(getSensorFromNodeStat(CouchbaseNode.EP_BG_FETCHED, "ep_bg_fetched"))
+            .poll(getSensorFromNodeStat(CouchbaseNode.MEM_USED, "mem_used"))
+            .poll(getSensorFromNodeStat(CouchbaseNode.COUCH_VIEWS_ACTUAL_DISK_SIZE, "couch_views_actual_disk_size"))
+            .poll(getSensorFromNodeStat(CouchbaseNode.CURR_ITEMS, "curr_items"))
+            .poll(getSensorFromNodeStat(CouchbaseNode.VB_REPLICA_CURR_ITEMS, "vb_replica_curr_items"))
+            .poll(getSensorFromNodeStat(CouchbaseNode.COUCH_VIEWS_DATA_SIZE, "couch_views_data_size"))
+            .poll(getSensorFromNodeStat(CouchbaseNode.GET_HITS, "get_hits"))
+            .poll(getSensorFromNodeStat(CouchbaseNode.CMD_GET, "cmd_get"))
+            .poll(getSensorFromNodeStat(CouchbaseNode.CURR_ITEMS_TOT, "curr_items_tot"))
+            .poll(new HttpPollConfig<String>(CouchbaseNode.REBALANCE_STATUS)
+                    .onSuccess(HttpValueFunctions.jsonContents("rebalanceStatus", String.class))
+                    .onFailureOrException(Functions.constant("Could not retrieve")))
+            .build();
+    }
+
+    public void disconnectSensors() {
+        super.disconnectSensors();
+        disconnectServiceUpIsRunning();
+        if (httpFeed != null) {
+            httpFeed.stop();
+        }
+    }
+
+    @Override
+    public void bucketCreate(String bucketName, String bucketType, Integer bucketPort, Integer bucketRamSize, Integer bucketReplica) {
+        if (Strings.isBlank(bucketType)) bucketType = "couchbase";
+        if (bucketRamSize==null || bucketRamSize<=0) bucketRamSize = 200;
+        if (bucketReplica==null || bucketReplica<0) bucketReplica = 1;
+
+        getDriver().bucketCreate(bucketName, bucketType, bucketPort, bucketRamSize, bucketReplica);
+    }
+
+    /** exposed through {@link CouchbaseNode#ADD_REPLICATION_RULE} */
+    protected void addReplicationRule(ConfigBag ruleArgs) {
+        Object toClusterO = Preconditions.checkNotNull(ruleArgs.getStringKey("toCluster"), "toCluster must not be null");
+        if (toClusterO instanceof String) {
+            toClusterO = getManagementContext().lookup((String)toClusterO);
+        }
+        Entity toCluster = Tasks.resolving(toClusterO, Entity.class).context(getExecutionContext()).get();
+
+        String fromBucket = Preconditions.checkNotNull( (String)ruleArgs.getStringKey("fromBucket"), "fromBucket must be specified" );
+
+        String toBucket = (String)ruleArgs.getStringKey("toBucket");
+        if (toBucket==null) toBucket = fromBucket;
+
+        if (!ruleArgs.getUnusedConfig().isEmpty()) {
+            throw new IllegalArgumentException("Unsupported replication rule data: "+ruleArgs.getUnusedConfig());
+        }
+
+        getDriver().addReplicationRule(toCluster, fromBucket, toBucket);
+    }
+
+}
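
For illustration, GET_THIS_NODE_STATS above walks the /pools/nodes/ JSON for the entry
flagged "thisNode" and returns its "interestingStats" object, which the per-sensor polls
then index into. A hedged plain-Gson sketch of the same extraction (the class and method
names are hypothetical, not part of this commit):

    import com.google.gson.JsonElement;
    import com.google.gson.JsonObject;
    import com.google.gson.JsonParser;

    class NodeStatsSketch {
        /** Returns interestingStats for the node flagged "thisNode", or null if absent. */
        static JsonObject thisNodeStats(String poolsNodesJson) {
            JsonObject root = new JsonParser().parse(poolsNodesJson).getAsJsonObject();
            for (JsonElement node : root.getAsJsonArray("nodes")) {
                JsonObject obj = node.getAsJsonObject();
                JsonElement thisNode = obj.get("thisNode");
                if (thisNode != null && thisNode.getAsBoolean()) {
                    return obj.getAsJsonObject("interestingStats");  // e.g. {"ops":0.0,"mem_used":123456,...}
                }
            }
            return null;  // no node flagged as local
        }
    }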

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseNodeSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseNodeSshDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseNodeSshDriver.java
new file mode 100644
index 0000000..73ed934
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseNodeSshDriver.java
@@ -0,0 +1,512 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchbase;
+
+import static brooklyn.util.ssh.BashCommands.*;
+import static java.lang.String.format;
+
+import java.net.URI;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.Callable;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+
+import org.apache.http.auth.UsernamePasswordCredentials;
+
+import brooklyn.entity.Entity;
+import brooklyn.entity.Group;
+import brooklyn.entity.basic.AbstractSoftwareProcessSshDriver;
+import brooklyn.entity.basic.Attributes;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.basic.ServiceStateLogic;
+import brooklyn.entity.drivers.downloads.BasicDownloadRequirement;
+import brooklyn.entity.drivers.downloads.DownloadProducerFromUrlAttribute;
+import brooklyn.entity.software.SshEffectorTasks;
+import brooklyn.event.basic.DependentConfiguration;
+import brooklyn.event.feed.http.HttpValueFunctions;
+import brooklyn.location.OsDetails;
+import brooklyn.location.access.BrooklynAccessUtils;
+import brooklyn.location.basic.SshMachineLocation;
+import brooklyn.management.Task;
+import brooklyn.util.collections.MutableMap;
+import brooklyn.util.http.HttpTool;
+import brooklyn.util.http.HttpToolResponse;
+import brooklyn.util.repeat.Repeater;
+import brooklyn.util.ssh.BashCommands;
+import brooklyn.util.task.DynamicTasks;
+import brooklyn.util.task.TaskBuilder;
+import brooklyn.util.task.TaskTags;
+import brooklyn.util.task.Tasks;
+import brooklyn.util.text.NaturalOrderComparator;
+import brooklyn.util.text.StringEscapes.BashStringEscapes;
+import brooklyn.util.text.Strings;
+import brooklyn.util.time.Duration;
+
+import com.google.common.base.Function;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.net.HostAndPort;
+
+public class CouchbaseNodeSshDriver extends AbstractSoftwareProcessSshDriver implements CouchbaseNodeDriver {
+
+    public CouchbaseNodeSshDriver(final CouchbaseNodeImpl entity, final SshMachineLocation machine) {
+        super(entity, machine);
+    }
+
+    public static String couchbaseCli(String cmd) {
+        return "/opt/couchbase/bin/couchbase-cli " + cmd + " ";
+    }
+
+    @Override
+    public void preInstall() {
+        resolver = Entities.newDownloader(this);
+        setExpandedInstallDir(getInstallDir());
+    }
+
+    @Override
+    public void install() {
+        //for reference https://github.com/urbandecoder/couchbase/blob/master/recipes/server.rb
+        //installation instructions (http://docs.couchbase.com/couchbase-manual-2.5/cb-install/#preparing-to-install)
+
+        List<String> urls = resolver.getTargets();
+        String saveAs = resolver.getFilename();
+
+        OsDetails osDetails = getMachine().getMachineDetails().getOsDetails();
+
+        if (osDetails.isLinux()) {
+            List<String> commands = installLinux(urls, saveAs);
+            //FIXME installation returns an error, but the server is up and running.
+            newScript(INSTALLING)
+                    .body.append(commands).execute();
+        } else {
+            Tasks.markInessential();
+            throw new IllegalStateException("Unsupported OS for installing Couchbase. Will continue but may fail later.");
+        }
+    }
+
+    private List<String> installLinux(List<String> urls, String saveAs) {
+
+        log.info("Installing " + getEntity() + " using couchbase-server-{} {}", getCommunityOrEnterprise(), getVersion());
+
+        String apt = chainGroup(
+                installPackage(MutableMap.of("apt", "python-httplib2 libssl0.9.8"), null),
+                sudo(format("dpkg -i %s", saveAs)));
+
+        String yum = chainGroup(
+                "which yum",
+                // The following prevents failure on RHEL AWS nodes:
+                // https://forums.aws.amazon.com/thread.jspa?threadID=100509
+                ok(sudo("sed -i.bk s/^enabled=1$/enabled=0/ /etc/yum/pluginconf.d/subscription-manager.conf")),
+                ok(sudo("yum check-update")),
+                sudo("yum install -y pkgconfig"),
+                // RHEL requires openssl version 098
+                sudo("[ -f /etc/redhat-release ] && (grep -i \"red hat\" /etc/redhat-release && sudo yum install -y openssl098e) || :"),
+                sudo(format("rpm --install %s", saveAs)));
+
+        String link = new DownloadProducerFromUrlAttribute().apply(new BasicDownloadRequirement(this)).getPrimaryLocations().iterator().next();
+        return ImmutableList.<String>builder()
+                .add(INSTALL_CURL)
+                .addAll(Arrays.asList(
+                        BashCommands.require(BashCommands.alternatives(BashCommands.simpleDownloadUrlAs(urls, saveAs),
+                                        // Referer link is required for 3.0.0; note mis-spelling is correct, as per http://en.wikipedia.org/wiki/HTTP_referer
+                                        "curl -f -L -k " + BashStringEscapes.wrapBash(link)
+                                                + " -H 'Referer: http://www.couchbase.com/downloads'"
+                                                + " -o " + saveAs),
+                                "Could not retrieve " + saveAs + " (from " + urls.size() + " sites)", 9)))
+                .add(alternatives(apt, yum))
+                .build();
+    }
+
+    @Override
+    public void customize() {
+        //TODO: add linux tweaks for couchbase
+        //http://blog.couchbase.com/often-overlooked-linux-os-tweaks
+        //http://blog.couchbase.com/kirk
+
+        //turn off swappiness
+        //vm.swappiness=0
+        //echo 0 | sudo tee /proc/sys/vm/swappiness  (plain "sudo echo 0 > file" would fail: the redirection is not run as root)
+
+        //os page cache = 20%
+
+        //disable transparent huge pages (THP)
+        //echo never | sudo tee /sys/kernel/mm/transparent_hugepage/enabled
+        //echo never | sudo tee /sys/kernel/mm/transparent_hugepage/defrag
+
+        //limit page cache dirty bytes
+        //control the rate the page cache is flushed ... vm.dirty_*
+    }
+
+    @Override
+    public void launch() {
+        String clusterPrefix = "--cluster-" + (isPreV3() ? "init-" : "");
+        // in v3.0 the cluster arguments changed, and it became mandatory to supply a URL + password (if there is none, these are ignored)
+        newScript(LAUNCHING)
+                .body.append(
+                sudo("/etc/init.d/couchbase-server start"),
+                "for i in {0..120}\n" +
+                        "do\n" +
+                        "    if [ $i -eq 120 ]; then echo REST API unavailable after 120 seconds, failing; exit 1; fi;\n" +
+                        "    curl -s " + String.format("http://localhost:%s", getWebPort()) + " > /dev/null && echo REST API available after $i seconds && break\n" +
+                        "    sleep 1\n" +
+                        "done\n" +
+                        couchbaseCli("cluster-init") +
+                        (isPreV3() ? getCouchbaseHostnameAndPort() : getCouchbaseHostnameAndCredentials()) +
+                        " " + clusterPrefix + "username=" + getUsername() +
+                        " " + clusterPrefix + "password=" + getPassword() +
+                        " " + clusterPrefix + "port=" + getWebPort() +
+                        " " + clusterPrefix + "ramsize=" + getClusterInitRamSize())
+                .execute();
+    }
+
+    @Override
+    public boolean isRunning() {
+        //TODO add a better way to check if couchbase server is running
+        return (newScript(CHECK_RUNNING)
+                .body.append(format("curl -u %s:%s http://localhost:%s/pools/nodes", getUsername(), getPassword(), getWebPort()))
+                .execute() == 0);
+    }
+
+    @Override
+    public void stop() {
+        newScript(STOPPING)
+                .body.append(sudo("/etc/init.d/couchbase-server stop"))
+                .execute();
+    }
+
+    @Override
+    public String getVersion() {
+        return entity.getConfig(CouchbaseNode.SUGGESTED_VERSION);
+    }
+
+    @Override
+    public String getOsTag() {
+        return newDownloadLinkSegmentComputer().getOsTag();
+    }
+
+    protected DownloadLinkSegmentComputer newDownloadLinkSegmentComputer() {
+        return new DownloadLinkSegmentComputer(getLocation().getOsDetails(), !isPreV3(), Strings.toString(getEntity()));
+    }
+
+    public static class DownloadLinkSegmentComputer {
+        // links are:
+        // http://packages.couchbase.com/releases/2.2.0/couchbase-server-community_2.2.0_x86_64.rpm
+        // http://packages.couchbase.com/releases/2.2.0/couchbase-server-community_2.2.0_x86_64.deb
+        // ^^^ preV3 is _ everywhere
+        // http://packages.couchbase.com/releases/3.0.0/couchbase-server-community_3.0.0-ubuntu12.04_amd64.deb
+        // ^^^ most V3 is _${version}-
+        // http://packages.couchbase.com/releases/3.0.0/couchbase-server-community-3.0.0-centos6.x86_64.rpm
+        // ^^^ but RHEL is -${version}-
+
+        @Nullable
+        private final OsDetails os;
+        private final boolean isV3OrLater;
+        @Nonnull
+        private final String context;
+        @Nonnull
+        private final String osName;
+        private final boolean isRpm;
+        private final boolean is64bit;
+
+        public DownloadLinkSegmentComputer(@Nullable OsDetails os, boolean isV3OrLater, @Nonnull String context) {
+            this.os = os;
+            this.isV3OrLater = isV3OrLater;
+            this.context = context;
+            if (os == null) {
+                // guess centos as RPM is sensible default
+                log.warn("No details known for OS of " + context + "; assuming 64-bit RPM distribution of Couchbase");
+                osName = "centos";
+                isRpm = true;
+                is64bit = true;
+                return;
+            }
+            osName = os.getName().toLowerCase();
+            isRpm = !(osName.contains("deb") || osName.contains("ubuntu"));
+            is64bit = os.is64bit();
+        }
+
+        /**
+         * Separator before the version number: "_" prior to 3.0; from 3.0 it is "-" for RPM but remains "_" for deb.
+         */
+        public String getPreVersionSeparator() {
+            if (!isV3OrLater) return "_";
+            if (isRpm) return "-";
+            return "_";
+        }
+
+        public String getOsTag() {
+            // Couchbase only provides packages for certain platforms; on others we make a best guess and try it
+            String family;
+            if (osName.contains("debian")) family = "debian7_";
+            else if (osName.contains("ubuntu")) family = "ubuntu12.04_";
+            else if (osName.contains("centos") || osName.contains("rhel") || (osName.contains("red") && osName.contains("hat")))
+                family = "centos6.";
+            else {
+                log.warn("Unrecognised OS " + os + " of " + context + "; assuming RPM distribution of Couchbase");
+                family = "centos6.";
+            }
+
+            if (!is64bit && !isV3OrLater) {
+                // NB: 32-bit binaries aren't (yet?) available for v3.0
+                log.warn("32-bit binaries for Couchbase might not be available, when deploying " + context);
+            }
+            String arch = !is64bit ? "x86" : !isRpm && isV3OrLater ? "amd64" : "x86_64";
+            String fileExtension = isRpm ? ".rpm" : ".deb";
+
+            if (isV3OrLater)
+                return family + arch + fileExtension;
+            else
+                return arch + fileExtension;
+        }
+
+        public String getOsTagWithPrefix() {
+            return (!isV3OrLater ? "_" : "-") + getOsTag();
+        }
+    }
+
+    @Override
+    public String getDownloadLinkOsTagWithPrefix() {
+        return newDownloadLinkSegmentComputer().getOsTagWithPrefix();
+    }
+
+    @Override
+    public String getDownloadLinkPreVersionSeparator() {
+        return newDownloadLinkSegmentComputer().getPreVersionSeparator();
+    }
+
+    private boolean isPreV3() {
+        return NaturalOrderComparator.INSTANCE.compare(getEntity().getConfig(CouchbaseNode.SUGGESTED_VERSION), "3.0") < 0;
+    }
+
+    @Override
+    public String getCommunityOrEnterprise() {
+        Boolean isEnterprise = getEntity().getConfig(CouchbaseNode.USE_ENTERPRISE);
+        return isEnterprise ? "enterprise" : "community";
+    }
+
+    private String getUsername() {
+        return entity.getConfig(CouchbaseNode.COUCHBASE_ADMIN_USERNAME);
+    }
+
+    private String getPassword() {
+        return entity.getConfig(CouchbaseNode.COUCHBASE_ADMIN_PASSWORD);
+    }
+
+    private String getWebPort() {
+        return "" + entity.getAttribute(CouchbaseNode.COUCHBASE_WEB_ADMIN_PORT);
+    }
+
+    private String getCouchbaseHostnameAndCredentials() {
+        return format("-c %s:%s -u %s -p %s", getSubnetHostname(), getWebPort(), getUsername(), getPassword());
+    }
+
+    private String getCouchbaseHostnameAndPort() {
+        return format("-c %s:%s", getSubnetHostname(), getWebPort());
+    }
+
+    private String getClusterInitRamSize() {
+        return entity.getConfig(CouchbaseNode.COUCHBASE_CLUSTER_INIT_RAM_SIZE).toString();
+    }
+
+    @Override
+    public void rebalance() {
+        entity.setAttribute(CouchbaseNode.REBALANCE_STATUS, "explicitly started");
+        newScript("rebalance")
+                .body.append(
+                couchbaseCli("rebalance") + getCouchbaseHostnameAndCredentials())
+                .failOnNonZeroResultCode()
+                .execute();
+
+        // wait until the re-balance is started
+        // (if it's quick, this might miss it, but it will only block for 30s if so)
+        Repeater.create()
+                .backoff(Repeater.DEFAULT_REAL_QUICK_PERIOD, 2, Duration.millis(500))
+                .limitTimeTo(Duration.THIRTY_SECONDS)
+                .until(new Callable<Boolean>() {
+                           @Override
+                           public Boolean call() throws Exception {
+                               for (HostAndPort nodeHostAndPort : getNodesHostAndPort()) {
+                                   if (isNodeRebalancing(nodeHostAndPort.toString())) {
+                                       return true;
+                                   }
+                               }
+                               return false;
+                           }
+                       }
+                ).run();
+
+        entity.setAttribute(CouchbaseNode.REBALANCE_STATUS, "waiting for completion");
+        // Wait until the Couchbase node finishes the re-balancing
+        Task<Boolean> reBalance = TaskBuilder.<Boolean>builder()
+                .name("Waiting until node is rebalancing")
+                .body(new Callable<Boolean>() {
+                    @Override
+                    public Boolean call() throws Exception {
+                        return Repeater.create()
+                                .backoff(Duration.ONE_SECOND, 1.2, Duration.TEN_SECONDS)
+                                .limitTimeTo(Duration.FIVE_MINUTES)
+                                .until(new Callable<Boolean>() {
+                                    @Override
+                                    public Boolean call() throws Exception {
+                                        for (HostAndPort nodeHostAndPort : getNodesHostAndPort()) {
+                                            if (isNodeRebalancing(nodeHostAndPort.toString())) {
+                                                return false;
+                                            }
+                                        }
+                                        return true;
+                                    }
+                                })
+                                .run();
+                    }
+                })
+                .build();
+        Boolean completed = DynamicTasks.queueIfPossible(reBalance)
+                .orSubmitAndBlock()
+                .andWaitForSuccess();
+        if (completed) {
+            entity.setAttribute(CouchbaseNode.REBALANCE_STATUS, "completed");
+            ServiceStateLogic.ServiceNotUpLogic.clearNotUpIndicator(getEntity(), "rebalancing");
+            log.info("Rebalanced cluster via primary node {}", getEntity());
+        } else {
+            entity.setAttribute(CouchbaseNode.REBALANCE_STATUS, "timed out");
+            ServiceStateLogic.ServiceNotUpLogic.updateNotUpIndicator(getEntity(), "rebalancing", "rebalance did not complete within time limit");
+            log.warn("Timeout rebalancing cluster via primary node {}", getEntity());
+        }
+    }
+
+    private Iterable<HostAndPort> getNodesHostAndPort() {
+        Group group = Iterables.getFirst(getEntity().getGroups(), null);
+        if (group == null) return Lists.newArrayList();
+        return Iterables.transform(group.getAttribute(CouchbaseCluster.COUCHBASE_CLUSTER_UP_NODES),
+                new Function<Entity, HostAndPort>() {
+                    @Override
+                    public HostAndPort apply(Entity input) {
+                        return BrooklynAccessUtils.getBrooklynAccessibleAddress(input, input.getAttribute(CouchbaseNode.COUCHBASE_WEB_ADMIN_PORT));
+                    }
+                });
+    }
+
+    private boolean isNodeRebalancing(String nodeHostAndPort) {
+        HttpToolResponse response = getApiResponse("http://" + nodeHostAndPort + "/pools/default/rebalanceProgress");
+        if (response.getResponseCode() != 200) {
+            throw new IllegalStateException("failed retrieving rebalance status: " + response);
+        }
+        return !"none".equals(HttpValueFunctions.jsonContents("status", String.class).apply(response));
+    }
+
+    private HttpToolResponse getApiResponse(String uri) {
+        return HttpTool.httpGet(HttpTool.httpClientBuilder()
+                        // the uri is required by the HttpClientBuilder in order to set the AuthScope of the credentials
+                        .uri(uri)
+                        .credentials(new UsernamePasswordCredentials(getUsername(), getPassword()))
+                        .build(),
+                URI.create(uri),
+                ImmutableMap.<String, String>of());
+    }
+
+    @Override
+    public void serverAdd(String serverToAdd, String username, String password) {
+        newScript("serverAdd").body.append(couchbaseCli("server-add")
+                + getCouchbaseHostnameAndCredentials() +
+                " --server-add=" + BashStringEscapes.wrapBash(serverToAdd) +
+                " --server-add-username=" + BashStringEscapes.wrapBash(username) +
+                " --server-add-password=" + BashStringEscapes.wrapBash(password))
+                .failOnNonZeroResultCode()
+                .execute();
+    }
+
+    @Override
+    public void serverAddAndRebalance(String serverToAdd, String username, String password) {
+        newScript("serverAddAndRebalance").body.append(couchbaseCli("rebalance")
+                + getCouchbaseHostnameAndCredentials() +
+                " --server-add=" + BashStringEscapes.wrapBash(serverToAdd) +
+                " --server-add-username=" + BashStringEscapes.wrapBash(username) +
+                " --server-add-password=" + BashStringEscapes.wrapBash(password))
+                .failOnNonZeroResultCode()
+                .execute();
+        entity.setAttribute(CouchbaseNode.REBALANCE_STATUS, "triggered as part of server-add");
+    }
+
+    @Override
+    public void bucketCreate(String bucketName, String bucketType, Integer bucketPort, Integer bucketRamSize, Integer bucketReplica) {
+        log.info("Adding bucket: {} to cluster {} primary node: {}", new Object[]{bucketName, CouchbaseClusterImpl.getClusterOrNode(getEntity()), getEntity()});
+
+        newScript("bucketCreate").body.append(couchbaseCli("bucket-create")
+                + getCouchbaseHostnameAndCredentials() +
+                " --bucket=" + BashStringEscapes.wrapBash(bucketName) +
+                " --bucket-type=" + BashStringEscapes.wrapBash(bucketType) +
+                " --bucket-port=" + bucketPort +
+                " --bucket-ramsize=" + bucketRamSize +
+                " --bucket-replica=" + bucketReplica)
+                .failOnNonZeroResultCode()
+                .execute();
+    }
+
+    @Override
+    public void addReplicationRule(Entity toCluster, String fromBucket, String toBucket) {
+        DynamicTasks.queue(DependentConfiguration.attributeWhenReady(toCluster, Attributes.SERVICE_UP)).getUnchecked();
+
+        String destName = CouchbaseClusterImpl.getClusterName(toCluster);
+
+        log.info("Setting up XDCR for " + fromBucket + " from " + CouchbaseClusterImpl.getClusterName(getEntity()) + " (via " + getEntity() + ") "
+                + "to " + destName + " (" + toCluster + ")");
+
+        Entity destPrimaryNode = toCluster.getAttribute(CouchbaseCluster.COUCHBASE_PRIMARY_NODE);
+        String destHostname = destPrimaryNode.getAttribute(Attributes.HOSTNAME);
+        String destUsername = toCluster.getConfig(CouchbaseNode.COUCHBASE_ADMIN_USERNAME);
+        String destPassword = toCluster.getConfig(CouchbaseNode.COUCHBASE_ADMIN_PASSWORD);
+
+        // the REST API mentions a 'type' of 'continuous', but there are no other references to it
+
+        // PROTOCOL: selects the replication protocol; 'xmem' indicates memcached, 'capi' indicates REST.
+        // xmem appears to be the default, so it is left unset for now
+//        String replMode = "xmem";
+
+        DynamicTasks.queue(TaskTags.markInessential(SshEffectorTasks.ssh(
+                couchbaseCli("xdcr-setup") +
+                        getCouchbaseHostnameAndCredentials() +
+                        " --create" +
+                        " --xdcr-cluster-name=" + BashStringEscapes.wrapBash(destName) +
+                        " --xdcr-hostname=" + BashStringEscapes.wrapBash(destHostname) +
+                        " --xdcr-username=" + BashStringEscapes.wrapBash(destUsername) +
+                        " --xdcr-password=" + BashStringEscapes.wrapBash(destPassword)
+        ).summary("create xdcr destination " + destName).newTask()));
+
+        // it would be nice to auto-create the bucket, but we would need to know its parameters; the port in particular is tedious
+//        ((CouchbaseNode)destPrimaryNode).bucketCreate(toBucket, "couchbase", null, 0, 0);
+
+        DynamicTasks.queue(SshEffectorTasks.ssh(
+                couchbaseCli("xdcr-replicate") +
+                        getCouchbaseHostnameAndCredentials() +
+                        " --create" +
+                        " --xdcr-cluster-name=" + BashStringEscapes.wrapBash(destName) +
+                        " --xdcr-from-bucket=" + BashStringEscapes.wrapBash(fromBucket) +
+                        " --xdcr-to-bucket=" + BashStringEscapes.wrapBash(toBucket)
+//            + " --xdcr-replication-mode="+replMode
+        ).summary("configure replication for " + fromBucket + " to " + destName + ":" + toBucket).newTask());
+    }
+}
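
Note on the rebalance polling above: it assumes Couchbase's REST endpoint /pools/default/rebalanceProgress returns a JSON body whose "status" field is "none" once rebalancing has finished. A minimal standalone sketch of the same check, using only the JDK; the host, port and credentials below are illustrative, not taken from this commit:

    import java.io.InputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;
    import java.util.Base64;
    import java.util.Scanner;

    public class RebalanceProgressCheck {
        // true while the node reports a rebalance in flight; the endpoint returns
        // a JSON body such as {"status":"none"} once the cluster is idle
        static boolean isRebalancing(String hostAndPort, String user, String pass) throws Exception {
            URL url = new URL("http://" + hostAndPort + "/pools/default/rebalanceProgress");
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            String auth = Base64.getEncoder().encodeToString(
                    (user + ":" + pass).getBytes(StandardCharsets.UTF_8));
            conn.setRequestProperty("Authorization", "Basic " + auth);
            if (conn.getResponseCode() != 200) {
                throw new IllegalStateException("failed retrieving rebalance status: HTTP " + conn.getResponseCode());
            }
            try (InputStream in = conn.getInputStream();
                 Scanner scanner = new Scanner(in, "UTF-8").useDelimiter("\\A")) {
                String body = scanner.hasNext() ? scanner.next() : "";
                // a crude, dependency-free check; the driver above parses the JSON properly
                return !body.contains("\"status\":\"none\"");
            }
        }

        public static void main(String[] args) throws Exception {
            // illustrative values; substitute a real admin host:port and credentials
            System.out.println(isRebalancing("localhost:8091", "Administrator", "Password"));
        }
    }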

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseSyncGateway.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseSyncGateway.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseSyncGateway.java
new file mode 100644
index 0000000..384434c
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseSyncGateway.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchbase;
+
+import brooklyn.config.ConfigKey;
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.ConfigKeys;
+import brooklyn.entity.basic.SoftwareProcess;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
+import brooklyn.event.basic.PortAttributeSensorAndConfigKey;
+import brooklyn.event.basic.Sensors;
+import brooklyn.util.flags.SetFromFlag;
+
+@ImplementedBy(CouchbaseSyncGatewayImpl.class)
+public interface CouchbaseSyncGateway extends SoftwareProcess {
+
+    @SetFromFlag("version")
+    ConfigKey<String> SUGGESTED_VERSION = ConfigKeys.newConfigKeyWithDefault(SoftwareProcess.SUGGESTED_VERSION,
+            "1.0-beta3.1");
+
+    @SetFromFlag("downloadUrl")
+    BasicAttributeSensorAndConfigKey<String> DOWNLOAD_URL = new BasicAttributeSensorAndConfigKey<String>(
+            SoftwareProcess.DOWNLOAD_URL, "http://packages.couchbase.com/releases/couchbase-sync-gateway/1.0-beta/couchbase-sync-gateway-community_${version}_${driver.osTag}");
+    
+    @SetFromFlag("couchbaseServer")
+    ConfigKey<Entity> COUCHBASE_SERVER = ConfigKeys.newConfigKey(Entity.class, "couchbaseSyncGateway.couchbaseNode", 
+            "Couchbase server node or cluster the sync gateway connects to");
+
+    @SetFromFlag("serverPool")
+    ConfigKey<String> COUCHBASE_SERVER_POOL = ConfigKeys.newStringConfigKey("couchbaseSyncGateway.serverPool", 
+            "Couchbase Server pool name in which to find buckets", "default");
+    
+    @SetFromFlag("couchbaseServerBucket")
+    ConfigKey<String> COUCHBASE_SERVER_BUCKET = ConfigKeys.newStringConfigKey("couchbaseSyncGateway.serverBucket", 
+            "Name of the Couchbase bucket to use", "sync_gateway");
+
+    @SetFromFlag("pretty")
+    ConfigKey<Boolean> PRETTY = ConfigKeys.newBooleanConfigKey("couchbaseSyncGateway.pretty", 
+            "Pretty-print JSON responses. This is useful for debugging, but reduces performance.", false);
+
+    @SetFromFlag("verbose")
+    ConfigKey<Boolean> VERBOSE = ConfigKeys.newBooleanConfigKey("couchbaseSyncGateway.verbose", 
+            "Logs more information about requests.", false);
+
+    AttributeSensor<String> COUCHBASE_SERVER_WEB_URL = Sensors.newStringSensor("couchbaseSyncGateway.serverWebUrl", 
+            "The URL and web port of the Couchbase server to connect to");
+    
+    AttributeSensor<String> MANAGEMENT_URL = Sensors.newStringSensor("couchbaseSyncGateway.managementUrl", 
+            "Management URL for Couchbase Sync Gateway");
+
+    PortAttributeSensorAndConfigKey SYNC_REST_API_PORT = new PortAttributeSensorAndConfigKey("couchbaseSyncGateway.syncRestPort", 
+            "Port the Sync REST API listens on", "4984");
+    
+    PortAttributeSensorAndConfigKey ADMIN_REST_API_PORT = new PortAttributeSensorAndConfigKey("couchbaseSyncGateway.adminRestPort", 
+            "Port the Admin REST API listens on", "4985");
+
+}
\ No newline at end of file
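
The config keys above are normally wired up from an application; a minimal sketch, assuming an existing Brooklyn application `app` (such as the TestApplication used in the tests below) and an already-created Couchbase cluster entity `cluster` — both names are illustrative:

    // illustrative only: attach a Sync Gateway to an existing Couchbase cluster entity
    CouchbaseSyncGateway gateway = app.createAndManageChild(
            EntitySpec.create(CouchbaseSyncGateway.class)
                    .configure(CouchbaseSyncGateway.COUCHBASE_SERVER, cluster)
                    .configure(CouchbaseSyncGateway.COUCHBASE_SERVER_BUCKET, "sync_gateway"));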



[02/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBTestHelper.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBTestHelper.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBTestHelper.java
new file mode 100644
index 0000000..9b890fa
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBTestHelper.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import java.net.UnknownHostException;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.brooklyn.entity.nosql.mongodb.AbstractMongoDBServer;
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBServer;
+import org.bson.types.ObjectId;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Throwables;
+import com.google.common.net.HostAndPort;
+import com.mongodb.BasicDBObject;
+import com.mongodb.CommandResult;
+import com.mongodb.DB;
+import com.mongodb.DBCollection;
+import com.mongodb.DBObject;
+import com.mongodb.MongoClient;
+import com.mongodb.ReadPreference;
+
+import brooklyn.location.access.BrooklynAccessUtils;
+
+public class MongoDBTestHelper {
+
+    private static final Logger LOG = LoggerFactory.getLogger(MongoDBTestHelper.class);
+
+    private static final String TEST_DB = "brooklyn_test";
+    private static final String TEST_COLLECTION = "test_collection";
+    private static final String ADMIN_DB = "admin";
+
+    /**
+     * Inserts a new object with { key: value } at the given server.
+     * @return The new document's id
+     */
+    public static String insert(AbstractMongoDBServer entity, String key, Object value) {
+        LOG.info("Inserting {}:{} at {}", new Object[]{key, value, entity});
+        MongoClient mongoClient = clientForServer(entity);
+        try {
+            DB db = mongoClient.getDB(TEST_DB);
+            DBCollection testCollection = db.getCollection(TEST_COLLECTION);
+            BasicDBObject doc = new BasicDBObject(key, value);
+            testCollection.insert(doc);
+            ObjectId id = (ObjectId) doc.get("_id");
+            return id.toString();
+        } finally {
+            mongoClient.close();
+        }
+    }
+
+    /** @return The {@link DBObject} representing the object with the given id */
+    public static DBObject getById(AbstractMongoDBServer entity, String id) {
+        LOG.info("Getting {} from {}", new Object[]{id, entity});
+        MongoClient mongoClient = clientForServer(entity);
+        // Secondary preferred means the driver will let us read from secondaries too.
+        mongoClient.setReadPreference(ReadPreference.secondaryPreferred());
+        try {
+            DB db = mongoClient.getDB(TEST_DB);
+            DBCollection testCollection = db.getCollection(TEST_COLLECTION);
+            return testCollection.findOne(new BasicDBObject("_id", new ObjectId(id)));
+        } finally {
+            mongoClient.close();
+        }
+    }
+    
+    public static List<String> getDatabaseNames(AbstractMongoDBServer entity) {
+        LOG.info("Getting database names from {}", entity);
+        MongoClient mongoClient = clientForServer(entity);
+        try {
+            return mongoClient.getDatabaseNames();
+        } finally {
+            mongoClient.close();
+        }
+    }
+    
+    public static boolean isConfigServer(AbstractMongoDBServer entity) {
+        LOG.info("Checking if {} is a config server", entity);
+        MongoClient mongoClient = clientForServer(entity);
+        try {
+            DB db = mongoClient.getDB(ADMIN_DB);
+            CommandResult commandResult = db.command("getCmdLineOpts");
+            Map<?, ?> parsedArgs = (Map<?, ?>)commandResult.get("parsed");
+            if (parsedArgs == null) return false;
+            Boolean configServer = (Boolean)parsedArgs.get("configsvr");
+            if (configServer != null) {
+                // v2.5 format
+                return Boolean.TRUE.equals(configServer);
+            } else {
+                // v2.6 format
+                String role = (String) ((Map)parsedArgs.get("sharding")).get("clusterRole");
+                return "configsvr".equals(role);
+            }
+        } finally {
+            mongoClient.close();
+        }
+    }
+
+    private static MongoClient clientForServer(AbstractMongoDBServer server) {
+        try {
+            HostAndPort hap = BrooklynAccessUtils.getBrooklynAccessibleAddress(server, server.getAttribute(MongoDBServer.PORT));
+            return new MongoClient(hap.getHostText(), hap.getPort());
+        } catch (UnknownHostException e) {
+            // Fail whatever test called this method.
+            throw Throwables.propagate(e);
+        }
+    }
+}
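
These helpers support the write-on-one-node, read-from-another pattern exercised by the sharding tests that follow; a minimal sketch, assuming two already-started servers `serverA` and `serverB` from the same deployment (names illustrative):

    // illustrative round trip: insert via one server, read back via another
    String id = MongoDBTestHelper.insert(serverA, "key", "value");
    DBObject doc = MongoDBTestHelper.getById(serverB, id);
    assert "value".equals(doc.get("key"));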

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/ReplicaSetConfigTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/ReplicaSetConfigTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/ReplicaSetConfigTest.java
new file mode 100644
index 0000000..dc5cb5c
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/ReplicaSetConfigTest.java
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertNotEquals;
+import static org.testng.Assert.assertTrue;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
+import javax.annotation.Nullable;
+
+import org.apache.brooklyn.entity.nosql.mongodb.ReplicaSetConfig;
+import org.bson.BSONObject;
+import org.bson.BasicBSONObject;
+import org.bson.types.BasicBSONList;
+import org.testng.annotations.Test;
+
+import com.google.common.base.Function;
+import com.google.common.base.Predicate;
+import com.google.common.base.Predicates;
+import com.google.common.collect.FluentIterable;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.google.common.net.HostAndPort;
+
+public class ReplicaSetConfigTest {
+
+    // true if the object has a "votes" field that is > 0
+    static Predicate<BasicBSONObject> IS_VOTING_MEMBER = new Predicate<BasicBSONObject>() {
+        @Override public boolean apply(@Nullable BasicBSONObject input) {
+            return input != null && input.containsField("votes") && input.getInt("votes") > 0;
+        }
+    };
+
+    private BasicBSONObject makeSetMember(Integer id, String host) {
+        return new BasicBSONObject(ImmutableMap.of("_id", id, "host", host));
+    }
+
+    private BasicBSONObject makeSetConfig(String id, Integer version, BasicBSONObject... members) {
+        BasicBSONList memberList = new BasicBSONList();
+        memberList.addAll(Arrays.asList(members));
+        return new BasicBSONObject(ImmutableMap.of("_id", id, "version", version, "members", memberList));
+    }
+
+    private BasicBSONObject makeSetWithNMembers(int n) {
+        ReplicaSetConfig setConfig = ReplicaSetConfig.builder("replica-set-name");
+        for (int i = 0; i < n; i++) {
+            setConfig.member("host-"+i, i, i);
+        }
+        return setConfig.build();
+    }
+
+    private Collection<HostAndPort> votingMembersOfSet(BasicBSONObject config) {
+        BasicBSONList membersObject = BasicBSONList.class.cast(config.get("members"));
+        List<BasicBSONObject> members = Lists.newArrayList();
+        for (Object object : membersObject) members.add(BasicBSONObject.class.cast(object));
+        return FluentIterable.from(members)
+                .filter(IS_VOTING_MEMBER)
+                .transform(new Function<BasicBSONObject, HostAndPort>() {
+                    @Override public HostAndPort apply(BasicBSONObject input) {
+                        return HostAndPort.fromString(input.getString("host"));
+                    }
+                })
+                .toList();
+    }
+
+    private Collection<HostAndPort> nonVotingMembersOfSet(BasicBSONObject config) {
+        BasicBSONList membersObject = BasicBSONList.class.cast(config.get("members"));
+        List<BasicBSONObject> members = Lists.newArrayList();
+        for (Object object : membersObject) members.add(BasicBSONObject.class.cast(object));
+        return FluentIterable
+                .from(members)
+                .filter(Predicates.not(IS_VOTING_MEMBER))
+                .transform(new Function<BasicBSONObject, HostAndPort>() {
+                    @Override public HostAndPort apply(BasicBSONObject input) {
+                        return HostAndPort.fromString(input.getString("host"));
+                    }
+                })
+                .toList();
+    }
+
+    @Test
+    public void testCreateFromScratch() {
+        BasicBSONObject config = ReplicaSetConfig.builder("rs")
+            .member("host-a", 12345, 1)
+            .member("host-b", 54321, 2)
+            .build();
+        assertEquals(config.get("_id"), "rs");
+        assertEquals(config.getInt("version"), 1);
+        assertTrue(config.get("members") instanceof BasicBSONList);
+        BasicBSONList members = (BasicBSONList) config.get("members");
+        assertEquals(members.size(), 2);
+    }
+
+    @Test
+    public void testCreateFromExistingConfig() {
+        // Replica set of one member
+        int version = 44;
+        BasicBSONObject config = makeSetConfig("replica-set-name", version, makeSetMember(33, "example.com:7777"));
+
+        // Use existing set to add two more members
+        BasicBSONObject newConfig = ReplicaSetConfig.fromExistingConfig(config)
+            .member("foo", 8888, 34)
+            .member("bar", 9999, 35)
+            .build();
+
+        assertEquals(newConfig.get("_id"), "replica-set-name");
+        assertEquals(newConfig.get("version"), version + 1);
+        BasicBSONList members = (BasicBSONList) newConfig.get("members");
+        assertEquals(members.size(), 3);
+
+        BSONObject original = (BSONObject) members.get(0);
+        assertEquals(original.get("_id"), 33);
+        assertEquals(original.get("host"), "example.com:7777");
+
+        BSONObject second = (BSONObject) members.get(1);
+        assertEquals(second.get("_id"), 34);
+        assertEquals(second.get("host"), "foo:8888");
+
+        BSONObject third = (BSONObject) members.get(2);
+        assertEquals(third.get("_id"), 35);
+        assertEquals(third.get("host"), "bar:9999");
+    }
+
+    @Test
+    public void testRemoveMember() {
+        int version = 44;
+        BasicBSONObject config = makeSetConfig("replica-set-name", version,
+                makeSetMember(33, "example.com:7777"),
+                makeSetMember(34, "example.com:7778"));
+
+        // Use existing set to add two more members
+        BasicBSONObject newConfig = ReplicaSetConfig.fromExistingConfig(config)
+            .remove("example.com", 7777)
+            .build();
+
+        assertEquals(newConfig.get("version"), version + 1);
+        BasicBSONList members = (BasicBSONList) newConfig.get("members");
+        assertEquals(members.size(), 1);
+        assertEquals(BSONObject.class.cast(members.get(0)).get("host"), "example.com:7778");
+
+        newConfig = ReplicaSetConfig.fromExistingConfig(newConfig)
+            .remove("example.com", 7778)
+            .build();
+
+        members = (BasicBSONList) newConfig.get("members");
+        assertTrue(members.isEmpty());
+    }
+
+    @Test
+    public void testRemoveNonExistentMemberHasNoEffect() {
+        BasicBSONObject config = makeSetConfig("replica-set-name", 1,
+                makeSetMember(33, "example.com:7777"),
+                makeSetMember(34, "example.com:7778"));
+
+        BasicBSONList members = (BasicBSONList) config.get("members");
+        assertEquals(members.size(), 2);
+
+        BasicBSONObject altered = ReplicaSetConfig.fromExistingConfig(config)
+                .remove("foo", 99)
+                .build();
+
+        members = (BasicBSONList) altered.get("members");
+        assertEquals(members.size(), 2);
+    }
+
+    @Test
+    public void testSetOfFourMembersHasThreeVoters() {
+        BasicBSONObject config = makeSetWithNMembers(4);
+        assertEquals(votingMembersOfSet(config).size(), 3, "Expected three voters in set with four members");
+        assertEquals(nonVotingMembersOfSet(config).size(), 1, "Expected one non-voter in set with four members");
+    }
+
+    @Test
+    public void testFourthServerOfFourIsGivenVoteWhenAnotherServerIsRemoved() {
+        BasicBSONObject config = makeSetWithNMembers(4);
+        HostAndPort toRemove = votingMembersOfSet(config).iterator().next();
+
+        BasicBSONObject updated = ReplicaSetConfig.fromExistingConfig(config)
+                .remove(toRemove)
+                .build();
+
+        assertEquals(votingMembersOfSet(updated).size(), 3);
+        assertTrue(nonVotingMembersOfSet(updated).isEmpty());
+
+        BasicBSONList newMembers = BasicBSONList.class.cast(updated.get("members"));
+        for (Object object : newMembers) {
+            BasicBSONObject member = BasicBSONObject.class.cast(object);
+            HostAndPort memberHostAndPort = HostAndPort.fromString(member.getString("host"));
+            assertNotEquals(memberHostAndPort, toRemove);
+        }
+    }
+
+    @Test
+    public void testMaximumNumberOfVotersIsLimited() {
+        BasicBSONObject config = makeSetWithNMembers(ReplicaSetConfig.MAXIMUM_REPLICA_SET_SIZE);
+        int voters = ReplicaSetConfig.MAXIMUM_VOTING_MEMBERS;
+        int nonVoters = ReplicaSetConfig.MAXIMUM_REPLICA_SET_SIZE - voters;
+        assertEquals(votingMembersOfSet(config).size(), voters, "Expected number of voters in max-size set to be " + voters);
+        assertEquals(nonVotingMembersOfSet(config).size(), nonVoters, "Expected number of non-voters in max-size set to be " + nonVoters);
+    }
+
+    @Test(expectedExceptions = IllegalStateException.class)
+    public void testMoreMembersThanMaximumAllowsRejected() {
+        makeSetWithNMembers(ReplicaSetConfig.MAXIMUM_REPLICA_SET_SIZE + 1);
+    }
+
+    @Test
+    public void testPrimaryGivenVoteWhenLastInMemberList() {
+        BasicBSONObject config = ReplicaSetConfig.builder("rs")
+            .member("host-a", 1, 1)
+            .member("host-b", 2, 2)
+            .member("host-c", 3, 3)
+            .member("host-d", 4, 4)
+            .primary(HostAndPort.fromParts("host-d", 4))
+            .build();
+        assertEquals(votingMembersOfSet(config).size(), 3);
+        assertEquals(nonVotingMembersOfSet(config).size(), 1);
+        assertTrue(votingMembersOfSet(config).contains(HostAndPort.fromParts("host-d", 4)));
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerIntegrationTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerIntegrationTest.java
new file mode 100644
index 0000000..fb748e4
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerIntegrationTest.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import static org.testng.Assert.assertFalse;
+
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBServer;
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBTestHelper;
+import org.apache.brooklyn.entity.nosql.mongodb.sharding.MongoDBConfigServer;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.basic.ApplicationBuilder;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
+import brooklyn.test.Asserts;
+import brooklyn.test.EntityTestUtils;
+import brooklyn.test.entity.TestApplication;
+
+import com.google.common.collect.ImmutableList;
+
+public class MongoDBConfigServerIntegrationTest {
+    private TestApplication app;
+    private LocalhostMachineProvisioningLocation localhostProvisioningLocation;
+
+    @BeforeMethod(alwaysRun=true)
+    public void setUp() throws Exception {
+        localhostProvisioningLocation = new LocalhostMachineProvisioningLocation();
+        app = ApplicationBuilder.newManagedApp(TestApplication.class);
+    }
+
+    @AfterMethod(alwaysRun=true)
+    public void tearDown() throws Exception {
+        if (app != null) Entities.destroyAll(app.getManagementContext());
+    }
+    
+    @Test(groups = "Integration")
+    public void testCanStartAndStop() throws Exception {
+        MongoDBConfigServer entity = app.createAndManageChild(EntitySpec.create(MongoDBConfigServer.class)
+                .configure(MongoDBServer.MONGODB_CONF_TEMPLATE_URL, "classpath:///test-mongodb-configserver.conf"));
+        app.start(ImmutableList.of(localhostProvisioningLocation));
+
+        EntityTestUtils.assertAttributeEqualsEventually(entity, Startable.SERVICE_UP, true);
+        Asserts.assertTrue(MongoDBTestHelper.isConfigServer(entity), "Server is not a config server");
+        entity.stop();
+        assertFalse(entity.getAttribute(Startable.SERVICE_UP));
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentEc2LiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentEc2LiveTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentEc2LiveTest.java
new file mode 100644
index 0000000..9e8ff55
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentEc2LiveTest.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import groovy.time.TimeDuration;
+
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBReplicaSet;
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBServer;
+import org.apache.brooklyn.entity.nosql.mongodb.sharding.MongoDBConfigServer;
+import org.apache.brooklyn.entity.nosql.mongodb.sharding.MongoDBRouter;
+import org.apache.brooklyn.entity.nosql.mongodb.sharding.MongoDBShardedDeployment;
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.AbstractEc2LiveTest;
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.location.Location;
+import brooklyn.test.Asserts;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+
+/**
+ * NOTE: These tests will provision 9 machines in AWS, which can cause 'Request limit exceeded' and
+ * 'Exhausted available authentication methods' exceptions, depending upon current AWS load. You can
+ * mitigate this issue by adding the following lines to your brooklyn.properties:
+ *
+ * brooklyn.location.jclouds.machineCreateAttempts=3
+ * brooklyn.jclouds.aws-ec2.maxConcurrentMachineCreations=5
+ */
+@Test
+public class MongoDBShardedDeploymentEc2LiveTest extends AbstractEc2LiveTest {
+
+    private static final Integer ROUTER_CLUSTER_SIZE = 2;
+    private static final Integer REPLICASET_SIZE = 2;
+    private static final Integer SHARD_CLUSTER_SIZE = 3;
+    private static final TimeDuration TIMEOUT = new TimeDuration(0, 3, 0, 0);
+
+    @Override
+    protected void doTest(Location loc) throws Exception {
+        final MongoDBShardedDeployment deployment = app.createAndManageChild(EntitySpec.create(MongoDBShardedDeployment.class)
+                .configure(MongoDBShardedDeployment.INITIAL_ROUTER_CLUSTER_SIZE, ROUTER_CLUSTER_SIZE)
+                .configure(MongoDBShardedDeployment.SHARD_REPLICASET_SIZE, REPLICASET_SIZE)
+                .configure(MongoDBShardedDeployment.INITIAL_SHARD_CLUSTER_SIZE, SHARD_CLUSTER_SIZE)
+                .configure(MongoDBShardedDeployment.MONGODB_REPLICA_SET_SPEC, EntitySpec.create(MongoDBReplicaSet.class)
+                        .configure(MongoDBServer.MONGODB_CONF_TEMPLATE_URL, "classpath:///test-mongodb.conf")
+                        .configure(MongoDBReplicaSet.MEMBER_SPEC, EntitySpec.create(MongoDBServer.class)))
+                .configure(MongoDBShardedDeployment.MONGODB_ROUTER_SPEC, EntitySpec.create(MongoDBRouter.class)
+                        .configure(MongoDBConfigServer.MONGODB_CONF_TEMPLATE_URL, "classpath:///test-mongodb-router.conf"))
+                .configure(MongoDBShardedDeployment.MONGODB_CONFIG_SERVER_SPEC, EntitySpec.create(MongoDBConfigServer.class)
+                        .configure(MongoDBConfigServer.MONGODB_CONF_TEMPLATE_URL, "classpath:///test-mongodb-configserver.conf")));
+
+        app.start(ImmutableList.of(loc));
+        
+        Entities.dumpInfo(app);
+
+        Asserts.succeedsEventually(ImmutableMap.of("timeout", TIMEOUT), new Runnable() {
+            public void run() {
+                Assert.assertEquals(deployment.getRouterCluster().getCurrentSize(), ROUTER_CLUSTER_SIZE);
+                Assert.assertEquals(deployment.getShardCluster().getCurrentSize(), SHARD_CLUSTER_SIZE);
+                Assert.assertEquals(deployment.getConfigCluster().getCurrentSize(), MongoDBShardedDeployment.CONFIG_CLUSTER_SIZE.getDefaultValue());
+                for (Entity entity : deployment.getShardCluster().getMembers()) {
+                    Assert.assertEquals(((MongoDBReplicaSet) entity).getCurrentSize(), REPLICASET_SIZE);
+                }
+            }
+        });
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentIntegrationTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentIntegrationTest.java
new file mode 100644
index 0000000..e4947d3
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentIntegrationTest.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import org.apache.brooklyn.entity.nosql.mongodb.AbstractMongoDBServer;
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBReplicaSet;
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBServer;
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBTestHelper;
+import org.apache.brooklyn.entity.nosql.mongodb.sharding.MongoDBConfigServer;
+import org.apache.brooklyn.entity.nosql.mongodb.sharding.MongoDBConfigServerCluster;
+import org.apache.brooklyn.entity.nosql.mongodb.sharding.MongoDBRouter;
+import org.apache.brooklyn.entity.nosql.mongodb.sharding.MongoDBRouterCluster;
+import org.apache.brooklyn.entity.nosql.mongodb.sharding.MongoDBShardCluster;
+import org.apache.brooklyn.entity.nosql.mongodb.sharding.MongoDBShardedDeployment;
+import org.testng.Assert;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.BrooklynAppLiveTestSupport;
+import brooklyn.entity.Entity;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
+import brooklyn.test.EntityTestUtils;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+import com.mongodb.DBObject;
+
+public class MongoDBShardedDeploymentIntegrationTest extends BrooklynAppLiveTestSupport {
+    
+    private static final Integer ROUTER_CLUSTER_SIZE = 2;
+    private static final Integer REPLICASET_SIZE = 2;
+    private static final Integer SHARD_CLUSTER_SIZE = 3;
+    
+    private LocalhostMachineProvisioningLocation localhostProvisioningLocation;
+
+    @BeforeMethod(alwaysRun=true)
+    public void setUp() throws Exception {
+        super.setUp();
+        localhostProvisioningLocation = app.newLocalhostProvisioningLocation();
+    }
+
+    private MongoDBShardedDeployment makeAndStartDeployment() {
+        final MongoDBShardedDeployment deployment = app.createAndManageChild(EntitySpec.create(MongoDBShardedDeployment.class)
+                .configure(MongoDBShardedDeployment.INITIAL_ROUTER_CLUSTER_SIZE, ROUTER_CLUSTER_SIZE)
+                .configure(MongoDBShardedDeployment.SHARD_REPLICASET_SIZE, REPLICASET_SIZE)
+                .configure(MongoDBShardedDeployment.INITIAL_SHARD_CLUSTER_SIZE, SHARD_CLUSTER_SIZE)
+                .configure(MongoDBShardedDeployment.MONGODB_REPLICA_SET_SPEC, EntitySpec.create(MongoDBReplicaSet.class)
+                        .configure(MongoDBServer.MONGODB_CONF_TEMPLATE_URL, "classpath:///test-mongodb.conf")
+                        .configure(MongoDBReplicaSet.MEMBER_SPEC, EntitySpec.create(MongoDBServer.class)))
+                .configure(MongoDBShardedDeployment.MONGODB_ROUTER_SPEC, EntitySpec.create(MongoDBRouter.class)
+                        .configure(MongoDBConfigServer.MONGODB_CONF_TEMPLATE_URL, "classpath:///test-mongodb-router.conf"))
+                .configure(MongoDBShardedDeployment.MONGODB_CONFIG_SERVER_SPEC, EntitySpec.create(MongoDBConfigServer.class)
+                        .configure(MongoDBConfigServer.MONGODB_CONF_TEMPLATE_URL, "classpath:///test-mongodb-configserver.conf")));
+        app.start(ImmutableList.of(localhostProvisioningLocation));
+        EntityTestUtils.assertAttributeEqualsEventually(deployment, Startable.SERVICE_UP, true);
+        return deployment;
+    }
+    
+    @Test(groups = "Integration")
+    public void testCanStartAndStopDeployment() {
+        MongoDBShardedDeployment deployment = makeAndStartDeployment();
+        deployment.stop();
+        EntityTestUtils.assertAttributeEqualsEventually(deployment, Startable.SERVICE_UP, false);
+    }
+    
+    @Test(groups = "Integration")
+    public void testDeployedStructure() {
+        MongoDBShardedDeployment deployment = makeAndStartDeployment();
+        MongoDBConfigServerCluster configServers = deployment.getConfigCluster();
+        MongoDBRouterCluster routers = deployment.getRouterCluster();
+        MongoDBShardCluster shards = deployment.getShardCluster();
+        Assert.assertNotNull(configServers);
+        Assert.assertNotNull(routers);
+        Assert.assertNotNull(shards);
+        Assert.assertEquals(configServers.getCurrentSize(), MongoDBShardedDeployment.CONFIG_CLUSTER_SIZE.getDefaultValue());
+        Assert.assertEquals(routers.getCurrentSize(), ROUTER_CLUSTER_SIZE);
+        Assert.assertEquals(shards.getCurrentSize(), SHARD_CLUSTER_SIZE);
+        for (Entity entity : deployment.getShardCluster().getMembers()) {
+            Assert.assertEquals(((MongoDBReplicaSet)entity).getCurrentSize(), REPLICASET_SIZE);
+        }
+        for (Entity entity : configServers.getMembers()) {
+            checkEntityTypeAndServiceUp(entity, MongoDBConfigServer.class);
+        }
+        for (Entity entity : routers.getMembers()) {
+            checkEntityTypeAndServiceUp(entity, MongoDBRouter.class);
+        }
+        for (Entity entity : shards.getMembers()) {
+            checkEntityTypeAndServiceUp(entity, MongoDBReplicaSet.class);
+        }
+    }
+    
+    @Test(groups = "Integration")
+    public void testReadAndWriteDifferentRouters() {  // must be public, or TestNG will silently skip it
+        MongoDBShardedDeployment deployment = makeAndStartDeployment();
+        EntityTestUtils.assertAttributeEqualsEventually(deployment, Startable.SERVICE_UP, true);
+        MongoDBRouter router1 = (MongoDBRouter) Iterables.get(deployment.getRouterCluster().getMembers(), 0);
+        MongoDBRouter router2 = (MongoDBRouter) Iterables.get(deployment.getRouterCluster().getMembers(), 1);
+        EntityTestUtils.assertAttributeEqualsEventually(router1, Startable.SERVICE_UP, true);
+        EntityTestUtils.assertAttributeEqualsEventually(router2, Startable.SERVICE_UP, true);
+        
+        String documentId = MongoDBTestHelper.insert(router1, "meaning-of-life", 42);
+        DBObject docOut = MongoDBTestHelper.getById(router2, documentId);
+        Assert.assertEquals(docOut.get("meaning-of-life"), 42);
+        
+        for (Entity entity : Iterables.filter(app.getManagementContext().getEntityManager().getEntitiesInApplication(app), AbstractMongoDBServer.class)) {
+            EntityTestUtils.assertAttributeEqualsEventually(entity, Startable.SERVICE_UP, true);
+        }
+    }
+    
+    private void checkEntityTypeAndServiceUp(Entity entity, Class<? extends Entity> expectedClass) {
+        Assert.assertNotNull(entity);
+        Assert.assertTrue(expectedClass.isAssignableFrom(entity.getClass()), "expected: " + expectedClass 
+                + " on interfaces, found: " + entity.getClass().getInterfaces());
+        EntityTestUtils.assertAttributeEqualsEventually(entity, Startable.SERVICE_UP, true);
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/redis/JedisSupport.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/redis/JedisSupport.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/redis/JedisSupport.java
new file mode 100644
index 0000000..81abb42
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/redis/JedisSupport.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.redis;
+
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertTrue;
+
+import org.apache.brooklyn.entity.nosql.redis.RedisStore;
+
+import redis.clients.jedis.Jedis;
+
+import com.google.common.base.Strings;
+
+/**
+ * {@link RedisStore} testing using Jedis API.
+ */
+public class JedisSupport {
+    private static final String TEST_DATA = Strings.repeat("0123456789", 16);
+
+    private RedisStore redis;
+
+    public JedisSupport(RedisStore redis) {
+        this.redis = redis;
+    }
+
+    /**
+     * Exercise the {@link RedisStore} using the Jedis API.
+     */
+    public void redisTest() throws Exception {
+        writeData("brooklyn", TEST_DATA);
+        String result = readData("brooklyn");
+        assertEquals(result, TEST_DATA);
+    }
+    
+    public void writeData(String key, String val) throws Exception {
+        Jedis client = getRedisClient(redis);
+        try {
+            client.set(key, val);
+        } finally {
+            client.disconnect();
+        }
+    }
+
+    public String readData(String key) throws Exception {
+        Jedis client = getRedisClient(redis);
+        try {
+            return client.get(key);
+        } finally {
+            client.disconnect();
+        }
+    }
+
+    private Jedis getRedisClient(RedisStore redis) {
+        int port = redis.getAttribute(RedisStore.REDIS_PORT);
+        String host = redis.getAttribute(RedisStore.HOSTNAME);
+        Jedis client = new Jedis(host, port);
+        client.connect();
+        assertTrue(client.isConnected());
+        return client;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/redis/RedisClusterIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/redis/RedisClusterIntegrationTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/redis/RedisClusterIntegrationTest.java
new file mode 100644
index 0000000..6331662
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/redis/RedisClusterIntegrationTest.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.redis;
+
+import static org.testng.Assert.assertEquals;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.Callable;
+
+import org.apache.brooklyn.entity.nosql.redis.RedisCluster;
+import org.apache.brooklyn.entity.nosql.redis.RedisSlave;
+import org.apache.brooklyn.entity.nosql.redis.RedisStore;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.basic.ApplicationBuilder;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.group.DynamicCluster;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.location.Location;
+import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
+import brooklyn.test.Asserts;
+import brooklyn.test.EntityTestUtils;
+import brooklyn.test.entity.TestApplication;
+
+import com.google.common.collect.ImmutableList;
+
+public class RedisClusterIntegrationTest {
+
+    private TestApplication app;
+    private Location loc;
+    private RedisCluster cluster;
+
+    @BeforeMethod(alwaysRun=true)
+    public void setup() {
+        app = ApplicationBuilder.newManagedApp(TestApplication.class);
+        loc = new LocalhostMachineProvisioningLocation();
+    }
+
+    @AfterMethod(alwaysRun=true)
+    public void shutdown() {
+        if (app != null) Entities.destroyAll(app.getManagementContext());
+    }
+
+    @Test(groups = { "Integration" })
+    public void testRedisClusterReplicates() throws Exception {
+        final String key = "mykey";
+        final String val = "1234567890";
+        
+        cluster = app.createAndManageChild(EntitySpec.create(RedisCluster.class)
+                .configure(DynamicCluster.INITIAL_SIZE, 3));
+        app.start(ImmutableList.of(loc));
+
+        EntityTestUtils.assertAttributeEqualsEventually(cluster, Startable.SERVICE_UP, true);
+
+        RedisStore master = cluster.getMaster();
+        List<RedisSlave> slaves = ImmutableList.<RedisSlave>copyOf((Collection)cluster.getSlaves().getMembers());
+        
+        assertEquals(slaves.size(), 3);
+        
+        JedisSupport viaMaster = new JedisSupport(master);
+        viaMaster.writeData(key, val);
+        assertEquals(viaMaster.readData(key), val);
+
+        for (RedisSlave slave : slaves) {
+            final JedisSupport viaSlave = new JedisSupport(slave);
+            Asserts.succeedsEventually(new Callable<Void>() {
+                @Override public Void call() throws Exception {
+                    assertEquals(viaSlave.readData(key), val);
+                    return null;
+                }});
+        }
+
+        // Check that stopping a slave does not stop anything else
+        // (it used to stop the master too, because the port was not being supplied!)
+        slaves.get(0).stop();
+        EntityTestUtils.assertAttributeEqualsEventually(slaves.get(0), Startable.SERVICE_UP, false);
+        
+        assertEquals(master.getAttribute(Startable.SERVICE_UP), Boolean.TRUE);
+        for (RedisSlave slave : slaves.subList(1, slaves.size())) {
+            assertEquals(slave.getAttribute(Startable.SERVICE_UP), Boolean.TRUE);
+        }
+        
+        // Check that stopping cluster will stop everything
+        cluster.stop();
+
+        EntityTestUtils.assertAttributeEqualsEventually(cluster, Startable.SERVICE_UP, false);
+        assertEquals(master.getAttribute(Startable.SERVICE_UP), Boolean.FALSE);
+        for (RedisSlave slave : slaves) {
+            assertEquals(slave.getAttribute(Startable.SERVICE_UP), Boolean.FALSE);
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/redis/RedisEc2LiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/redis/RedisEc2LiveTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/redis/RedisEc2LiveTest.java
new file mode 100644
index 0000000..3d0b421
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/redis/RedisEc2LiveTest.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.redis;
+
+import javax.annotation.Nullable;
+
+import org.apache.brooklyn.entity.nosql.redis.RedisStore;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.AbstractEc2LiveTest;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.location.Location;
+import brooklyn.test.EntityTestUtils;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableList;
+
+public class RedisEc2LiveTest extends AbstractEc2LiveTest {
+
+    @SuppressWarnings("unused")
+    private static final Logger LOG = LoggerFactory.getLogger(RedisEc2LiveTest.class);
+
+    @Override
+    protected void doTest(Location loc) throws Exception {
+        RedisStore redis = app.createAndManageChild(EntitySpec.create(RedisStore.class));
+        app.start(ImmutableList.of(loc));
+        EntityTestUtils.assertAttributeEqualsEventually(redis, RedisStore.SERVICE_UP, true);
+
+        JedisSupport support = new JedisSupport(redis);
+        support.redisTest();
+        // Confirm sensors are valid
+        EntityTestUtils.assertPredicateEventuallyTrue(redis, new Predicate<RedisStore>() {
+            @Override public boolean apply(@Nullable RedisStore input) {
+                return input != null &&
+                        input.getAttribute(RedisStore.UPTIME) > 0 &&
+                        input.getAttribute(RedisStore.TOTAL_COMMANDS_PROCESSED) >= 0 &&
+                        input.getAttribute(RedisStore.TOTAL_CONNECTIONS_RECEIVED) >= 0 &&
+                        input.getAttribute(RedisStore.EXPIRED_KEYS) >= 0 &&
+                        input.getAttribute(RedisStore.EVICTED_KEYS) >= 0 &&
+                        input.getAttribute(RedisStore.KEYSPACE_HITS) >= 0 &&
+                        input.getAttribute(RedisStore.KEYSPACE_MISSES) >= 0;
+            }
+        });
+
+    }
+
+    @Test(enabled=false)
+    public void testDummy() {} // Convince the TestNG IDE integration that this really does have test methods
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/redis/RedisIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/redis/RedisIntegrationTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/redis/RedisIntegrationTest.java
new file mode 100644
index 0000000..8af953f
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/redis/RedisIntegrationTest.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.redis;
+
+import javax.annotation.Nullable;
+
+import org.apache.brooklyn.entity.nosql.redis.RedisStore;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.location.Location;
+import brooklyn.location.basic.PortRanges;
+import brooklyn.test.EntityTestUtils;
+import brooklyn.test.entity.TestApplication;
+import brooklyn.util.time.Duration;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+
+/**
+ * Test the operation of the {@link RedisStore} class.
+ */
+public class RedisIntegrationTest {
+
+    private TestApplication app;
+    private Location loc;
+    private RedisStore redis;
+
+    @BeforeMethod(alwaysRun=true)
+    public void setup() {
+        app = TestApplication.Factory.newManagedInstanceForTests();
+        loc = app.newLocalhostProvisioningLocation();
+    }
+
+    @AfterMethod(alwaysRun=true)
+    public void shutdown() {
+        if (app != null) Entities.destroyAll(app.getManagementContext());
+    }
+
+    /**
+     * Test that the server starts up and sets SERVICE_UP correctly.
+     */
+    @Test(groups = { "Integration" })
+    public void canStartupAndShutdown() throws Exception {
+        redis = app.createAndManageChild(EntitySpec.create(RedisStore.class));
+        app.start(ImmutableList.of(loc));
+
+        EntityTestUtils.assertAttributeEqualsEventually(redis, Startable.SERVICE_UP, true);
+
+        redis.stop();
+
+        EntityTestUtils.assertAttributeEqualsEventually(redis, Startable.SERVICE_UP, false);
+    }
+
+    /**
+     * Test that a client can connect to the service.
+     */
+    @Test(groups = { "Integration" })
+    public void testRedisConnection() throws Exception {
+        redis = app.createAndManageChild(EntitySpec.create(RedisStore.class));
+        app.start(ImmutableList.of(loc));
+
+        EntityTestUtils.assertAttributeEqualsEventually(redis, Startable.SERVICE_UP, true);
+
+        JedisSupport support = new JedisSupport(redis);
+        support.redisTest();
+    }
+
+    /**
+     * Test that we get sensors from an instance on a non-default port.
+     */
+    @Test(groups = { "Integration" })
+    public void testNonStandardPort() throws Exception {
+        redis = app.createAndManageChild(EntitySpec.create(RedisStore.class)
+                .configure(RedisStore.REDIS_PORT, PortRanges.fromString("10000+")));
+        app.start(ImmutableList.of(loc));
+
+        EntityTestUtils.assertAttributeEqualsEventually(redis, Startable.SERVICE_UP, true);
+        JedisSupport support = new JedisSupport(redis);
+        support.redisTest();
+
+        // Increase timeout because test was failing on jenkins sometimes. The log shows only one 
+        // call to `info server` (for obtaining uptime) which took 26 seconds; then 4 seconds later 
+        // this assert failed (with it checking every 500ms). The response did correctly contain
+        // `uptime_in_seconds:27`.
+        EntityTestUtils.assertPredicateEventuallyTrue(ImmutableMap.of("timeout", Duration.FIVE_MINUTES), redis, new Predicate<RedisStore>() {
+            @Override public boolean apply(@Nullable RedisStore input) {
+                return input != null &&
+                        input.getAttribute(RedisStore.UPTIME) > 0 &&
+                        input.getAttribute(RedisStore.TOTAL_COMMANDS_PROCESSED) >= 0 &&
+                        input.getAttribute(RedisStore.TOTAL_CONNECTIONS_RECEIVED) >= 0 &&
+                        input.getAttribute(RedisStore.EXPIRED_KEYS) >= 0 &&
+                        input.getAttribute(RedisStore.EVICTED_KEYS) >= 0 &&
+                        input.getAttribute(RedisStore.KEYSPACE_HITS) >= 0 &&
+                        input.getAttribute(RedisStore.KEYSPACE_MISSES) >= 0;
+            }
+        });
+    }
+}
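
JedisSupport itself is not in this hunk. A rough sketch of what such a helper typically looks like, assuming the Jedis client library; the constructor signature and the redisTest() name match the calls above, everything else is illustrative:

    import static org.testng.Assert.assertEquals;

    import org.apache.brooklyn.entity.nosql.redis.RedisStore;

    import redis.clients.jedis.Jedis;

    import brooklyn.entity.basic.Attributes;

    class JedisSupportSketch {
        private final String host;
        private final int port;

        JedisSupportSketch(RedisStore redis) {
            // HOSTNAME and REDIS_PORT are both published by the entity once it is up
            this.host = redis.getAttribute(Attributes.HOSTNAME);
            this.port = redis.getAttribute(RedisStore.REDIS_PORT);
        }

        /** Writes a key and reads it back, proving the server accepts client connections. */
        void redisTest() {
            Jedis jedis = new Jedis(host, port);
            try {
                jedis.set("brooklyn-test-key", "hello");
                assertEquals(jedis.get("brooklyn-test-key"), "hello");
            } finally {
                jedis.disconnect();
            }
        }
    }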

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/riak/RiakClusterEc2LiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/riak/RiakClusterEc2LiveTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/riak/RiakClusterEc2LiveTest.java
new file mode 100644
index 0000000..883299f
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/riak/RiakClusterEc2LiveTest.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.riak;
+
+import org.apache.brooklyn.entity.nosql.riak.RiakCluster;
+import org.apache.brooklyn.entity.nosql.riak.RiakNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.AbstractEc2LiveTest;
+import brooklyn.entity.basic.Attributes;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.location.Location;
+import brooklyn.test.EntityTestUtils;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
+
+public class RiakClusterEc2LiveTest extends AbstractEc2LiveTest {
+    @SuppressWarnings("unused")
+    private static final Logger LOG = LoggerFactory.getLogger(RiakNodeEc2LiveTest.class);
+
+    @Override
+    protected void doTest(Location loc) throws Exception {
+        RiakCluster cluster = app.createAndManageChild(EntitySpec.create(RiakCluster.class)
+                .configure(RiakCluster.INITIAL_SIZE, 3)
+                .configure(RiakCluster.MEMBER_SPEC, EntitySpec.create(RiakNode.class)));
+        app.start(ImmutableList.of(loc));
+
+        EntityTestUtils.assertAttributeEqualsEventually(cluster, RiakNode.SERVICE_UP, true);
+
+        RiakNode first = (RiakNode) Iterables.get(cluster.getMembers(), 0);
+        RiakNode second = (RiakNode) Iterables.get(cluster.getMembers(), 1);
+
+        assertNodesUpAndInCluster(first, second);
+        
+        EntityTestUtils.assertAttributeEqualsEventually(cluster, Attributes.SERVICE_UP, true);
+    }
+    
+    private void assertNodesUpAndInCluster(final RiakNode... nodes) {
+        for (final RiakNode node : nodes) {
+            EntityTestUtils.assertAttributeEqualsEventually(node, RiakNode.SERVICE_UP, true);
+            EntityTestUtils.assertAttributeEqualsEventually(node, RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER, true);
+        }
+    }
+
+    @Test(enabled = false)
+    public void testDummy() {
+    } // Convince TestNG IDE integration that this really does have test methods
+
+
+    @Override
+    public void test_Ubuntu_12_0() throws Exception {
+        //Override to add the custom securityGroup for opening Riak ports.
+        // Image: {id=us-east-1/ami-d0f89fb9, providerId=ami-d0f89fb9, name=ubuntu/images/ebs/ubuntu-precise-12.04-amd64-server-20130411.1, location={scope=REGION, id=us-east-1, description=us-east-1, parent=aws-ec2, iso3166Codes=[US-VA]}, os={family=ubuntu, arch=paravirtual, version=12.04, description=099720109477/ubuntu/images/ebs/ubuntu-precise-12.04-amd64-server-20130411.1, is64Bit=true}, description=099720109477/ubuntu/images/ebs/ubuntu-precise-12.04-amd64-server-20130411.1, version=20130411.1, status=AVAILABLE[available], loginUser=ubuntu, userMetadata={owner=099720109477, rootDeviceType=ebs, virtualizationType=paravirtual, hypervisor=xen}}
+        runTest(ImmutableMap.of("imageId", "us-east-1/ami-d0f89fb9", "loginUser", "ubuntu", "hardwareId", SMALL_HARDWARE_ID, "securityGroups", "RiakSecurityGroup"));
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeEc2LiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeEc2LiveTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeEc2LiveTest.java
new file mode 100644
index 0000000..1d69103
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeEc2LiveTest.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.riak;
+
+import org.apache.brooklyn.entity.nosql.riak.RiakNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.annotations.Test;
+
+import com.google.common.collect.ImmutableList;
+
+import brooklyn.entity.AbstractEc2LiveTest;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.location.Location;
+import brooklyn.test.EntityTestUtils;
+
+public class RiakNodeEc2LiveTest extends AbstractEc2LiveTest {
+
+    @SuppressWarnings("unused")
+    private static final Logger LOG = LoggerFactory.getLogger(RiakNodeEc2LiveTest.class);
+
+    @Override
+    protected void doTest(Location loc) throws Exception {
+        RiakNode entity = app.createAndManageChild(EntitySpec.create(RiakNode.class));
+        app.start(ImmutableList.of(loc));
+
+        EntityTestUtils.assertAttributeEqualsEventually(entity, RiakNode.SERVICE_UP, true);
+
+    }
+
+    @Test(enabled = false)
+    public void testDummy() {
+    } // Convince TestNG IDE integration that this really does have test methods
+
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeGoogleComputeLiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeGoogleComputeLiveTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeGoogleComputeLiveTest.java
new file mode 100644
index 0000000..a5877c4
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeGoogleComputeLiveTest.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.riak;
+
+import org.apache.brooklyn.entity.nosql.riak.RiakCluster;
+import org.apache.brooklyn.entity.nosql.riak.RiakNode;
+import org.testng.annotations.Test;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+
+import brooklyn.entity.AbstractGoogleComputeLiveTest;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.location.Location;
+import brooklyn.test.EntityTestUtils;
+
+public class RiakNodeGoogleComputeLiveTest extends AbstractGoogleComputeLiveTest {
+    @Override
+    protected void doTest(Location loc) throws Exception {
+        RiakCluster cluster = app.createAndManageChild(EntitySpec.create(RiakCluster.class)
+                .configure(RiakCluster.INITIAL_SIZE, 2)
+                .configure(RiakCluster.MEMBER_SPEC, EntitySpec.create(RiakNode.class)));
+        app.start(ImmutableList.of(loc));
+
+        EntityTestUtils.assertAttributeEqualsEventually(cluster, RiakCluster.SERVICE_UP, true);
+
+        RiakNode first = (RiakNode) Iterables.get(cluster.getMembers(), 0);
+        RiakNode second = (RiakNode) Iterables.get(cluster.getMembers(), 1);
+
+        EntityTestUtils.assertAttributeEqualsEventually(first, RiakNode.SERVICE_UP, true);
+        EntityTestUtils.assertAttributeEqualsEventually(second, RiakNode.SERVICE_UP, true);
+
+        EntityTestUtils.assertAttributeEqualsEventually(first, RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER, true);
+        EntityTestUtils.assertAttributeEqualsEventually(second, RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER, true);
+
+    }
+
+    @Test(groups = {"Live"})
+    @Override
+    public void test_DefaultImage() throws Exception {
+        super.test_DefaultImage();
+    }
+
+    @Test(enabled = false)
+    public void testDummy() {
+    } // Convince TestNG IDE integration that this really does have test methods
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeIntegrationTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeIntegrationTest.java
new file mode 100644
index 0000000..50c946f
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeIntegrationTest.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.riak;
+
+import static org.testng.Assert.assertFalse;
+
+import org.apache.brooklyn.entity.nosql.riak.RiakNode;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import com.google.common.collect.ImmutableList;
+
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
+import brooklyn.test.EntityTestUtils;
+import brooklyn.test.entity.TestApplication;
+
+public class RiakNodeIntegrationTest {
+
+    private TestApplication app;
+    private LocalhostMachineProvisioningLocation localhostProvisioningLocation;
+
+    @BeforeMethod(alwaysRun = true)
+    public void setUp() throws Exception {
+        localhostProvisioningLocation = new LocalhostMachineProvisioningLocation();
+        app = TestApplication.Factory.newManagedInstanceForTests();
+    }
+
+    @AfterMethod(alwaysRun = true)
+    public void tearDown() throws Exception {
+        if (app != null) Entities.destroyAll(app.getManagementContext());
+    }
+
+
+    @Test(groups = "Integration")
+    public void testCanStartAndStop() throws Exception {
+        RiakNode entity = app.createAndManageChild(EntitySpec.create(RiakNode.class)
+                .configure(RiakNode.SUGGESTED_VERSION, "2.1.1"));
+        app.start(ImmutableList.of(localhostProvisioningLocation));
+
+        EntityTestUtils.assertAttributeEqualsEventually(entity, Startable.SERVICE_UP, true);
+        entity.stop();
+        assertFalse(entity.getAttribute(Startable.SERVICE_UP));
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeSoftlayerLiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeSoftlayerLiveTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeSoftlayerLiveTest.java
new file mode 100644
index 0000000..069a920
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeSoftlayerLiveTest.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.riak;
+
+import org.apache.brooklyn.entity.nosql.riak.RiakNode;
+import org.testng.annotations.BeforeMethod;
+
+import com.google.common.collect.ImmutableList;
+
+import brooklyn.entity.AbstractSoftlayerLiveTest;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.location.Location;
+import brooklyn.test.EntityTestUtils;
+
+public class RiakNodeSoftlayerLiveTest extends AbstractSoftlayerLiveTest {
+
+    @BeforeMethod(alwaysRun=true)
+    public void setUp() throws Exception {
+        super.setUp();
+    }
+
+    @Override
+    protected void doTest(Location loc) throws Exception {
+        RiakNode entity = app.createAndManageChild(EntitySpec.create(RiakNode.class)
+                .configure(RiakNode.SUGGESTED_VERSION, "2.1.1"));
+        app.start(ImmutableList.of(loc));
+
+        EntityTestUtils.assertAttributeEqualsEventually(entity, RiakNode.SERVICE_UP, true);
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/solr/AbstractSolrServerTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/solr/AbstractSolrServerTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/solr/AbstractSolrServerTest.java
new file mode 100644
index 0000000..381a78d
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/solr/AbstractSolrServerTest.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.solr;
+
+import org.apache.brooklyn.entity.nosql.solr.SolrServer;
+import org.testng.annotations.BeforeMethod;
+
+import brooklyn.entity.BrooklynAppLiveTestSupport;
+import brooklyn.location.Location;
+
+/**
+ * Solr test framework for integration and live tests.
+ */
+public class AbstractSolrServerTest extends BrooklynAppLiveTestSupport {
+
+    protected Location testLocation;
+    protected SolrServer solr;
+
+    @BeforeMethod(alwaysRun = true)
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        testLocation = app.newLocalhostProvisioningLocation();
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/solr/SolrJSupport.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/solr/SolrJSupport.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/solr/SolrJSupport.java
new file mode 100644
index 0000000..d192c05
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/solr/SolrJSupport.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.solr;
+
+import java.util.Map;
+
+import org.apache.brooklyn.entity.nosql.solr.SolrServer;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.common.SolrDocument;
+import org.apache.solr.common.SolrInputDocument;
+
+import brooklyn.entity.basic.Attributes;
+
+/**
+ * Solr testing using SolrJ API.
+ */
+public class SolrJSupport {
+
+    private final HttpSolrServer server;
+    
+    public SolrJSupport(SolrServer node, String core) {
+        this(node.getAttribute(Attributes.HOSTNAME), node.getSolrPort(), core);
+    }
+    
+    public SolrJSupport(String hostname, int solrPort, String core) {
+        server = new HttpSolrServer(String.format("http://%s:%d/solr/%s", hostname, solrPort, core));
+        server.setMaxRetries(1);
+        server.setConnectionTimeout(5000);
+        server.setSoTimeout(5000);
+    }
+
+    public void commit() throws Exception {
+        server.commit();
+    }
+
+    public void addDocument(Map<String, Object> fields) throws Exception {
+        SolrInputDocument doc = new SolrInputDocument();
+        for (String field : fields.keySet()) {
+            doc.setField(field, fields.get(field));
+        }
+        server.add(doc, 100);
+    }
+
+    public Iterable<SolrDocument> getDocuments() throws Exception {
+        SolrQuery solrQuery = new SolrQuery();
+        solrQuery.setQuery("*:*");
+        
+        return server.query(solrQuery).getResults();
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/solr/SolrServerEc2LiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/solr/SolrServerEc2LiveTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/solr/SolrServerEc2LiveTest.java
new file mode 100644
index 0000000..ef2f166
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/solr/SolrServerEc2LiveTest.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.solr;
+
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertTrue;
+
+import org.apache.brooklyn.entity.nosql.solr.SolrServer;
+import org.apache.solr.common.SolrDocument;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.entity.AbstractEc2LiveTest;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.location.Location;
+import brooklyn.test.EntityTestUtils;
+import brooklyn.util.collections.MutableMap;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
+
+public class SolrServerEc2LiveTest extends AbstractEc2LiveTest {
+
+    private static final Logger log = LoggerFactory.getLogger(SolrServerEc2LiveTest.class);
+
+    @Override
+    protected void doTest(Location loc) throws Exception {
+        log.info("Testing Solr on {}", loc);
+
+        SolrServer solr = app.createAndManageChild(EntitySpec.create(SolrServer.class)
+                .configure(SolrServer.SOLR_CORE_CONFIG, ImmutableMap.of("example", "classpath://solr/example.tgz")));
+        app.start(ImmutableList.of(loc));
+
+        EntityTestUtils.assertAttributeEqualsEventually(solr, Startable.SERVICE_UP, true);
+
+        SolrJSupport client = new SolrJSupport(solr, "example");
+
+        Iterable<SolrDocument> results = client.getDocuments();
+        assertTrue(Iterables.isEmpty(results));
+
+        client.addDocument(MutableMap.<String, Object>of("id", "1", "description", "first"));
+        client.addDocument(MutableMap.<String, Object>of("id", "2", "description", "second"));
+        client.addDocument(MutableMap.<String, Object>of("id", "3", "description", "third"));
+        client.commit();
+
+        results = client.getDocuments();
+        assertEquals(Iterables.size(results), 3);
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/solr/SolrServerIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/solr/SolrServerIntegrationTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/solr/SolrServerIntegrationTest.java
new file mode 100644
index 0000000..23ca974
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/solr/SolrServerIntegrationTest.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.solr;
+
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertTrue;
+
+import org.apache.brooklyn.entity.nosql.solr.SolrServer;
+import org.apache.solr.common.SolrDocument;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.test.EntityTestUtils;
+import brooklyn.util.collections.MutableMap;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
+
+/**
+ * Solr integration tests.
+ *
+ * Test the operation of the {@link SolrServer} class.
+ */
+public class SolrServerIntegrationTest extends AbstractSolrServerTest {
+
+    /**
+     * Test that a node starts and sets SERVICE_UP correctly.
+     */
+    @Test(groups = "Integration")
+    public void canStartupAndShutdown() {
+        solr = app.createAndManageChild(EntitySpec.create(SolrServer.class));
+        app.start(ImmutableList.of(testLocation));
+
+        EntityTestUtils.assertAttributeEqualsEventually(solr, Startable.SERVICE_UP, true);
+        Entities.dumpInfo(app);
+
+        solr.stop();
+
+        EntityTestUtils.assertAttributeEqualsEventually(solr, Startable.SERVICE_UP, false);
+    }
+
+    /**
+     * Test that a core can be created and used with SolrJ client.
+     */
+    @Test(groups = "Integration")
+    public void testConnection() throws Exception {
+        solr = app.createAndManageChild(EntitySpec.create(SolrServer.class)
+                .configure(SolrServer.SOLR_CORE_CONFIG, ImmutableMap.of("example", "classpath://solr/example.tgz")));
+        app.start(ImmutableList.of(testLocation));
+
+        EntityTestUtils.assertAttributeEqualsEventually(solr, Startable.SERVICE_UP, true);
+
+        SolrJSupport client = new SolrJSupport(solr, "example");
+
+        Iterable<SolrDocument> results = client.getDocuments();
+        assertTrue(Iterables.isEmpty(results));
+
+        client.addDocument(MutableMap.<String, Object>of("id", "1", "description", "first"));
+        client.addDocument(MutableMap.<String, Object>of("id", "2", "description", "second"));
+        client.addDocument(MutableMap.<String, Object>of("id", "3", "description", "third"));
+        client.commit();
+
+        results = client.getDocuments();
+        assertEquals(Iterables.size(results), 3);
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/solr/SolrServerLiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/solr/SolrServerLiveTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/solr/SolrServerLiveTest.java
new file mode 100644
index 0000000..d9fc27f
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/solr/SolrServerLiveTest.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.solr;
+
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertTrue;
+
+import java.util.Map;
+
+import org.apache.brooklyn.entity.nosql.solr.SolrServer;
+import org.apache.solr.common.SolrDocument;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.annotations.DataProvider;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.test.EntityTestUtils;
+import brooklyn.util.collections.MutableMap;
+import brooklyn.util.text.Strings;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
+
+/**
+ * Solr live tests.
+ *
+ * Test the operation of the {@link SolrServer} class using the jclouds {@code rackspace-cloudservers-uk}
+ * and {@code aws-ec2} providers, with different OS images. The tests use the {@link SolrJSupport} class
+ * to exercise the node, and will need to have {@code brooklyn.jclouds.provider.identity} and {@code .credential}
+ * set, usually in the {@code .brooklyn/brooklyn.properties} file.
+ */
+public class SolrServerLiveTest extends AbstractSolrServerTest {
+
+    private static final Logger log = LoggerFactory.getLogger(SolrServerLiveTest.class);
+
+    @DataProvider(name = "virtualMachineData")
+    public Object[][] provideVirtualMachineData() {
+        return new Object[][] { // ImageId, Provider, Region, Description (for logging)
+            new Object[] { "eu-west-1/ami-0307d674", "aws-ec2", "eu-west-1", "Ubuntu Server 14.04 LTS (HVM), SSD Volume Type" },
+            new Object[] { "LON/f9b690bf-88eb-43c2-99cf-391f2558732e", "rackspace-cloudservers-uk", "", "Ubuntu 12.04 LTS (Precise Pangolin)" }, 
+            new Object[] { "LON/a84b1592-6817-42da-a57c-3c13f3cfc1da", "rackspace-cloudservers-uk", "", "CentOS 6.5 (PVHVM)" }, 
+        };
+    }
+
+    @Test(groups = "Live", dataProvider = "virtualMachineData")
+    protected void testOperatingSystemProvider(String imageId, String provider, String region, String description) throws Exception {
+        log.info("Testing Solr on {}{} using {} ({})", new Object[] { provider, Strings.isNonEmpty(region) ? ":" + region : "", description, imageId });
+
+        Map<String, String> properties = MutableMap.of("imageId", imageId);
+        testLocation = app.getManagementContext().getLocationRegistry()
+                .resolve(provider + (Strings.isNonEmpty(region) ? ":" + region : ""), properties);
+        solr = app.createAndManageChild(EntitySpec.create(SolrServer.class)
+                .configure(SolrServer.SOLR_CORE_CONFIG, ImmutableMap.of("example", "classpath://solr/example.tgz")));
+        app.start(ImmutableList.of(testLocation));
+
+        EntityTestUtils.assertAttributeEqualsEventually(solr, Startable.SERVICE_UP, true);
+
+        SolrJSupport client = new SolrJSupport(solr, "example");
+
+        Iterable<SolrDocument> results = client.getDocuments();
+        assertTrue(Iterables.isEmpty(results));
+
+        client.addDocument(MutableMap.<String, Object>of("id", "1", "description", "first"));
+        client.addDocument(MutableMap.<String, Object>of("id", "2", "description", "second"));
+        client.addDocument(MutableMap.<String, Object>of("id", "3", "description", "third"));
+        client.commit();
+
+        results = client.getDocuments();
+        assertEquals(Iterables.size(results), 3);
+    }
+}
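
Per the class Javadoc, these live tests expect jclouds credentials in ~/.brooklyn/brooklyn.properties. An illustrative fragment for the two providers exercised here (all values are placeholders):

    brooklyn.jclouds.aws-ec2.identity=AKIA................
    brooklyn.jclouds.aws-ec2.credential=<secret-access-key>
    brooklyn.jclouds.rackspace-cloudservers-uk.identity=<username>
    brooklyn.jclouds.rackspace-cloudservers-uk.credential=<api-key>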

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/usage/cli/src/main/resources/brooklyn/default.catalog.bom
----------------------------------------------------------------------
diff --git a/usage/cli/src/main/resources/brooklyn/default.catalog.bom b/usage/cli/src/main/resources/brooklyn/default.catalog.bom
index 9cc4afd..839662a 100644
--- a/usage/cli/src/main/resources/brooklyn/default.catalog.bom
+++ b/usage/cli/src/main/resources/brooklyn/default.catalog.bom
@@ -183,7 +183,7 @@ brooklyn.catalog:
                             $brooklyn:entity("riak-cluster").attributeWhenReady("main.uri"))
                             
       # use the off-the-shelf Riak cluster
-      - type:           brooklyn.entity.nosql.riak.RiakCluster
+      - type:           org.apache.brooklyn.entity.nosql.riak.RiakCluster
         id:             riak-cluster
         initialSize:    3
         # and add a policy to scale based on ops per minute

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/usage/launcher/src/test/resources/cassandra-blueprint.yaml
----------------------------------------------------------------------
diff --git a/usage/launcher/src/test/resources/cassandra-blueprint.yaml b/usage/launcher/src/test/resources/cassandra-blueprint.yaml
index 3af6e7b..1b08802 100644
--- a/usage/launcher/src/test/resources/cassandra-blueprint.yaml
+++ b/usage/launcher/src/test/resources/cassandra-blueprint.yaml
@@ -18,7 +18,7 @@
 #
 name: cassandra-cluster-app
 services:
-- type: brooklyn.entity.nosql.cassandra.CassandraCluster
+- type: org.apache.brooklyn.entity.nosql.cassandra.CassandraCluster
   name: Cassandra Cluster
   brooklyn.config:
     cluster.initial.size: 5

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/usage/launcher/src/test/resources/couchbase-cluster-singleNode.yaml
----------------------------------------------------------------------
diff --git a/usage/launcher/src/test/resources/couchbase-cluster-singleNode.yaml b/usage/launcher/src/test/resources/couchbase-cluster-singleNode.yaml
index f40c779..f1f426c 100644
--- a/usage/launcher/src/test/resources/couchbase-cluster-singleNode.yaml
+++ b/usage/launcher/src/test/resources/couchbase-cluster-singleNode.yaml
@@ -21,7 +21,7 @@ name: Couchbase One Bucket
 location: softlayer:wdc01
 
 services:
-- type: brooklyn.entity.nosql.couchbase.CouchbaseCluster
+- type: org.apache.brooklyn.entity.nosql.couchbase.CouchbaseCluster
   initialSize: 1
   intialQuorumSize: 1
   adminUsername: Administrator

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/usage/launcher/src/test/resources/couchbase-cluster.yaml
----------------------------------------------------------------------
diff --git a/usage/launcher/src/test/resources/couchbase-cluster.yaml b/usage/launcher/src/test/resources/couchbase-cluster.yaml
index b85884b..48fbc90 100644
--- a/usage/launcher/src/test/resources/couchbase-cluster.yaml
+++ b/usage/launcher/src/test/resources/couchbase-cluster.yaml
@@ -21,7 +21,7 @@ name: Couchbase One Bucket
 location: softlayer:wdc01
 
 services:
-- type: brooklyn.entity.nosql.couchbase.CouchbaseCluster
+- type: org.apache.brooklyn.entity.nosql.couchbase.CouchbaseCluster
   initialSize: 3
   intialQuorumSize: 2
   adminUsername: Administrator

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/usage/launcher/src/test/resources/couchbase-node.yaml
----------------------------------------------------------------------
diff --git a/usage/launcher/src/test/resources/couchbase-node.yaml b/usage/launcher/src/test/resources/couchbase-node.yaml
index 63a6348..01db0a8 100644
--- a/usage/launcher/src/test/resources/couchbase-node.yaml
+++ b/usage/launcher/src/test/resources/couchbase-node.yaml
@@ -21,6 +21,6 @@ name: Couchbase Node
 location: softlayer:wdc01
 
 services:
-- type: brooklyn.entity.nosql.couchbase.CouchbaseNode
+- type: org.apache.brooklyn.entity.nosql.couchbase.CouchbaseNode
   adminUsername: Administrator
   adminPassword: Password



[19/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerSshDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerSshDriver.java
deleted file mode 100644
index 83c81bf..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerSshDriver.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import brooklyn.entity.nosql.mongodb.AbstractMongoDBSshDriver;
-import brooklyn.entity.nosql.mongodb.MongoDBDriver;
-import brooklyn.location.basic.SshMachineLocation;
-
-public class MongoDBConfigServerSshDriver extends AbstractMongoDBSshDriver implements MongoDBDriver {
-    
-    public MongoDBConfigServerSshDriver(MongoDBConfigServerImpl entity, SshMachineLocation machine) {
-        super(entity, machine);
-    }
-    
-    @Override
-    public MongoDBConfigServerImpl getEntity() {
-        return MongoDBConfigServerImpl.class.cast(super.getEntity());
-    }
-
-    @Override
-    public void launch() {
-        launch(getArgsBuilderWithDefaults(getEntity())
-                .add("--configsvr")
-                .add("--dbpath", getDataDirectory()));
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouter.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouter.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouter.java
deleted file mode 100644
index 00f35c6..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouter.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import org.apache.brooklyn.catalog.Catalog;
-import brooklyn.config.ConfigKey;
-import brooklyn.entity.basic.ConfigKeys;
-import brooklyn.entity.nosql.mongodb.AbstractMongoDBServer;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.Sensors;
-import brooklyn.util.time.Duration;
-
-import com.google.common.reflect.TypeToken;
-
-@Catalog(name="MongoDB Router",
-        description="MongoDB (from \"humongous\") is a scalable, high-performance, open source NoSQL database",
-        iconUrl="classpath:///mongodb-logo.png")
-@ImplementedBy(MongoDBRouterImpl.class)
-public interface MongoDBRouter extends AbstractMongoDBServer {
-
-    @SuppressWarnings("serial")
-    ConfigKey<Iterable<String>> CONFIG_SERVERS = ConfigKeys.newConfigKey(
-            new TypeToken<Iterable<String>>(){}, "mongodb.router.config.servers", "List of host names and ports of the config servers");
-    
-    AttributeSensor<Integer> SHARD_COUNT = Sensors.newIntegerSensor("mongodb.router.config.shard.count", "Number of shards that have been added");
-    
-    AttributeSensor<Boolean> RUNNING = Sensors.newBooleanSensor("mongodb.router.running", "Indicates that the router is running, "
-            + "and can be used to add shards, but is not necessarity available for CRUD operations (e.g. if no shards have been added)");
-
-    /**
-     * @throws IllegalStateException if times out.
-     */
-    public void waitForServiceUp(Duration duration);
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterCluster.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterCluster.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterCluster.java
deleted file mode 100644
index 773af6b..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterCluster.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import java.util.Collection;
-
-import brooklyn.entity.group.DynamicCluster;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.Sensors;
-
-@ImplementedBy(MongoDBRouterClusterImpl.class)
-public interface MongoDBRouterCluster extends DynamicCluster {
-
-    AttributeSensor<MongoDBRouter> ANY_ROUTER = Sensors.newSensor(MongoDBRouter.class, "mongodb.routercluster.any", 
-            "When set, can be used to access one of the routers in the cluster (usually the first). This will only be set once "
-            + "at least one shard has been added, and the router is available for CRUD operations");
-    
-    AttributeSensor<MongoDBRouter> ANY_RUNNING_ROUTER = Sensors.newSensor(MongoDBRouter.class, "mongodb.routercluster.any.running", 
-            "When set, can be used to access one of the running routers in the cluster (usually the first). This should only be used " 
-            + "to add shards as it does not guarantee that the router is available for CRUD operations");
-
-    /**
-     * @return One of the routers in the cluster if available, null otherwise
-     */
-    MongoDBRouter getAnyRouter();
-    
-    /**
-     * @return One of the running routers in the cluster. This should only be used to add shards as it does not guarantee that 
-     * the router is available for CRUD operations
-     */
-    MongoDBRouter getAnyRunningRouter();
-    
-    /**
-     * @return All routers in the cluster
-     */
-    Collection<MongoDBRouter> getRouters();
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterClusterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterClusterImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterClusterImpl.java
deleted file mode 100644
index 78c6c11..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterClusterImpl.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import java.util.Collection;
-
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.EntityPredicates;
-import brooklyn.entity.group.AbstractMembershipTrackingPolicy;
-import brooklyn.entity.group.DynamicClusterImpl;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.event.SensorEvent;
-import brooklyn.event.SensorEventListener;
-import brooklyn.location.Location;
-import brooklyn.policy.PolicySpec;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-
-public class MongoDBRouterClusterImpl extends DynamicClusterImpl implements MongoDBRouterCluster {
-
-    @Override
-    public void init() {
-        super.init();
-        subscribeToChildren(this, MongoDBRouter.RUNNING, new SensorEventListener<Boolean>() {
-            @Override public void onEvent(SensorEvent<Boolean> event) {
-                setAnyRouter();
-            }
-        });
-    }
-    
-    @Override
-    public void start(Collection<? extends Location> locations) {
-        super.start(locations);
-        addPolicy(PolicySpec.create(MemberTrackingPolicy.class)
-                .displayName("Router cluster membership tracker")
-                .configure("group", this));
-    }
-    
-    public static class MemberTrackingPolicy extends AbstractMembershipTrackingPolicy {
-        @Override protected void onEntityEvent(EventType type, Entity member) {
-            ((MongoDBRouterClusterImpl)super.entity).setAnyRouter();
-        }
-        @Override protected void onEntityRemoved(Entity member) {
-            ((MongoDBRouterClusterImpl)super.entity).setAnyRouter();
-        }
-        @Override protected void onEntityChange(Entity member) {
-            ((MongoDBRouterClusterImpl)super.entity).setAnyRouter();
-        }
-    }
-    
-    protected void setAnyRouter() {
-        setAttribute(MongoDBRouterCluster.ANY_ROUTER, Iterables.tryFind(getRouters(), 
-                EntityPredicates.attributeEqualTo(Startable.SERVICE_UP, true)).orNull());
-
-        setAttribute(
-                MongoDBRouterCluster.ANY_RUNNING_ROUTER, 
-                Iterables.tryFind(getRouters(), EntityPredicates.attributeEqualTo(MongoDBRouter.RUNNING, true))
-                .orNull());
-    }
-    
-    @Override
-    public Collection<MongoDBRouter> getRouters() {
-        return ImmutableList.copyOf(Iterables.filter(getMembers(), MongoDBRouter.class));
-    }
-    
-    @Override
-    protected EntitySpec<?> getMemberSpec() {
-        if (super.getMemberSpec() != null)
-            return super.getMemberSpec();
-        return EntitySpec.create(MongoDBRouter.class);
-    }
-
-    @Override
-    public MongoDBRouter getAnyRouter() {
-        return getAttribute(MongoDBRouterCluster.ANY_ROUTER);
-    }
-    
-    @Override
-    public MongoDBRouter getAnyRunningRouter() {
-        return getAttribute(MongoDBRouterCluster.ANY_RUNNING_ROUTER);
-    }
- 
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterDriver.java
deleted file mode 100644
index 693750d..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterDriver.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import brooklyn.entity.basic.SoftwareProcessDriver;
-
-public interface MongoDBRouterDriver extends SoftwareProcessDriver {
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterImpl.java
deleted file mode 100644
index 19398a8..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterImpl.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import java.util.concurrent.Callable;
-import java.util.concurrent.TimeUnit;
-
-import brooklyn.entity.basic.SoftwareProcessImpl;
-import brooklyn.entity.nosql.mongodb.MongoDBClientSupport;
-import brooklyn.event.feed.function.FunctionFeed;
-import brooklyn.event.feed.function.FunctionPollConfig;
-
-import com.google.common.base.Functions;
-
-public class MongoDBRouterImpl extends SoftwareProcessImpl implements MongoDBRouter {
-    
-    private volatile FunctionFeed functionFeed;
-
-    @Override
-    public Class<?> getDriverInterface() {
-        return MongoDBRouterDriver.class;
-    }
-
-    @Override
-    protected void connectSensors() {
-        super.connectSensors();
-        functionFeed = FunctionFeed.builder()
-                .entity(this)
-                .poll(new FunctionPollConfig<Boolean, Boolean>(RUNNING)
-                        .period(5, TimeUnit.SECONDS)
-                        .callable(new Callable<Boolean>() {
-                            @Override
-                            public Boolean call() throws Exception {
-                                MongoDBClientSupport clientSupport = MongoDBClientSupport.forServer(MongoDBRouterImpl.this);
-                                return clientSupport.ping();
-                            }
-                        })
-                        .onException(Functions.<Boolean>constant(false)))
-                .poll(new FunctionPollConfig<Boolean, Boolean>(SERVICE_UP)
-                        .period(5, TimeUnit.SECONDS)
-                        .callable(new Callable<Boolean>() {
-                            @Override
-                            public Boolean call() throws Exception {
-                                // TODO: This duplicates the check in AbstractMongoDBSshDriver.isRunning.
-                                // This feels like the right place, but the check could be made more
-                                // consistent across the different MongoDB types by using the FunctionFeed.
-                                MongoDBClientSupport clientSupport = MongoDBClientSupport.forServer(MongoDBRouterImpl.this);
-                                return clientSupport.ping() && MongoDBRouterImpl.this.getAttribute(SHARD_COUNT) > 0;
-                            }
-                        })
-                        .onException(Functions.<Boolean>constant(false)))
-                .poll(new FunctionPollConfig<Integer, Integer>(SHARD_COUNT)
-                        .period(5, TimeUnit.SECONDS)
-                        .callable(new Callable<Integer>() {
-                            public Integer call() throws Exception {
-                                MongoDBClientSupport clientSupport = MongoDBClientSupport.forServer(MongoDBRouterImpl.this);
-                                return (int) clientSupport.getShardCount();
-                            }    
-                        })
-                        .onException(Functions.<Integer>constant(-1)))
-                .build();
-    }
-
-    @Override
-    protected void disconnectSensors() {
-        super.disconnectSensors();
-        if (functionFeed != null) functionFeed.stop();
-    }
-}
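
The FunctionFeed idiom used by MongoDBRouterImpl above generalises to any
periodically computed sensor. A minimal sketch of the pattern, assuming an
entity with an Integer AttributeSensor MY_SENSOR and a computeValue() helper
(both hypothetical), wired up from connectSensors():

    import java.util.concurrent.Callable;
    import java.util.concurrent.TimeUnit;

    import brooklyn.event.feed.function.FunctionFeed;
    import brooklyn.event.feed.function.FunctionPollConfig;

    import com.google.common.base.Functions;

    // Inside an entity implementation, typically called from connectSensors():
    FunctionFeed feed = FunctionFeed.builder()
            .entity(this)                        // publish onto this entity
            .poll(new FunctionPollConfig<Integer, Integer>(MY_SENSOR)
                    .period(5, TimeUnit.SECONDS) // same cadence as the router feed
                    .callable(new Callable<Integer>() {
                        @Override
                        public Integer call() throws Exception {
                            return computeValue(); // hypothetical helper
                        }
                    })
                    .onException(Functions.<Integer>constant(-1))) // sentinel on failure
            .build();
    // Stop the feed in disconnectSensors(), as MongoDBRouterImpl does above.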

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterSshDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterSshDriver.java
deleted file mode 100644
index 0f58a68..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterSshDriver.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.entity.nosql.mongodb.AbstractMongoDBSshDriver;
-import brooklyn.location.basic.SshMachineLocation;
-
-import com.google.common.base.Joiner;
-import com.google.common.collect.ImmutableList;
-
-public class MongoDBRouterSshDriver extends AbstractMongoDBSshDriver implements MongoDBRouterDriver {
-    
-    private static final Logger LOG = LoggerFactory.getLogger(MongoDBRouterSshDriver.class);
-
-    public MongoDBRouterSshDriver(MongoDBRouterImpl entity, SshMachineLocation machine) {
-        super(entity, machine);
-    }
-    
-    @Override
-    public void launch() {
-        String configdb = Joiner.on(",").join(getEntity().getConfig(MongoDBRouter.CONFIG_SERVERS));
-        ImmutableList.Builder<String> argsBuilder = getArgsBuilderWithDefaults(MongoDBRouterImpl.class.cast(getEntity()))
-                .add("--configdb", configdb);
-        
-        String args = Joiner.on(" ").join(argsBuilder.build());
-        String command = String.format("%s/bin/mongos %s > out.log 2> err.log < /dev/null", getExpandedInstallDir(), args);
-        LOG.info(command);
-        newScript(LAUNCHING)
-                .updateTaskAndFailOnNonZeroResultCode()
-                .body.append(command).execute();
-    }
-
-}
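
The launch() method above assembles a single shell line of the form
"<installDir>/bin/mongos --configdb host1:port,... > out.log 2> err.log < /dev/null".
A self-contained sketch of the same string assembly; the install dir, ports and
config-server hostnames are illustrative values only:

    import com.google.common.base.Joiner;
    import com.google.common.collect.ImmutableList;

    public class MongosCommandSketch {
        public static void main(String[] args) {
            // Illustrative values; the real driver derives these from entity config.
            String installDir = "/opt/mongodb";
            String configdb = Joiner.on(",").join(
                    ImmutableList.of("cfg1:27019", "cfg2:27019", "cfg3:27019"));
            String cmdArgs = Joiner.on(" ").join(
                    ImmutableList.of("--port", "27017", "--configdb", configdb));
            System.out.println(String.format(
                    "%s/bin/mongos %s > out.log 2> err.log < /dev/null", installDir, cmdArgs));
        }
    }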

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardCluster.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardCluster.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardCluster.java
deleted file mode 100644
index 8d131a7..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardCluster.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import brooklyn.entity.group.DynamicCluster;
-import brooklyn.entity.proxying.ImplementedBy;
-
-@ImplementedBy(MongoDBShardClusterImpl.class)
-public interface MongoDBShardCluster extends DynamicCluster {
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardClusterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardClusterImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardClusterImpl.java
deleted file mode 100644
index 281f1c1..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardClusterImpl.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import java.net.UnknownHostException;
-import java.util.Collection;
-import java.util.Set;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.entity.Entity;
-import brooklyn.entity.group.DynamicClusterImpl;
-import brooklyn.entity.nosql.mongodb.MongoDBClientSupport;
-import brooklyn.entity.nosql.mongodb.MongoDBReplicaSet;
-import brooklyn.entity.nosql.mongodb.MongoDBServer;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.event.SensorEvent;
-import brooklyn.event.SensorEventListener;
-import brooklyn.location.Location;
-import brooklyn.util.exceptions.Exceptions;
-import brooklyn.util.text.Strings;
-import brooklyn.util.time.Duration;
-import brooklyn.util.time.Time;
-
-import com.google.common.base.Stopwatch;
-import com.google.common.collect.Sets;
-
-public class MongoDBShardClusterImpl extends DynamicClusterImpl implements MongoDBShardCluster {
-
-    private static final Logger LOG = LoggerFactory.getLogger(MongoDBShardClusterImpl.class);
-    
-    // TODO: Need to use attributes for this in order to support brooklyn restart 
-    private Set<Entity> addedMembers = Sets.newConcurrentHashSet();
-
-    // TODO: Need to use attributes for this in order to support brooklyn restart 
-    private Set<Entity> addingMembers = Sets.newConcurrentHashSet();
-
-    /**
-     * For shard addition and removal.
-     * Used for retrying.
-     * 
-     * TODO Should use ExecutionManager.
-     */
-    private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
-
-    @Override
-    protected EntitySpec<?> getMemberSpec() {
-        EntitySpec<?> result = super.getMemberSpec();
-        if (result == null)
-            result = EntitySpec.create(MongoDBReplicaSet.class);
-        result.configure(DynamicClusterImpl.INITIAL_SIZE, getConfig(MongoDBShardedDeployment.SHARD_REPLICASET_SIZE));
-        return result;
-    }
-
-    @Override
-    public void start(Collection<? extends Location> locations) {
-        subscribeToMembers(this, Startable.SERVICE_UP, new SensorEventListener<Boolean>() {
-            public void onEvent(SensorEvent<Boolean> event) {
-                addShards();
-            }
-        });
-
-        super.start(locations);
-        
-        MongoDBRouterCluster routers = getParent().getAttribute(MongoDBShardedDeployment.ROUTER_CLUSTER);
-        subscribe(routers, MongoDBRouterCluster.ANY_RUNNING_ROUTER, new SensorEventListener<MongoDBRouter>() {
-            public void onEvent(SensorEvent<MongoDBRouter> event) {
-                if (event.getValue() != null)
-                    addShards();
-            }
-        });
-    }
-
-    @Override
-    public void stop() {
-        // TODO Note that after this the executor will not run if the set is restarted.
-        executor.shutdownNow();
-        super.stop();
-    }
-    
-    @Override
-    public void onManagementStopped() {
-        super.onManagementStopped();
-        executor.shutdownNow();
-    }
-
-    protected void addShards() {
-        MongoDBRouter router = getParent().getAttribute(MongoDBShardedDeployment.ROUTER_CLUSTER).getAttribute(MongoDBRouterCluster.ANY_RUNNING_ROUTER);
-        if (router == null) {
-            if (LOG.isTraceEnabled()) LOG.trace("Not adding shards because no running router in {}", this);
-            return;
-        }
-        
-        for (Entity member : this.getMembers()) {
-            if (member.getAttribute(Startable.SERVICE_UP) && !addingMembers.contains(member)) {
-                LOG.info("{} adding shard {}", new Object[] {MongoDBShardClusterImpl.this, member});
-                addingMembers.add(member);
-                addShardAsync(member);
-            }
-        }
-    }
-    
-    protected void addShardAsync(final Entity replicaSet) {
-        final Duration timeout = Duration.minutes(20);
-        final Stopwatch stopwatch = Stopwatch.createStarted();
-        final AtomicInteger attempts = new AtomicInteger();
-        
-        // TODO Don't use executor, use ExecutionManager; but following pattern in MongoDBReplicaSetImpl for now.
-        executor.submit(new Runnable() {
-            @Override
-            public void run() {
-                boolean reschedule;
-                MongoDBRouter router = getParent().getAttribute(MongoDBShardedDeployment.ROUTER_CLUSTER).getAttribute(MongoDBRouterCluster.ANY_RUNNING_ROUTER);
-                if (router == null) {
-                    LOG.debug("Rescheduling addition of shard {} because no running router for cluster {}", replicaSet, MongoDBShardClusterImpl.this);
-                    reschedule = true;
-                } else {
-                    MongoDBClientSupport client;
-                    try {
-                        client = MongoDBClientSupport.forServer(router);
-                    } catch (UnknownHostException e) {
-                        throw Exceptions.propagate(e);
-                    }
-                    
-                    MongoDBServer primary = replicaSet.getAttribute(MongoDBReplicaSet.PRIMARY_ENTITY);
-                    if (primary != null) {
-                        String addr = String.format("%s:%d", primary.getAttribute(MongoDBServer.SUBNET_HOSTNAME), primary.getAttribute(MongoDBServer.PORT));
-                        String replicaSetURL = ((MongoDBReplicaSet) replicaSet).getName() + "/" + addr;
-                        boolean added = client.addShardToRouter(replicaSetURL);
-                        if (added) {
-                            LOG.info("{} added shard {} via {}", new Object[] {MongoDBShardClusterImpl.this, replicaSetURL, router});
-                            addedMembers.add(replicaSet);
-                            reschedule = false;
-                        } else {
-                            LOG.debug("Rescheduling addition of shard {} because add failed via router {}", replicaSetURL, router);
-                            reschedule = true;
-                        }
-                    } else {
-                        LOG.debug("Rescheduling addition of shard {} because primary is null", replicaSet);
-                        reschedule = true;
-                    }
-                }
-                
-                if (reschedule) {
-                    int numAttempts = attempts.incrementAndGet();
-                    if (timeout.toMilliseconds() > stopwatch.elapsed(TimeUnit.MILLISECONDS)) {
-                        executor.schedule(this, 3, TimeUnit.SECONDS);
-                    } else {
-                        LOG.warn("Timeout after {} attempts ({}) adding shard {}; aborting", 
-                                new Object[] {numAttempts, Time.makeTimeStringRounded(stopwatch), replicaSet});
-                        addingMembers.remove(replicaSet);
-                    }
-                }
-            }
-        });
-    }
-}
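
The addShardAsync() retry pattern above -- submit a task, and on failure either
reschedule after a short delay or abort once a Stopwatch passes the timeout --
is reusable on its own. A stripped-down, self-contained sketch of that control
flow; tryOnce() is a stand-in for the real shard-addition call:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    import com.google.common.base.Stopwatch;

    public class RetrySketch {
        private static final ScheduledExecutorService executor =
                Executors.newSingleThreadScheduledExecutor();

        public static void main(String[] args) {
            final Stopwatch stopwatch = Stopwatch.createStarted();
            final AtomicInteger attempts = new AtomicInteger();
            final long timeoutMillis = TimeUnit.MINUTES.toMillis(20);

            executor.submit(new Runnable() {
                @Override
                public void run() {
                    if (tryOnce()) {
                        executor.shutdown();
                    } else if (timeoutMillis > stopwatch.elapsed(TimeUnit.MILLISECONDS)) {
                        attempts.incrementAndGet();
                        executor.schedule(this, 3, TimeUnit.SECONDS); // retry later
                    } else {
                        System.err.println("Giving up after " + attempts.get() + " attempts");
                        executor.shutdown();
                    }
                }

                private boolean tryOnce() {
                    return attempts.get() >= 2; // illustrative: succeed on the third try
                }
            });
        }
    }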

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeployment.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeployment.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeployment.java
deleted file mode 100644
index c214572..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeployment.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import org.apache.brooklyn.catalog.Catalog;
-import brooklyn.config.ConfigKey;
-import brooklyn.entity.Entity;
-import brooklyn.entity.Group;
-import brooklyn.entity.basic.ConfigKeys;
-import brooklyn.entity.nosql.mongodb.MongoDBReplicaSet;
-import brooklyn.entity.nosql.mongodb.MongoDBServer;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.entity.trait.Startable;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.Sensors;
-import brooklyn.util.flags.SetFromFlag;
-import brooklyn.util.time.Duration;
-
-import com.google.common.reflect.TypeToken;
-
-@Catalog(name="MongoDB Sharded Deployment",
-        description="MongoDB (from \"humongous\") is a scalable, high-performance, open source NoSQL database",
-        iconUrl="classpath:///mongodb-logo.png")
-@ImplementedBy(MongoDBShardedDeploymentImpl.class)
-public interface MongoDBShardedDeployment extends Entity, Startable {
-    @SetFromFlag("configClusterSize")
-    ConfigKey<Integer> CONFIG_CLUSTER_SIZE = ConfigKeys.newIntegerConfigKey("mongodb.config.cluster.size", 
-            "Number of config servers", 3);
-    
-    @SetFromFlag("initialRouterClusterSize")
-    ConfigKey<Integer> INITIAL_ROUTER_CLUSTER_SIZE = ConfigKeys.newIntegerConfigKey("mongodb.router.cluster.initial.size", 
-            "Initial number of routers (mongos)", 0);
-    
-    @SetFromFlag("initialShardClusterSize")
-    ConfigKey<Integer> INITIAL_SHARD_CLUSTER_SIZE = ConfigKeys.newIntegerConfigKey("mongodb.shard.cluster.initial.size", 
-            "Initial number of shards (replicasets)", 2);
-    
-    @SetFromFlag("shardReplicaSetSize")
-    ConfigKey<Integer> SHARD_REPLICASET_SIZE = ConfigKeys.newIntegerConfigKey("mongodb.shard.replicaset.size", 
-            "Number of servers (mongod) in each shard (replicaset)", 3);
-    
-    @SetFromFlag("routerUpTimeout")
-    ConfigKey<Duration> ROUTER_UP_TIMEOUT = ConfigKeys.newConfigKey(Duration.class, "mongodb.router.up.timeout", 
-            "Maximum time to wait for the routers to become available before adding the shards", Duration.FIVE_MINUTES);
-    
-    @SetFromFlag("coLocatedRouterGroup")
-    ConfigKey<Group> CO_LOCATED_ROUTER_GROUP = ConfigKeys.newConfigKey(Group.class, "mongodb.colocated.router.group", 
-            "Group to be monitored for the addition of new CoLocatedMongoDBRouter entities");
-    
-    @SuppressWarnings("serial")
-    ConfigKey<EntitySpec<?>> MONGODB_ROUTER_SPEC = ConfigKeys.newConfigKey(
-            new TypeToken<EntitySpec<?>>() {},
-            "mongodb.router.spec", 
-            "Spec for Router instances",
-            EntitySpec.create(MongoDBRouter.class));
-
-    @SuppressWarnings("serial")
-    ConfigKey<EntitySpec<?>> MONGODB_REPLICA_SET_SPEC = ConfigKeys.newConfigKey(
-            new TypeToken<EntitySpec<?>>() {},
-            "mongodb.replicaset.spec", 
-            "Spec for Replica Set",
-            EntitySpec.create(MongoDBReplicaSet.class)
-                    .configure(MongoDBReplicaSet.MEMBER_SPEC, EntitySpec.create(MongoDBServer.class)));
-
-    @SuppressWarnings("serial")
-    ConfigKey<EntitySpec<?>> MONGODB_CONFIG_SERVER_SPEC = ConfigKeys.newConfigKey(
-            new TypeToken<EntitySpec<?>>() {},
-            "mongodb.configserver.spec", 
-            "Spec for Config Server instances",
-            EntitySpec.create(MongoDBConfigServer.class));
-
-    public static AttributeSensor<MongoDBConfigServerCluster> CONFIG_SERVER_CLUSTER = Sensors.newSensor(
-            MongoDBConfigServerCluster.class, "mongodbshardeddeployment.configservers", "Config servers");
-    public static AttributeSensor<MongoDBRouterCluster> ROUTER_CLUSTER = Sensors.newSensor(
-            MongoDBRouterCluster.class, "mongodbshardeddeployment.routers", "Routers");
-    
-    public static AttributeSensor<MongoDBShardCluster> SHARD_CLUSTER = Sensors.newSensor(
-            MongoDBShardCluster.class, "mongodbshardeddeployment.shards", "Shards");
-    
-    public MongoDBConfigServerCluster getConfigCluster();
-    
-    public MongoDBRouterCluster getRouterCluster();
-    
-    public MongoDBShardCluster getShardCluster();
-}
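
Because every sizing knob above is a ConfigKey with a @SetFromFlag alias, a
sharded deployment can be tuned from YAML or programmatically. A minimal
programmatic sketch, assuming it runs somewhere (e.g. an application's init())
where the spec would subsequently be instantiated as a child entity:

    import brooklyn.entity.proxying.EntitySpec;

    EntitySpec<MongoDBShardedDeployment> spec = EntitySpec.create(MongoDBShardedDeployment.class)
            .configure(MongoDBShardedDeployment.CONFIG_CLUSTER_SIZE, 3)         // config servers
            .configure(MongoDBShardedDeployment.INITIAL_ROUTER_CLUSTER_SIZE, 2) // mongos routers
            .configure(MongoDBShardedDeployment.INITIAL_SHARD_CLUSTER_SIZE, 2)  // shards
            .configure(MongoDBShardedDeployment.SHARD_REPLICASET_SIZE, 3);      // mongod per shard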

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentImpl.java
deleted file mode 100644
index d4d04de..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentImpl.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import static brooklyn.event.basic.DependentConfiguration.attributeWhenReady;
-
-import java.util.Collection;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.enricher.Enrichers;
-import brooklyn.entity.Entity;
-import brooklyn.entity.Group;
-import brooklyn.entity.basic.AbstractEntity;
-import brooklyn.entity.basic.Attributes;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.basic.Lifecycle;
-import brooklyn.entity.basic.ServiceStateLogic;
-import brooklyn.entity.basic.ServiceStateLogic.ServiceNotUpLogic;
-import brooklyn.entity.group.AbstractMembershipTrackingPolicy;
-import brooklyn.entity.group.DynamicCluster;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.location.Location;
-import brooklyn.policy.PolicySpec;
-import brooklyn.util.exceptions.Exceptions;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-
-public class MongoDBShardedDeploymentImpl extends AbstractEntity implements MongoDBShardedDeployment {
-    
-    @SuppressWarnings("unused")
-    private static final Logger LOG = LoggerFactory.getLogger(MongoDBShardedDeploymentImpl.class);
-    
-    @Override
-    public void init() {
-        super.init();
-        
-        setAttribute(CONFIG_SERVER_CLUSTER, addChild(EntitySpec.create(MongoDBConfigServerCluster.class)
-                .configure(MongoDBConfigServerCluster.MEMBER_SPEC, getConfig(MONGODB_CONFIG_SERVER_SPEC))
-                .configure(DynamicCluster.INITIAL_SIZE, getConfig(CONFIG_CLUSTER_SIZE))));
-        setAttribute(ROUTER_CLUSTER, addChild(EntitySpec.create(MongoDBRouterCluster.class)
-                .configure(MongoDBRouterCluster.MEMBER_SPEC, getConfig(MONGODB_ROUTER_SPEC))
-                .configure(DynamicCluster.INITIAL_SIZE, getConfig(INITIAL_ROUTER_CLUSTER_SIZE))
-                .configure(MongoDBRouter.CONFIG_SERVERS, attributeWhenReady(getAttribute(CONFIG_SERVER_CLUSTER), MongoDBConfigServerCluster.CONFIG_SERVER_ADDRESSES))));
-        setAttribute(SHARD_CLUSTER, addChild(EntitySpec.create(MongoDBShardCluster.class)
-                .configure(MongoDBShardCluster.MEMBER_SPEC, getConfig(MONGODB_REPLICA_SET_SPEC))
-                .configure(DynamicCluster.INITIAL_SIZE, getConfig(INITIAL_SHARD_CLUSTER_SIZE))));
-        addEnricher(Enrichers.builder()
-                .propagating(MongoDBConfigServerCluster.CONFIG_SERVER_ADDRESSES)
-                .from(getAttribute(CONFIG_SERVER_CLUSTER))
-                .build());
-        
-        ServiceNotUpLogic.updateNotUpIndicator(this, Attributes.SERVICE_STATE_ACTUAL, "stopped");
-    }
-
-    @Override
-    public void start(Collection<? extends Location> locations) {
-        ServiceStateLogic.setExpectedState(this, Lifecycle.STARTING);
-        try {
-            final MongoDBRouterCluster routers = getAttribute(ROUTER_CLUSTER);
-            final MongoDBShardCluster shards = getAttribute(SHARD_CLUSTER);
-            List<DynamicCluster> clusters = ImmutableList.of(getAttribute(CONFIG_SERVER_CLUSTER), routers, shards);
-            Entities.invokeEffectorList(this, clusters, Startable.START, ImmutableMap.of("locations", locations))
-                .get();
-
-            if (getConfigRaw(MongoDBShardedDeployment.CO_LOCATED_ROUTER_GROUP, true).isPresent()) {
-                addPolicy(PolicySpec.create(ColocatedRouterTrackingPolicy.class)
-                        .displayName("Co-located router tracker")
-                        .configure("group", (Group)getConfig(MongoDBShardedDeployment.CO_LOCATED_ROUTER_GROUP)));
-            }
-            ServiceNotUpLogic.clearNotUpIndicator(this, Attributes.SERVICE_STATE_ACTUAL);
-            ServiceStateLogic.setExpectedState(this, Lifecycle.RUNNING);
-        } catch (Exception e) {
-            ServiceStateLogic.setExpectedState(this, Lifecycle.ON_FIRE);
-            // no need to log here; the effector invocation should do that
-            throw Exceptions.propagate(e);
-        }
-    }
-
-    public static class ColocatedRouterTrackingPolicy extends AbstractMembershipTrackingPolicy {
-        @Override
-        protected void onEntityAdded(Entity member) {
-            MongoDBRouterCluster cluster = entity.getAttribute(ROUTER_CLUSTER);
-            cluster.addMember(member.getAttribute(CoLocatedMongoDBRouter.ROUTER));
-        }
-        @Override
-        protected void onEntityRemoved(Entity member) {
-            MongoDBRouterCluster cluster = entity.getAttribute(ROUTER_CLUSTER);
-            cluster.removeMember(member.getAttribute(CoLocatedMongoDBRouter.ROUTER));
-        }
-    };
-
-    @Override
-    public void stop() {
-        ServiceStateLogic.setExpectedState(this, Lifecycle.STOPPING);
-        try {
-            Entities.invokeEffectorList(this, ImmutableList.of(getAttribute(CONFIG_SERVER_CLUSTER), getAttribute(ROUTER_CLUSTER), 
-                    getAttribute(SHARD_CLUSTER)), Startable.STOP).get();
-        } catch (Exception e) {
-            ServiceStateLogic.setExpectedState(this, Lifecycle.ON_FIRE);
-            throw Exceptions.propagate(e);
-        }
-        ServiceStateLogic.setExpectedState(this, Lifecycle.STOPPED);
-        ServiceNotUpLogic.updateNotUpIndicator(this, Attributes.SERVICE_STATE_ACTUAL, "stopped");
-    }
-    
-    @Override
-    public void restart() {
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public MongoDBConfigServerCluster getConfigCluster() {
-        return getAttribute(CONFIG_SERVER_CLUSTER);
-    }
-
-    @Override
-    public MongoDBRouterCluster getRouterCluster() {
-        return getAttribute(ROUTER_CLUSTER);
-    }
-
-    @Override
-    public MongoDBShardCluster getShardCluster() {
-        return getAttribute(SHARD_CLUSTER);
-    }
-
-}
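
The start()/stop() methods above follow a common Brooklyn lifecycle idiom: set
the expected state, do the work, then record RUNNING/STOPPED on success or
ON_FIRE on failure. A condensed sketch of that skeleton inside an
AbstractEntity subclass; doStartWork() is a placeholder for the real logic:

    import java.util.Collection;

    import brooklyn.entity.basic.Lifecycle;
    import brooklyn.entity.basic.ServiceStateLogic;
    import brooklyn.location.Location;
    import brooklyn.util.exceptions.Exceptions;

    public void start(Collection<? extends Location> locations) {
        ServiceStateLogic.setExpectedState(this, Lifecycle.STARTING);
        try {
            doStartWork(locations);  // placeholder for the real start logic
            ServiceStateLogic.setExpectedState(this, Lifecycle.RUNNING);
        } catch (Exception e) {
            ServiceStateLogic.setExpectedState(this, Lifecycle.ON_FIRE);
            throw Exceptions.propagate(e);  // surface the failure to the caller
        }
    }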

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisCluster.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisCluster.java b/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisCluster.java
deleted file mode 100644
index b1cf51a..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisCluster.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.redis;
-
-import org.apache.brooklyn.catalog.Catalog;
-import brooklyn.entity.Entity;
-import brooklyn.entity.group.DynamicCluster;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.entity.trait.Startable;
-
-/**
- * A cluster of {@link RedisStore}s with one master and a group of slaves.
- *
- * The slaves are contained in a {@link DynamicCluster} which can be resized by a policy if required.
- *
- * TODO add sensors with aggregated Redis statistics from cluster
- */
-@Catalog(name="Redis Cluster", description="Redis is an open-source, networked, in-memory, key-value data store with optional durability", iconUrl="classpath:///redis-logo.png")
-@ImplementedBy(RedisClusterImpl.class)
-public interface RedisCluster extends Entity, Startable {
-    
-    public RedisStore getMaster();
-    
-    public DynamicCluster getSlaves();
-}
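
Per the Javadoc above, the slaves sit in a plain DynamicCluster, so they can be
resized manually or by an attached policy. A hypothetical helper illustrating
manual use against a deployed RedisCluster:

    import brooklyn.entity.group.DynamicCluster;

    // Grow the replica pool of a running RedisCluster to the desired size;
    // a resizing policy on the slave cluster could do the same automatically.
    static void growSlaves(RedisCluster cluster, int desiredSlaves) {
        DynamicCluster slaves = cluster.getSlaves();
        slaves.resize(desiredSlaves);  // resize effector, invoked synchronously here
    }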

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisClusterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisClusterImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisClusterImpl.java
deleted file mode 100644
index 89ca43c..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisClusterImpl.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.redis;
-
-import java.util.Collection;
-
-import brooklyn.enricher.Enrichers;
-import brooklyn.entity.basic.AbstractEntity;
-import brooklyn.entity.basic.Lifecycle;
-import brooklyn.entity.basic.ServiceStateLogic;
-import brooklyn.entity.basic.ServiceStateLogic.ComputeServiceIndicatorsFromChildrenAndMembers;
-import brooklyn.entity.basic.ServiceStateLogic.ServiceProblemsLogic;
-import brooklyn.entity.group.DynamicCluster;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.Sensors;
-import brooklyn.location.Location;
-import brooklyn.util.collections.QuorumCheck.QuorumChecks;
-import brooklyn.util.exceptions.Exceptions;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-
-public class RedisClusterImpl extends AbstractEntity implements RedisCluster {
-
-    private static AttributeSensor<RedisStore> MASTER = Sensors.newSensor(RedisStore.class, "redis.master");
-    private static AttributeSensor<DynamicCluster> SLAVES = Sensors.newSensor(DynamicCluster.class, "redis.slaves");
-
-    public RedisClusterImpl() {
-    }
-
-    @Override
-    public RedisStore getMaster() {
-        return getAttribute(MASTER);
-    }
-    
-    @Override
-    public DynamicCluster getSlaves() {
-        return getAttribute(SLAVES);
-    }
-
-    @Override
-    public void init() {
-        super.init();
-
-        RedisStore master = addChild(EntitySpec.create(RedisStore.class));
-        setAttribute(MASTER, master);
-
-        DynamicCluster slaves = addChild(EntitySpec.create(DynamicCluster.class)
-                .configure(DynamicCluster.MEMBER_SPEC, EntitySpec.create(RedisSlave.class).configure(RedisSlave.MASTER, master)));
-        setAttribute(SLAVES, slaves);
-
-        addEnricher(Enrichers.builder()
-                .propagating(RedisStore.HOSTNAME, RedisStore.ADDRESS, RedisStore.SUBNET_HOSTNAME, RedisStore.SUBNET_ADDRESS, RedisStore.REDIS_PORT)
-                .from(master)
-                .build());
-    }
-
-    @Override
-    protected void initEnrichers() {
-        super.initEnrichers();
-        ServiceStateLogic.newEnricherFromChildrenUp().
-            checkChildrenOnly().
-            requireUpChildren(QuorumChecks.all()).
-            configure(ComputeServiceIndicatorsFromChildrenAndMembers.IGNORE_ENTITIES_WITH_THESE_SERVICE_STATES, ImmutableSet.<Lifecycle>of()).
-            addTo(this);
-    }
-    
-    @Override
-    public void start(Collection<? extends Location> locations) {
-        ServiceStateLogic.setExpectedState(this, Lifecycle.STARTING);
-        ServiceProblemsLogic.clearProblemsIndicator(this, START);
-        try {
-            doStart(locations);
-            ServiceStateLogic.setExpectedState(this, Lifecycle.RUNNING);
-        } catch (Exception e) {
-            ServiceProblemsLogic.updateProblemsIndicator(this, START, "Start failed with error: "+e);
-            ServiceStateLogic.setExpectedState(this, Lifecycle.ON_FIRE);
-            throw Exceptions.propagate(e);
-        }
-    }
-
-    private void doStart(Collection<? extends Location> locations) {
-        RedisStore master = getMaster();
-        master.invoke(RedisStore.START, ImmutableMap.<String, Object>of("locations", ImmutableList.copyOf(locations))).getUnchecked();
-
-        DynamicCluster slaves = getSlaves();
-        slaves.invoke(DynamicCluster.START, ImmutableMap.<String, Object>of("locations", ImmutableList.copyOf(locations))).getUnchecked();
-    }
-
-    @Override
-    public void stop() {
-        ServiceStateLogic.setExpectedState(this, Lifecycle.STOPPING);
-        try {
-            doStop();
-            ServiceStateLogic.setExpectedState(this, Lifecycle.STOPPED);
-        } catch (Exception e) {
-            ServiceProblemsLogic.updateProblemsIndicator(this, STOP, "Stop failed with error: "+e);
-            ServiceStateLogic.setExpectedState(this, Lifecycle.ON_FIRE);
-            throw Exceptions.propagate(e);
-        }
-    }
-
-    private void doStop() {
-        getSlaves().invoke(DynamicCluster.STOP, ImmutableMap.<String, Object>of()).getUnchecked();
-        getMaster().invoke(RedisStore.STOP, ImmutableMap.<String, Object>of()).getUnchecked();
-    }
-
-    @Override
-    public void restart() {
-        throw new UnsupportedOperationException();
-    }
-}
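
One detail worth calling out in the implementation above: ordering. doStart()
brings the master up before the slaves (the slaves replicate from it), and
doStop() tears down in reverse so no slave outlives its replication source. A
condensed restatement of that invariant (startArgs/noArgs abbreviate the
ImmutableMap arguments used in the original methods):

    // Start: master first, then the slave cluster.
    getMaster().invoke(RedisStore.START, startArgs).getUnchecked();
    getSlaves().invoke(DynamicCluster.START, startArgs).getUnchecked();

    // Stop: slaves first, then the master.
    getSlaves().invoke(DynamicCluster.STOP, noArgs).getUnchecked();
    getMaster().invoke(RedisStore.STOP, noArgs).getUnchecked();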

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisShard.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisShard.java b/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisShard.java
deleted file mode 100644
index 38ac482..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisShard.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.redis;
-
-import brooklyn.entity.Entity;
-import brooklyn.entity.proxying.ImplementedBy;
-
-@ImplementedBy(RedisShardImpl.class)
-public interface RedisShard extends Entity {
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisShardImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisShardImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisShardImpl.java
deleted file mode 100644
index bf1cca6..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisShardImpl.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.redis;
-
-import brooklyn.entity.basic.AbstractEntity;
-
-public class RedisShardImpl extends AbstractEntity implements RedisShard {
-    public RedisShardImpl() {
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisSlave.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisSlave.java b/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisSlave.java
deleted file mode 100644
index db57c2c..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisSlave.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.redis;
-
-import brooklyn.config.ConfigKey;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.event.basic.BasicConfigKey;
-import brooklyn.util.flags.SetFromFlag;
-
-/**
- * A {@link RedisStore} configured as a slave.
- */
-@ImplementedBy(RedisSlaveImpl.class)
-public interface RedisSlave extends RedisStore {
-
-    @SetFromFlag("master")
-    ConfigKey<RedisStore> MASTER = new BasicConfigKey<RedisStore>(RedisStore.class, "redis.master", "Redis master");
-
-    @SetFromFlag("redisConfigTemplateUrl")
-    ConfigKey<String> REDIS_CONFIG_TEMPLATE_URL = new BasicConfigKey<String>(
-            String.class, "redis.config.templateUrl", "Template file (in freemarker format) for the redis.conf config file", 
-            "classpath://brooklyn/entity/nosql/redis/slave.conf");
-
-    RedisStore getMaster();
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisSlaveImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisSlaveImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisSlaveImpl.java
deleted file mode 100644
index 8e89058..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisSlaveImpl.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.redis;
-
-
-/**
- * A {@link RedisStore} configured as a slave.
- */
-public class RedisSlaveImpl extends RedisStoreImpl implements RedisSlave {
-
-    public RedisSlaveImpl() {
-    }
-
-    @Override
-    public RedisStore getMaster() {
-        return getConfig(MASTER);
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisStore.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisStore.java b/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisStore.java
deleted file mode 100644
index a05c314..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisStore.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.redis;
-
-import org.apache.brooklyn.catalog.Catalog;
-import brooklyn.config.ConfigKey;
-import brooklyn.entity.basic.ConfigKeys;
-import brooklyn.entity.basic.SoftwareProcess;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
-import brooklyn.event.basic.PortAttributeSensorAndConfigKey;
-import brooklyn.event.basic.Sensors;
-import brooklyn.util.flags.SetFromFlag;
-
-/**
- * An entity that represents a Redis key-value store service.
- */
-@Catalog(name="Redis Server", description="Redis is an open-source, networked, in-memory, key-value data store with optional durability", iconUrl="classpath:///redis-logo.png")
-@ImplementedBy(RedisStoreImpl.class)
-public interface RedisStore extends SoftwareProcess {
-
-    @SetFromFlag("version")
-    ConfigKey<String> SUGGESTED_VERSION =
-            ConfigKeys.newConfigKeyWithDefault(SoftwareProcess.SUGGESTED_VERSION, "2.8.4");
-
-    @SetFromFlag("downloadUrl")
-    BasicAttributeSensorAndConfigKey<String> DOWNLOAD_URL = new BasicAttributeSensorAndConfigKey<String>(
-            SoftwareProcess.DOWNLOAD_URL, "http://download.redis.io/releases/redis-${version}.tar.gz");
-
-    @SetFromFlag("redisPort")
-    PortAttributeSensorAndConfigKey REDIS_PORT = new PortAttributeSensorAndConfigKey("redis.port", "Redis port number", "6379+");
-
-    @SetFromFlag("redisConfigTemplateUrl")
-    ConfigKey<String> REDIS_CONFIG_TEMPLATE_URL = ConfigKeys.newConfigKey(
-            "redis.config.templateUrl", "Template file (in freemarker format) for the redis.conf config file", 
-            "classpath://brooklyn/entity/nosql/redis/redis.conf");
-
-    AttributeSensor<Integer> UPTIME = Sensors.newIntegerSensor("redis.uptime", "Redis uptime in seconds");
-
-    // See http://redis.io/commands/info for details of all information available
-    AttributeSensor<Integer> TOTAL_CONNECTIONS_RECEIVED = Sensors.newIntegerSensor("redis.connections.received.total", "Total number of connections accepted by the server");
-    AttributeSensor<Integer> TOTAL_COMMANDS_PROCESSED = Sensors.newIntegerSensor("redis.commands.processed.total", "Total number of commands processed by the server");
-    AttributeSensor<Integer> EXPIRED_KEYS = Sensors.newIntegerSensor("redis.keys.expired", "Total number of key expiration events");
-    AttributeSensor<Integer> EVICTED_KEYS = Sensors.newIntegerSensor("redis.keys.evicted", "Number of keys evicted due to the maxmemory limit");
-    AttributeSensor<Integer> KEYSPACE_HITS = Sensors.newIntegerSensor("redis.keyspace.hits", "Number of successful key lookups in the main dictionary");
-    AttributeSensor<Integer> KEYSPACE_MISSES = Sensors.newIntegerSensor("redis.keyspace.misses", "Number of failed key lookups in the main dictionary");
-
-    String getAddress();
-
-    Integer getRedisPort();
-
-}
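
DOWNLOAD_URL above is a template: Brooklyn's download resolver substitutes
${version}, which defaults to 2.8.4 via SUGGESTED_VERSION. A self-contained
sketch of the substitution; plain string replacement stands in for the richer
resolver:

    public class DownloadUrlSketch {
        public static void main(String[] args) {
            String template = "http://download.redis.io/releases/redis-${version}.tar.gz";
            String version = "2.8.4";  // the SUGGESTED_VERSION default above
            System.out.println(template.replace("${version}", version));
            // -> http://download.redis.io/releases/redis-2.8.4.tar.gz
        }
    }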

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisStoreDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisStoreDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisStoreDriver.java
deleted file mode 100644
index 5ea4c40..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisStoreDriver.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.redis;
-
-import brooklyn.entity.basic.SoftwareProcessDriver;
-
-public interface RedisStoreDriver extends SoftwareProcessDriver {
-
-    String getRunDir();
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisStoreImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisStoreImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisStoreImpl.java
deleted file mode 100644
index 7957099..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisStoreImpl.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.redis;
-
-import java.util.concurrent.TimeUnit;
-
-import javax.annotation.Nullable;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.entity.basic.SoftwareProcessImpl;
-import brooklyn.event.feed.ssh.SshFeed;
-import brooklyn.event.feed.ssh.SshPollConfig;
-import brooklyn.event.feed.ssh.SshPollValue;
-import brooklyn.event.feed.ssh.SshValueFunctions;
-import brooklyn.location.Location;
-import brooklyn.location.MachineLocation;
-import brooklyn.location.basic.SshMachineLocation;
-
-import com.google.common.base.Function;
-import com.google.common.base.Functions;
-import com.google.common.base.Optional;
-import com.google.common.base.Predicates;
-import com.google.common.base.Splitter;
-import com.google.common.collect.Iterables;
-
-/**
- * An entity that represents a Redis key-value store service.
- */
-public class RedisStoreImpl extends SoftwareProcessImpl implements RedisStore {
-    @SuppressWarnings("unused")
-    private static final Logger LOG = LoggerFactory.getLogger(RedisStore.class);
-
-    private transient SshFeed sshFeed;
-
-    public RedisStoreImpl() {
-    }
-
-    @Override
-    protected void connectSensors() {
-        super.connectSensors();
-
-        connectServiceUpIsRunning();
-
-        // Find an SshMachineLocation for the UPTIME feed
-        Optional<Location> location = Iterables.tryFind(getLocations(), Predicates.instanceOf(SshMachineLocation.class));
-        if (!location.isPresent()) throw new IllegalStateException("Could not find SshMachineLocation in list of locations");
-        SshMachineLocation machine = (SshMachineLocation) location.get();
-        String statsCommand = getDriver().getRunDir() + "/bin/redis-cli -p " + getRedisPort() + " info stats";
-
-        sshFeed = SshFeed.builder()
-                .entity(this)
-                .machine(machine)
-                .period(5, TimeUnit.SECONDS)
-                .poll(new SshPollConfig<Integer>(UPTIME)
-                        .command(getDriver().getRunDir() + "/bin/redis-cli -p " + getRedisPort() + " info server")
-                        .onFailureOrException(Functions.constant(-1))
-                        .onSuccess(infoFunction("uptime_in_seconds")))
-                .poll(new SshPollConfig<Integer>(TOTAL_CONNECTIONS_RECEIVED)
-                        .command(statsCommand)
-                        .onFailureOrException(Functions.constant(-1))
-                        .onSuccess(infoFunction("total_connections_received")))
-                .poll(new SshPollConfig<Integer>(TOTAL_COMMANDS_PROCESSED)
-                        .command(statsCommand)
-                        .onFailureOrException(Functions.constant(-1))
-                        .onSuccess(infoFunction("total_commands_processed")))
-                .poll(new SshPollConfig<Integer>(EXPIRED_KEYS)
-                        .command(statsCommand)
-                        .onFailureOrException(Functions.constant(-1))
-                        .onSuccess(infoFunction("expired_keys")))
-                .poll(new SshPollConfig<Integer>(EVICTED_KEYS)
-                        .command(statsCommand)
-                        .onFailureOrException(Functions.constant(-1))
-                        .onSuccess(infoFunction("evicted_keys")))
-                .poll(new SshPollConfig<Integer>(KEYSPACE_HITS)
-                        .command(statsCommand)
-                        .onFailureOrException(Functions.constant(-1))
-                        .onSuccess(infoFunction("keyspace_hits")))
-                .poll(new SshPollConfig<Integer>(KEYSPACE_MISSES)
-                        .command(statsCommand)
-                        .onFailureOrException(Functions.constant(-1))
-                        .onSuccess(infoFunction("keyspace_misses")))
-                .build();
-    }
-
-    /**
-     * Create a {@link Function} to retrieve a particular field value from a {@code redis-cli info}
-     * command.
-     * 
-     * @param field the info field to retrieve and convert
-     * @return a new function that converts a {@link SshPollValue} to an {@link Integer}
-     */
-    private static Function<SshPollValue, Integer> infoFunction(final String field) {
-        return Functions.compose(new Function<String, Integer>() {
-            @Override
-            public Integer apply(@Nullable String input) {
-                Optional<String> line = Iterables.tryFind(Splitter.on('\n').split(input), Predicates.containsPattern(field + ":"));
-                if (line.isPresent()) {
-                    String data = line.get().trim();
-                    int colon = data.indexOf(":");
-                    return Integer.parseInt(data.substring(colon + 1));
-                } else {
-                    throw new IllegalStateException("Data for field "+field+" not found: "+input);
-                }
-            }
-        }, SshValueFunctions.stdout());
-    }
-
-    @Override
-    public void disconnectSensors() {
-        disconnectServiceUpIsRunning();
-        if (sshFeed != null) sshFeed.stop();
-        super.disconnectSensors();
-    }
-
-    @Override
-    public Class<?> getDriverInterface() {
-        return RedisStoreDriver.class;
-    }
-
-    @Override
-    public RedisStoreDriver getDriver() {
-        return (RedisStoreDriver) super.getDriver();
-    }
-
-    @Override
-    public String getAddress() {
-        MachineLocation machine = getMachineOrNull();
-        return (machine != null) ? machine.getAddress().getHostAddress() : null;
-    }
-
-    @Override
-    public Integer getRedisPort() {
-        return getAttribute(RedisStore.REDIS_PORT);
-    }
-
-}
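
As context for the sensor wiring above: infoFunction keys each poll off a single
"field:value" line of redis-cli info output. A minimal standalone sketch of the
same parsing idiom follows; it uses only Guava, and the sample payload and class
name are illustrative rather than taken from the entity.

    import com.google.common.base.Optional;
    import com.google.common.base.Predicates;
    import com.google.common.base.Splitter;
    import com.google.common.collect.Iterables;

    public class RedisInfoParseExample {
        // Extracts an integer field from "redis-cli info"-style output,
        // mirroring the lookup performed by infoFunction.
        static int parseField(String info, String field) {
            Optional<String> line = Iterables.tryFind(
                    Splitter.on('\n').split(info),
                    Predicates.containsPattern(field + ":"));
            if (!line.isPresent()) {
                throw new IllegalStateException("Data for field " + field + " not found");
            }
            String data = line.get().trim();
            return Integer.parseInt(data.substring(data.indexOf(':') + 1));
        }

        public static void main(String[] args) {
            String sample = "uptime_in_seconds:42\ntotal_connections_received:7\n";
            System.out.println(parseField(sample, "total_connections_received")); // prints 7
        }
    }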

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisStoreSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisStoreSshDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisStoreSshDriver.java
deleted file mode 100644
index eece028..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/redis/RedisStoreSshDriver.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.redis;
-
-import static java.lang.String.format;
-
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.entity.basic.AbstractSoftwareProcessSshDriver;
-import brooklyn.entity.basic.Entities;
-import brooklyn.location.Location;
-import brooklyn.location.basic.SshMachineLocation;
-import brooklyn.util.collections.MutableMap;
-import brooklyn.util.os.Os;
-import brooklyn.util.ssh.BashCommands;
-
-import com.google.common.collect.ImmutableList;
-
-/**
- * Start a {@link RedisStore} in a {@link Location} accessible over ssh.
- */
-public class RedisStoreSshDriver extends AbstractSoftwareProcessSshDriver implements RedisStoreDriver {
-
-    private static final Logger LOG = LoggerFactory.getLogger(RedisStoreSshDriver.class);
-
-    public RedisStoreSshDriver(RedisStoreImpl entity, SshMachineLocation machine) {
-        super(entity, machine);
-    }
-
-    @Override
-    public void preInstall() {
-        resolver = Entities.newDownloader(this);
-        setExpandedInstallDir(Os.mergePaths(getInstallDir(), resolver.getUnpackedDirectoryName(format("redis-%s", getVersion()))));
-    }
-
-    @Override
-    public void install() {
-        List<String> urls = resolver.getTargets();
-        String saveAs = resolver.getFilename();
-
-        MutableMap<String, String> installGccPackageFlags = MutableMap.of(
-                "onlyifmissing", "gcc",
-                "yum", "gcc",
-                "apt", "gcc",
-                "port", null);
-        MutableMap<String, String> installMakePackageFlags = MutableMap.of(
-                "onlyifmissing", "make",
-                "yum", "make",
-                "apt", "make",
-                "port", null);
-
-        List<String> commands = ImmutableList.<String>builder()
-                .addAll(BashCommands.commandsToDownloadUrlsAs(urls, saveAs))
-                .add(BashCommands.INSTALL_TAR)
-                .add(BashCommands.INSTALL_CURL)
-                .add(BashCommands.installPackage(installGccPackageFlags, "redis-prerequisites-gcc"))
-                .add(BashCommands.installPackage(installMakePackageFlags, "redis-prerequisites-make"))
-                .add("tar xzfv " + saveAs)
-                .add(format("cd redis-%s", getVersion()))
-                .add("pushd deps")
-                .add("make lua hiredis linenoise")
-                .add("popd")
-                .add("make clean && make")
-                .build();
-
-        newScript(INSTALLING)
-                .failOnNonZeroResultCode()
-                .body.append(commands).execute();
-    }
-
-    @Override
-    public void customize() {
-        newScript(MutableMap.of("usePidFile", false), CUSTOMIZING)
-                .failOnNonZeroResultCode()
-                .body.append(
-                        format("cd %s", getExpandedInstallDir()),
-                        "make install PREFIX="+getRunDir())
-                .execute();
-
-        copyTemplate(getEntity().getConfig(RedisStore.REDIS_CONFIG_TEMPLATE_URL), "redis.conf");
-    }
-
-    @Override
-    public void launch() {
-        // TODO Should we redirect stdout/stderr: format(" >> %s/console 2>&1 </dev/null &", getRunDir())
-        newScript(MutableMap.of("usePidFile", false), LAUNCHING)
-                .failOnNonZeroResultCode()
-                .body.append("./bin/redis-server redis.conf")
-                .execute();
-    }
-
-    @Override
-    public boolean isRunning() {
-        return newScript(MutableMap.of("usePidFile", false), CHECK_RUNNING)
-                .body.append("./bin/redis-cli -p " + getEntity().getAttribute(RedisStore.REDIS_PORT) + " ping > /dev/null")
-                .execute() == 0;
-    }
-
-    /**
-     * Stops redis with the current configuration.
-     */
-    @Override
-    public void stop() {
-        int exitCode = newScript(MutableMap.of("usePidFile", false), STOPPING)
-                .body.append("./bin/redis-cli -p " + getEntity().getAttribute(RedisStore.REDIS_PORT) + " shutdown")
-                .execute();
-        // TODO: Good enough? Will cause warnings when trying to stop a server that is already not running.
-        if (exitCode != 0) {
-            LOG.warn("Unexpected exit code when stopping {}: {}", entity, exitCode);
-        }
-    }
-}
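
The isRunning() check above reduces to "redis-cli -p <port> ping" exiting with
status zero. For readers without a Brooklyn test rig, an equivalent standalone
probe might look like the sketch below; the binary name and port are assumptions.

    import java.io.IOException;

    public class RedisPingCheck {
        // Returns true if a redis-server answers "ping" on the given port.
        static boolean isRunning(String redisCli, int port) throws IOException, InterruptedException {
            Process p = new ProcessBuilder(redisCli, "-p", String.valueOf(port), "ping")
                    .redirectErrorStream(true)
                    .start();
            return p.waitFor() == 0; // redis-cli exits 0 when the server replies PONG
        }

        public static void main(String[] args) throws Exception {
            System.out.println(isRunning("redis-cli", 6379));
        }
    }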

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakCluster.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakCluster.java b/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakCluster.java
deleted file mode 100644
index 467e639..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakCluster.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.riak;
-
-import java.net.URI;
-import java.util.Map;
-
-import org.apache.brooklyn.catalog.Catalog;
-import brooklyn.config.ConfigKey;
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.Attributes;
-import brooklyn.entity.basic.ConfigKeys;
-import brooklyn.entity.group.DynamicCluster;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.Sensors;
-import brooklyn.util.flags.SetFromFlag;
-import brooklyn.util.time.Duration;
-
-import com.google.common.reflect.TypeToken;
-
-@Catalog(name="Riak Cluster", description="Riak is a distributed NoSQL key-value data store that offers "
-        + "extremely high availability, fault tolerance, operational simplicity and scalability.")
-@ImplementedBy(RiakClusterImpl.class)
-public interface RiakCluster extends DynamicCluster {
-
-    @SuppressWarnings("serial")
-    AttributeSensor<Map<Entity, String>> RIAK_CLUSTER_NODES = Sensors.newSensor(
-            new TypeToken<Map<Entity, String>>() {}, 
-            "riak.cluster.nodes", "Names of all active Riak nodes in the cluster <Entity,Riak Name>");
-
-    @SetFromFlag("delayBeforeAdvertisingCluster")
-    ConfigKey<Duration> DELAY_BEFORE_ADVERTISING_CLUSTER = ConfigKeys.newConfigKey(Duration.class, "riak.cluster.delayBeforeAdvertisingCluster", "Delay after cluster is started before checking and advertising its availability", Duration.seconds(2 * 60));
-
-    AttributeSensor<Boolean> IS_CLUSTER_INIT = Sensors.newBooleanSensor("riak.cluster.isClusterInit", "Flag to determine if the cluster was already initialized");
-
-    AttributeSensor<Boolean> IS_FIRST_NODE_SET = Sensors.newBooleanSensor("riak.cluster.isFirstNodeSet", "Flag to determine if the first node has been set");
-
-    AttributeSensor<String> NODE_LIST = Sensors.newStringSensor("riak.cluster.nodeList", "List of nodes (including ports), comma separated");
-
-    AttributeSensor<String> NODE_LIST_PB_PORT = Sensors.newStringSensor("riak.cluster.nodeListPbPort", "List of nodes (including ports for riak db clients), comma separated");
-
-    AttributeSensor<URI> RIAK_CONSOLE_URI = Attributes.MAIN_URI;
-
-    AttributeSensor<Integer> NODE_GETS_1MIN_PER_NODE = Sensors.newIntegerSensor("riak.node.gets.1m.perNode", "Gets in the last minute, averaged across cluster");
-    AttributeSensor<Integer> NODE_PUTS_1MIN_PER_NODE = Sensors.newIntegerSensor("riak.node.puts.1m.perNode", "Puts in the last minute, averaged across cluster");
-    AttributeSensor<Integer> NODE_OPS_1MIN_PER_NODE = Sensors.newIntegerSensor("riak.node.ops.1m.perNode", "Sum of node gets and puts in the last minute, averaged across cluster");
-
-}
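
RIAK_CLUSTER_NODES above relies on a Guava TypeToken to keep the generic
Map<Entity, String> type available at runtime, which a plain Class literal would
erase. A self-contained illustration of the pattern, with the value type
simplified to Map<String, String>:

    import java.util.Map;

    import com.google.common.reflect.TypeToken;

    public class TypeTokenExample {
        // The anonymous subclass captures the full generic type.
        static final TypeToken<Map<String, String>> NODES_TYPE =
                new TypeToken<Map<String, String>>() {};

        public static void main(String[] args) {
            System.out.println(NODES_TYPE); // java.util.Map<java.lang.String, java.lang.String>
        }
    }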


[26/26] incubator-brooklyn git commit: This closes #800

Posted by al...@apache.org.
This closes #800


Project: http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/commit/56e8c398
Tree: http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/tree/56e8c398
Diff: http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/diff/56e8c398

Branch: refs/heads/master
Commit: 56e8c3989a136999ea03dcb43cb0ebd209cdf2bc
Parents: 08662a7 d5cf528
Author: Aled Sage <al...@gmail.com>
Authored: Thu Aug 6 17:31:48 2015 +0100
Committer: Aled Sage <al...@gmail.com>
Committed: Thu Aug 6 17:31:48 2015 +0100

----------------------------------------------------------------------
 .../nosql-cassandra/cassandra.include.md        |   4 +-
 docs/guide/ops/catalog/index.md                 |   4 +-
 .../brooklyn/demo/CumulusRDFApplication.java    |  11 +-
 .../demo/HighAvailabilityCassandraCluster.java  |   5 +-
 .../brooklyn/demo/ResilientMongoDbApp.java      |   5 +-
 .../brooklyn/demo/RiakClusterExample.java       |   5 +-
 .../brooklyn/demo/SimpleCassandraCluster.java   |   3 +-
 .../brooklyn/demo/SimpleCouchDBCluster.java     |   3 +-
 .../brooklyn/demo/SimpleMongoDBReplicaSet.java  |   5 +-
 .../brooklyn/demo/SimpleRedisCluster.java       |   3 +-
 .../brooklyn/demo/WideAreaCassandraCluster.java |  11 +-
 .../brooklyn/demo/ha-cassandra-cluster.yaml     |   4 +-
 .../brooklyn/demo/simple-cassandra-cluster.yaml |   2 +-
 .../demo/wide-area-cassandra-cluster.yaml       |   8 +-
 .../brooklyn/demo/NodeJsTodoApplication.java    |   3 +-
 .../apache/brooklyn/demo/nodejs-riak-todo.yaml  |   2 +-
 .../org/apache/brooklyn/demo/nodejs-todo.yaml   |   2 +-
 software/nosql/pom.xml                          |  28 +-
 .../nosql/cassandra/CassandraCluster.java       |  30 -
 .../nosql/cassandra/CassandraClusterImpl.java   |  27 -
 .../nosql/cassandra/CassandraDatacenter.java    | 214 ------
 .../cassandra/CassandraDatacenterImpl.java      | 625 -----------------
 .../entity/nosql/cassandra/CassandraFabric.java |  80 ---
 .../nosql/cassandra/CassandraFabricImpl.java    | 395 -----------
 .../entity/nosql/cassandra/CassandraNode.java   | 231 -------
 .../nosql/cassandra/CassandraNodeDriver.java    |  47 --
 .../nosql/cassandra/CassandraNodeImpl.java      | 594 ----------------
 .../nosql/cassandra/CassandraNodeSshDriver.java | 420 -----------
 .../entity/nosql/cassandra/TokenGenerator.java  |  49 --
 .../entity/nosql/cassandra/TokenGenerators.java | 192 ------
 .../nosql/couchbase/CouchbaseCluster.java       | 134 ----
 .../nosql/couchbase/CouchbaseClusterImpl.java   | 597 ----------------
 .../entity/nosql/couchbase/CouchbaseNode.java   | 159 -----
 .../nosql/couchbase/CouchbaseNodeDriver.java    |  41 --
 .../nosql/couchbase/CouchbaseNodeImpl.java      | 269 --------
 .../nosql/couchbase/CouchbaseNodeSshDriver.java | 512 --------------
 .../nosql/couchbase/CouchbaseSyncGateway.java   |  75 --
 .../couchbase/CouchbaseSyncGatewayDriver.java   |  27 -
 .../couchbase/CouchbaseSyncGatewayImpl.java     |  82 ---
 .../CouchbaseSyncGatewaySshDriver.java          | 167 -----
 .../entity/nosql/couchdb/CouchDBCluster.java    |  48 --
 .../nosql/couchdb/CouchDBClusterImpl.java       |  51 --
 .../entity/nosql/couchdb/CouchDBNode.java       |  66 --
 .../entity/nosql/couchdb/CouchDBNodeDriver.java |  37 -
 .../entity/nosql/couchdb/CouchDBNodeImpl.java   | 106 ---
 .../nosql/couchdb/CouchDBNodeSshDriver.java     | 153 -----
 .../elasticsearch/ElasticSearchCluster.java     |  40 --
 .../elasticsearch/ElasticSearchClusterImpl.java |  45 --
 .../nosql/elasticsearch/ElasticSearchNode.java  |  88 ---
 .../elasticsearch/ElasticSearchNodeDriver.java  |  25 -
 .../elasticsearch/ElasticSearchNodeImpl.java    | 110 ---
 .../ElasticSearchNodeSshDriver.java             | 139 ----
 .../nosql/mongodb/AbstractMongoDBServer.java    |  61 --
 .../nosql/mongodb/AbstractMongoDBSshDriver.java | 175 -----
 .../entity/nosql/mongodb/MongoDBClient.java     |  65 --
 .../nosql/mongodb/MongoDBClientDriver.java      |  25 -
 .../entity/nosql/mongodb/MongoDBClientImpl.java |  43 --
 .../nosql/mongodb/MongoDBClientSshDriver.java   | 147 ----
 .../nosql/mongodb/MongoDBClientSupport.java     | 263 -------
 .../entity/nosql/mongodb/MongoDBDriver.java     |  24 -
 .../entity/nosql/mongodb/MongoDBReplicaSet.java |  84 ---
 .../nosql/mongodb/MongoDBReplicaSetImpl.java    | 404 -----------
 .../entity/nosql/mongodb/MongoDBServer.java     | 152 ----
 .../entity/nosql/mongodb/MongoDBServerImpl.java | 214 ------
 .../entity/nosql/mongodb/MongoDBSshDriver.java  |  57 --
 .../entity/nosql/mongodb/ReplicaSetConfig.java  | 278 --------
 .../nosql/mongodb/ReplicaSetMemberStatus.java   |  66 --
 .../sharding/CoLocatedMongoDBRouter.java        |  59 --
 .../sharding/CoLocatedMongoDBRouterImpl.java    |  70 --
 .../mongodb/sharding/MongoDBConfigServer.java   |  27 -
 .../sharding/MongoDBConfigServerCluster.java    |  35 -
 .../MongoDBConfigServerClusterImpl.java         |  57 --
 .../sharding/MongoDBConfigServerDriver.java     |  25 -
 .../sharding/MongoDBConfigServerImpl.java       |  36 -
 .../sharding/MongoDBConfigServerSshDriver.java  |  43 --
 .../nosql/mongodb/sharding/MongoDBRouter.java   |  51 --
 .../mongodb/sharding/MongoDBRouterCluster.java  |  54 --
 .../sharding/MongoDBRouterClusterImpl.java      | 101 ---
 .../mongodb/sharding/MongoDBRouterDriver.java   |  25 -
 .../mongodb/sharding/MongoDBRouterImpl.java     |  85 ---
 .../sharding/MongoDBRouterSshDriver.java        |  52 --
 .../mongodb/sharding/MongoDBShardCluster.java   |  27 -
 .../sharding/MongoDBShardClusterImpl.java       | 179 -----
 .../sharding/MongoDBShardedDeployment.java      | 102 ---
 .../sharding/MongoDBShardedDeploymentImpl.java  | 147 ----
 .../entity/nosql/redis/RedisCluster.java        |  41 --
 .../entity/nosql/redis/RedisClusterImpl.java    | 130 ----
 .../brooklyn/entity/nosql/redis/RedisShard.java |  26 -
 .../entity/nosql/redis/RedisShardImpl.java      |  26 -
 .../brooklyn/entity/nosql/redis/RedisSlave.java |  42 --
 .../entity/nosql/redis/RedisSlaveImpl.java      |  34 -
 .../brooklyn/entity/nosql/redis/RedisStore.java |  69 --
 .../entity/nosql/redis/RedisStoreDriver.java    |  27 -
 .../entity/nosql/redis/RedisStoreImpl.java      | 154 -----
 .../entity/nosql/redis/RedisStoreSshDriver.java | 131 ----
 .../brooklyn/entity/nosql/riak/RiakCluster.java |  65 --
 .../entity/nosql/riak/RiakClusterImpl.java      | 264 -------
 .../brooklyn/entity/nosql/riak/RiakNode.java    | 238 -------
 .../entity/nosql/riak/RiakNodeDriver.java       |  48 --
 .../entity/nosql/riak/RiakNodeImpl.java         | 306 ---------
 .../entity/nosql/riak/RiakNodeSshDriver.java    | 614 -----------------
 .../brooklyn/entity/nosql/solr/SolrServer.java  |  81 ---
 .../entity/nosql/solr/SolrServerDriver.java     |  30 -
 .../entity/nosql/solr/SolrServerImpl.java       |  75 --
 .../entity/nosql/solr/SolrServerSshDriver.java  | 158 -----
 .../nosql/cassandra/CassandraCluster.java       |  30 +
 .../nosql/cassandra/CassandraClusterImpl.java   |  27 +
 .../nosql/cassandra/CassandraDatacenter.java    | 215 ++++++
 .../cassandra/CassandraDatacenterImpl.java      | 625 +++++++++++++++++
 .../entity/nosql/cassandra/CassandraFabric.java |  80 +++
 .../nosql/cassandra/CassandraFabricImpl.java    | 395 +++++++++++
 .../entity/nosql/cassandra/CassandraNode.java   | 231 +++++++
 .../nosql/cassandra/CassandraNodeDriver.java    |  47 ++
 .../nosql/cassandra/CassandraNodeImpl.java      | 594 ++++++++++++++++
 .../nosql/cassandra/CassandraNodeSshDriver.java | 420 +++++++++++
 .../entity/nosql/cassandra/TokenGenerator.java  |  49 ++
 .../entity/nosql/cassandra/TokenGenerators.java | 192 ++++++
 .../nosql/couchbase/CouchbaseCluster.java       | 134 ++++
 .../nosql/couchbase/CouchbaseClusterImpl.java   | 597 ++++++++++++++++
 .../entity/nosql/couchbase/CouchbaseNode.java   | 159 +++++
 .../nosql/couchbase/CouchbaseNodeDriver.java    |  41 ++
 .../nosql/couchbase/CouchbaseNodeImpl.java      | 269 ++++++++
 .../nosql/couchbase/CouchbaseNodeSshDriver.java | 512 ++++++++++++++
 .../nosql/couchbase/CouchbaseSyncGateway.java   |  75 ++
 .../couchbase/CouchbaseSyncGatewayDriver.java   |  27 +
 .../couchbase/CouchbaseSyncGatewayImpl.java     |  82 +++
 .../CouchbaseSyncGatewaySshDriver.java          | 167 +++++
 .../entity/nosql/couchdb/CouchDBCluster.java    |  48 ++
 .../nosql/couchdb/CouchDBClusterImpl.java       |  51 ++
 .../entity/nosql/couchdb/CouchDBNode.java       |  66 ++
 .../entity/nosql/couchdb/CouchDBNodeDriver.java |  37 +
 .../entity/nosql/couchdb/CouchDBNodeImpl.java   | 106 +++
 .../nosql/couchdb/CouchDBNodeSshDriver.java     | 153 +++++
 .../elasticsearch/ElasticSearchCluster.java     |  40 ++
 .../elasticsearch/ElasticSearchClusterImpl.java |  45 ++
 .../nosql/elasticsearch/ElasticSearchNode.java  |  88 +++
 .../elasticsearch/ElasticSearchNodeDriver.java  |  25 +
 .../elasticsearch/ElasticSearchNodeImpl.java    | 110 +++
 .../ElasticSearchNodeSshDriver.java             | 139 ++++
 .../nosql/mongodb/AbstractMongoDBServer.java    |  61 ++
 .../nosql/mongodb/AbstractMongoDBSshDriver.java | 175 +++++
 .../entity/nosql/mongodb/MongoDBClient.java     |  65 ++
 .../nosql/mongodb/MongoDBClientDriver.java      |  25 +
 .../entity/nosql/mongodb/MongoDBClientImpl.java |  43 ++
 .../nosql/mongodb/MongoDBClientSshDriver.java   | 147 ++++
 .../nosql/mongodb/MongoDBClientSupport.java     | 263 +++++++
 .../entity/nosql/mongodb/MongoDBDriver.java     |  24 +
 .../entity/nosql/mongodb/MongoDBReplicaSet.java |  84 +++
 .../nosql/mongodb/MongoDBReplicaSetImpl.java    | 404 +++++++++++
 .../entity/nosql/mongodb/MongoDBServer.java     | 152 ++++
 .../entity/nosql/mongodb/MongoDBServerImpl.java | 214 ++++++
 .../entity/nosql/mongodb/MongoDBSshDriver.java  |  57 ++
 .../entity/nosql/mongodb/ReplicaSetConfig.java  | 278 ++++++++
 .../nosql/mongodb/ReplicaSetMemberStatus.java   |  66 ++
 .../sharding/CoLocatedMongoDBRouter.java        |  59 ++
 .../sharding/CoLocatedMongoDBRouterImpl.java    |  70 ++
 .../mongodb/sharding/MongoDBConfigServer.java   |  28 +
 .../sharding/MongoDBConfigServerCluster.java    |  35 +
 .../MongoDBConfigServerClusterImpl.java         |  57 ++
 .../sharding/MongoDBConfigServerDriver.java     |  25 +
 .../sharding/MongoDBConfigServerImpl.java       |  36 +
 .../sharding/MongoDBConfigServerSshDriver.java  |  44 ++
 .../nosql/mongodb/sharding/MongoDBRouter.java   |  52 ++
 .../mongodb/sharding/MongoDBRouterCluster.java  |  54 ++
 .../sharding/MongoDBRouterClusterImpl.java      | 101 +++
 .../mongodb/sharding/MongoDBRouterDriver.java   |  25 +
 .../mongodb/sharding/MongoDBRouterImpl.java     |  86 +++
 .../sharding/MongoDBRouterSshDriver.java        |  52 ++
 .../mongodb/sharding/MongoDBShardCluster.java   |  27 +
 .../sharding/MongoDBShardClusterImpl.java       | 179 +++++
 .../sharding/MongoDBShardedDeployment.java      | 103 +++
 .../sharding/MongoDBShardedDeploymentImpl.java  | 147 ++++
 .../entity/nosql/redis/RedisCluster.java        |  41 ++
 .../entity/nosql/redis/RedisClusterImpl.java    | 130 ++++
 .../brooklyn/entity/nosql/redis/RedisShard.java |  26 +
 .../entity/nosql/redis/RedisShardImpl.java      |  26 +
 .../brooklyn/entity/nosql/redis/RedisSlave.java |  42 ++
 .../entity/nosql/redis/RedisSlaveImpl.java      |  34 +
 .../brooklyn/entity/nosql/redis/RedisStore.java |  69 ++
 .../entity/nosql/redis/RedisStoreDriver.java    |  27 +
 .../entity/nosql/redis/RedisStoreImpl.java      | 154 +++++
 .../entity/nosql/redis/RedisStoreSshDriver.java | 131 ++++
 .../brooklyn/entity/nosql/riak/RiakCluster.java |  65 ++
 .../entity/nosql/riak/RiakClusterImpl.java      | 264 +++++++
 .../brooklyn/entity/nosql/riak/RiakNode.java    | 238 +++++++
 .../entity/nosql/riak/RiakNodeDriver.java       |  48 ++
 .../entity/nosql/riak/RiakNodeImpl.java         | 306 +++++++++
 .../entity/nosql/riak/RiakNodeSshDriver.java    | 614 +++++++++++++++++
 .../brooklyn/entity/nosql/solr/SolrServer.java  |  81 +++
 .../entity/nosql/solr/SolrServerDriver.java     |  30 +
 .../entity/nosql/solr/SolrServerImpl.java       |  75 ++
 .../entity/nosql/solr/SolrServerSshDriver.java  | 158 +++++
 .../entity/nosql/cassandra/cassandra-1.2.yaml   | 644 -----------------
 .../entity/nosql/cassandra/cassandra-2.0.yaml   | 688 -------------------
 .../cassandra/cassandra-multicloud-snitch.jar   | Bin 4729 -> 0 bytes
 .../cassandra/cassandra-multicloud-snitch.txt   |  33 -
 .../nosql/cassandra/cassandra-rackdc.properties |   6 -
 .../entity/nosql/couchbase/pillowfight.yaml     |  77 ---
 .../brooklyn/entity/nosql/couchdb/couch.ini     |  17 -
 .../brooklyn/entity/nosql/couchdb/couch.uri     |   2 -
 .../entity/nosql/mongodb/default-mongod.conf    |   7 -
 .../brooklyn/entity/nosql/mongodb/default.conf  |   2 -
 .../brooklyn/entity/nosql/redis/redis.conf      |  13 -
 .../brooklyn/entity/nosql/redis/slave.conf      |  16 -
 .../brooklyn/entity/nosql/riak/app.config       | 353 ----------
 .../nosql/riak/riak-cluster-with-solr.yaml      |  35 -
 .../brooklyn/entity/nosql/riak/riak-mac.conf    | 494 -------------
 .../nosql/riak/riak-with-webapp-cluster.yaml    |  42 --
 .../entity/nosql/riak/riak-with-webapp.yaml     |  36 -
 .../brooklyn/entity/nosql/riak/riak.conf        | 494 -------------
 .../brooklyn/entity/nosql/riak/riak.md          |  67 --
 .../brooklyn/entity/nosql/riak/riak.png         | Bin 110651 -> 0 bytes
 .../brooklyn/entity/nosql/riak/vm.args          |  64 --
 .../brooklyn/entity/nosql/solr/solr.xml         |  19 -
 .../entity/nosql/cassandra/cassandra-1.2.yaml   | 644 +++++++++++++++++
 .../entity/nosql/cassandra/cassandra-2.0.yaml   | 688 +++++++++++++++++++
 .../cassandra/cassandra-multicloud-snitch.jar   | Bin 0 -> 4729 bytes
 .../cassandra/cassandra-multicloud-snitch.txt   |  33 +
 .../nosql/cassandra/cassandra-rackdc.properties |   6 +
 .../entity/nosql/couchbase/pillowfight.yaml     |  77 +++
 .../brooklyn/entity/nosql/couchdb/couch.ini     |  17 +
 .../brooklyn/entity/nosql/couchdb/couch.uri     |   2 +
 .../entity/nosql/mongodb/default-mongod.conf    |   7 +
 .../brooklyn/entity/nosql/mongodb/default.conf  |   2 +
 .../brooklyn/entity/nosql/redis/redis.conf      |  13 +
 .../brooklyn/entity/nosql/redis/slave.conf      |  16 +
 .../brooklyn/entity/nosql/riak/app.config       | 353 ++++++++++
 .../nosql/riak/riak-cluster-with-solr.yaml      |  35 +
 .../brooklyn/entity/nosql/riak/riak-mac.conf    | 494 +++++++++++++
 .../nosql/riak/riak-with-webapp-cluster.yaml    |  42 ++
 .../entity/nosql/riak/riak-with-webapp.yaml     |  36 +
 .../apache/brooklyn/entity/nosql/riak/riak.conf | 494 +++++++++++++
 .../apache/brooklyn/entity/nosql/riak/riak.md   |  67 ++
 .../apache/brooklyn/entity/nosql/riak/riak.png  | Bin 0 -> 110651 bytes
 .../apache/brooklyn/entity/nosql/riak/vm.args   |  64 ++
 .../apache/brooklyn/entity/nosql/solr/solr.xml  |  19 +
 .../cassandra/AbstractCassandraNodeTest.java    |  41 --
 .../entity/nosql/cassandra/AstyanaxSupport.java | 330 ---------
 .../CassandraDatacenterIntegrationTest.java     | 149 ----
 .../cassandra/CassandraDatacenterLiveTest.java  | 308 ---------
 ...assandraDatacenterRebindIntegrationTest.java |  97 ---
 .../cassandra/CassandraDatacenterTest.java      | 233 -------
 .../nosql/cassandra/CassandraFabricTest.java    | 184 -----
 .../cassandra/CassandraNodeEc2LiveTest.java     |  50 --
 .../cassandra/CassandraNodeIntegrationTest.java | 190 -----
 .../nosql/cassandra/CassandraNodeLiveTest.java  |  74 --
 .../cassandra/NonNegTokenGeneratorTest.java     | 117 ----
 .../cassandra/PosNegTokenGeneratorTest.java     |  58 --
 .../nosql/couchbase/CouchbaseOfflineTest.java   |  62 --
 .../CouchbaseSyncGatewayEc2LiveTest.java        | 137 ----
 .../nosql/couchdb/AbstractCouchDBNodeTest.java  |  59 --
 .../nosql/couchdb/CouchDBClusterLiveTest.java   |  90 ---
 .../nosql/couchdb/CouchDBNodeEc2LiveTest.java   |  49 --
 .../couchdb/CouchDBNodeIntegrationTest.java     |  66 --
 .../nosql/couchdb/CouchDBNodeLiveTest.java      |  74 --
 .../entity/nosql/couchdb/JcouchdbSupport.java   |  77 ---
 .../ElasticSearchClusterIntegrationTest.java    | 128 ----
 .../ElasticSearchNodeIntegrationTest.java       | 112 ---
 .../nosql/mongodb/MongoDBEc2LiveTest.java       |  54 --
 .../nosql/mongodb/MongoDBIntegrationTest.java   |  91 ---
 .../mongodb/MongoDBRebindIntegrationTest.java   |  60 --
 .../mongodb/MongoDBReplicaSetEc2LiveTest.java   |  96 ---
 .../MongoDBReplicaSetIntegrationTest.java       | 206 ------
 .../mongodb/MongoDBRestartIntegrationTest.java  |  42 --
 .../nosql/mongodb/MongoDBSoftLayerLiveTest.java |  56 --
 .../entity/nosql/mongodb/MongoDBTestHelper.java | 124 ----
 .../nosql/mongodb/ReplicaSetConfigTest.java     | 239 -------
 .../MongoDBConfigServerIntegrationTest.java     |  66 --
 .../MongoDBShardedDeploymentEc2LiveTest.java    |  83 ---
 ...MongoDBShardedDeploymentIntegrationTest.java | 129 ----
 .../entity/nosql/redis/JedisSupport.java        |  74 --
 .../redis/RedisClusterIntegrationTest.java      | 109 ---
 .../entity/nosql/redis/RedisEc2LiveTest.java    |  66 --
 .../nosql/redis/RedisIntegrationTest.java       | 119 ----
 .../nosql/riak/RiakClusterEc2LiveTest.java      |  74 --
 .../entity/nosql/riak/RiakNodeEc2LiveTest.java  |  51 --
 .../riak/RiakNodeGoogleComputeLiveTest.java     |  62 --
 .../nosql/riak/RiakNodeIntegrationTest.java     |  64 --
 .../nosql/riak/RiakNodeSoftlayerLiveTest.java   |  45 --
 .../nosql/solr/AbstractSolrServerTest.java      |  41 --
 .../entity/nosql/solr/SolrJSupport.java         |  66 --
 .../nosql/solr/SolrServerEc2LiveTest.java       |  66 --
 .../nosql/solr/SolrServerIntegrationTest.java   |  84 ---
 .../entity/nosql/solr/SolrServerLiveTest.java   |  89 ---
 .../cassandra/AbstractCassandraNodeTest.java    |  42 ++
 .../entity/nosql/cassandra/AstyanaxSupport.java | 331 +++++++++
 .../CassandraDatacenterIntegrationTest.java     | 151 ++++
 .../cassandra/CassandraDatacenterLiveTest.java  | 310 +++++++++
 ...assandraDatacenterRebindIntegrationTest.java |  99 +++
 .../cassandra/CassandraDatacenterTest.java      | 235 +++++++
 .../nosql/cassandra/CassandraFabricTest.java    | 186 +++++
 .../cassandra/CassandraNodeEc2LiveTest.java     |  51 ++
 .../cassandra/CassandraNodeIntegrationTest.java | 191 +++++
 .../nosql/cassandra/CassandraNodeLiveTest.java  |  75 ++
 .../cassandra/NonNegTokenGeneratorTest.java     | 116 ++++
 .../cassandra/PosNegTokenGeneratorTest.java     |  57 ++
 .../nosql/couchbase/CouchbaseOfflineTest.java   |  63 ++
 .../CouchbaseSyncGatewayEc2LiveTest.java        | 140 ++++
 .../nosql/couchdb/AbstractCouchDBNodeTest.java  |  60 ++
 .../nosql/couchdb/CouchDBClusterLiveTest.java   |  92 +++
 .../nosql/couchdb/CouchDBNodeEc2LiveTest.java   |  50 ++
 .../couchdb/CouchDBNodeIntegrationTest.java     |  67 ++
 .../nosql/couchdb/CouchDBNodeLiveTest.java      |  75 ++
 .../entity/nosql/couchdb/JcouchdbSupport.java   |  78 +++
 .../ElasticSearchClusterIntegrationTest.java    | 130 ++++
 .../ElasticSearchNodeIntegrationTest.java       | 113 +++
 .../nosql/mongodb/MongoDBEc2LiveTest.java       |  56 ++
 .../nosql/mongodb/MongoDBIntegrationTest.java   |  92 +++
 .../mongodb/MongoDBRebindIntegrationTest.java   |  61 ++
 .../mongodb/MongoDBReplicaSetEc2LiveTest.java   |  98 +++
 .../MongoDBReplicaSetIntegrationTest.java       | 208 ++++++
 .../mongodb/MongoDBRestartIntegrationTest.java  |  43 ++
 .../nosql/mongodb/MongoDBSoftLayerLiveTest.java |  57 ++
 .../entity/nosql/mongodb/MongoDBTestHelper.java | 126 ++++
 .../nosql/mongodb/ReplicaSetConfigTest.java     | 240 +++++++
 .../MongoDBConfigServerIntegrationTest.java     |  67 ++
 .../MongoDBShardedDeploymentEc2LiveTest.java    |  86 +++
 ...MongoDBShardedDeploymentIntegrationTest.java | 135 ++++
 .../entity/nosql/redis/JedisSupport.java        |  77 +++
 .../redis/RedisClusterIntegrationTest.java      | 112 +++
 .../entity/nosql/redis/RedisEc2LiveTest.java    |  67 ++
 .../nosql/redis/RedisIntegrationTest.java       | 120 ++++
 .../nosql/riak/RiakClusterEc2LiveTest.java      |  76 ++
 .../entity/nosql/riak/RiakNodeEc2LiveTest.java  |  52 ++
 .../riak/RiakNodeGoogleComputeLiveTest.java     |  64 ++
 .../nosql/riak/RiakNodeIntegrationTest.java     |  65 ++
 .../nosql/riak/RiakNodeSoftlayerLiveTest.java   |  46 ++
 .../nosql/solr/AbstractSolrServerTest.java      |  42 ++
 .../entity/nosql/solr/SolrJSupport.java         |  67 ++
 .../nosql/solr/SolrServerEc2LiveTest.java       |  67 ++
 .../nosql/solr/SolrServerIntegrationTest.java   |  85 +++
 .../entity/nosql/solr/SolrServerLiveTest.java   |  90 +++
 .../main/resources/brooklyn/default.catalog.bom |   2 +-
 .../src/test/resources/cassandra-blueprint.yaml |   2 +-
 .../resources/couchbase-cluster-singleNode.yaml |   2 +-
 .../src/test/resources/couchbase-cluster.yaml   |   2 +-
 .../src/test/resources/couchbase-node.yaml      |   2 +-
 .../couchbase-replication-w-pillowfight.yaml    |   6 +-
 .../src/test/resources/couchbase-w-loadgen.yaml |   4 +-
 .../test/resources/couchbase-w-pillowfight.yaml |   4 +-
 .../src/test/resources/mongo-blueprint.yaml     |   2 +-
 .../resources/mongo-client-single-server.yaml   |   4 +-
 .../src/test/resources/mongo-scripts.yaml       |   4 +-
 .../src/test/resources/mongo-sharded.yaml       |   4 +-
 .../mongo-single-server-blueprint.yaml          |   2 +-
 usage/launcher/src/test/resources/playing.yaml  |   2 +-
 .../ApplicationResourceIntegrationTest.java     |   4 +-
 .../ApplicationResourceIntegrationTest.java     |   2 +-
 .../rest/resources/CatalogResourceTest.java     |   6 +-
 349 files changed, 19879 insertions(+), 19793 deletions(-)
----------------------------------------------------------------------



[12/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerSshDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerSshDriver.java
new file mode 100644
index 0000000..535bab6
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerSshDriver.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import org.apache.brooklyn.entity.nosql.mongodb.AbstractMongoDBSshDriver;
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBDriver;
+
+import brooklyn.location.basic.SshMachineLocation;
+
+public class MongoDBConfigServerSshDriver extends AbstractMongoDBSshDriver implements MongoDBDriver {
+    
+    public MongoDBConfigServerSshDriver(MongoDBConfigServerImpl entity, SshMachineLocation machine) {
+        super(entity, machine);
+    }
+    
+    @Override
+    public MongoDBConfigServerImpl getEntity() {
+        return MongoDBConfigServerImpl.class.cast(super.getEntity());
+    }
+
+    @Override
+    public void launch() {
+        launch(getArgsBuilderWithDefaults(getEntity())
+                .add("--configsvr")
+                .add("--dbpath", getDataDirectory()));
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouter.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouter.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouter.java
new file mode 100644
index 0000000..195b10f
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouter.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import org.apache.brooklyn.catalog.Catalog;
+import org.apache.brooklyn.entity.nosql.mongodb.AbstractMongoDBServer;
+
+import brooklyn.config.ConfigKey;
+import brooklyn.entity.basic.ConfigKeys;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.Sensors;
+import brooklyn.util.time.Duration;
+
+import com.google.common.reflect.TypeToken;
+
+@Catalog(name="MongoDB Router",
+        description="MongoDB (from \"humongous\") is a scalable, high-performance, open source NoSQL database",
+        iconUrl="classpath:///mongodb-logo.png")
+@ImplementedBy(MongoDBRouterImpl.class)
+public interface MongoDBRouter extends AbstractMongoDBServer {
+
+    @SuppressWarnings("serial")
+    ConfigKey<Iterable<String>> CONFIG_SERVERS = ConfigKeys.newConfigKey(
+            new TypeToken<Iterable<String>>(){}, "mongodb.router.config.servers", "List of host names and ports of the config servers");
+    
+    AttributeSensor<Integer> SHARD_COUNT = Sensors.newIntegerSensor("mongodb.router.config.shard.count", "Number of shards that have been added");
+    
+    AttributeSensor<Boolean> RUNNING = Sensors.newBooleanSensor("mongodb.router.running", "Indicates that the router is running, "
+            + "and can be used to add shards, but is not necessarity available for CRUD operations (e.g. if no shards have been added)");
+
+    /**
+     * @throws IllegalStateException if it times out.
+     */
+    public void waitForServiceUp(Duration duration);
+}
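
The CONFIG_SERVERS list defined here is later flattened into the --configdb
argument for mongos (see MongoDBRouterSshDriver below). A standalone sketch of
that assembly, with made-up host names:

    import com.google.common.base.Joiner;
    import com.google.common.collect.ImmutableList;

    public class ConfigDbArgExample {
        public static void main(String[] args) {
            Iterable<String> configServers = ImmutableList.of("cfg1:27019", "cfg2:27019", "cfg3:27019");
            // mongos expects a comma-separated host:port list
            String configdb = Joiner.on(",").join(configServers);
            System.out.println("--configdb " + configdb);
        }
    }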

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterCluster.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterCluster.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterCluster.java
new file mode 100644
index 0000000..333a1bd
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterCluster.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import java.util.Collection;
+
+import brooklyn.entity.group.DynamicCluster;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.Sensors;
+
+@ImplementedBy(MongoDBRouterClusterImpl.class)
+public interface MongoDBRouterCluster extends DynamicCluster {
+
+    AttributeSensor<MongoDBRouter> ANY_ROUTER = Sensors.newSensor(MongoDBRouter.class, "mongodb.routercluster.any", 
+            "When set, can be used to access one of the routers in the cluster (usually the first). This will only be set once "
+            + "at least one shard has been added, and the router is available for CRUD operations");
+    
+    AttributeSensor<MongoDBRouter> ANY_RUNNING_ROUTER = Sensors.newSensor(MongoDBRouter.class, "mongodb.routercluster.any.running", 
+            "When set, can be used to access one of the running routers in the cluster (usually the first). This should only be used " 
+            + "to add shards as it does not guarantee that the router is available for CRUD operations");
+
+    /**
+     * @return One of the routers in the cluster if available, null otherwise
+     */
+    MongoDBRouter getAnyRouter();
+    
+    /**
+     * @return One of the running routers in the cluster. This should only be used to add shards as it does not guarantee that 
+     * the router is available for CRUD operations
+     */
+    MongoDBRouter getAnyRunningRouter();
+    
+    /**
+     * @return All routers in the cluster
+     */
+    Collection<MongoDBRouter> getRouters();
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterClusterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterClusterImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterClusterImpl.java
new file mode 100644
index 0000000..b905c10
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterClusterImpl.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import java.util.Collection;
+
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.EntityPredicates;
+import brooklyn.entity.group.AbstractMembershipTrackingPolicy;
+import brooklyn.entity.group.DynamicClusterImpl;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.event.SensorEvent;
+import brooklyn.event.SensorEventListener;
+import brooklyn.location.Location;
+import brooklyn.policy.PolicySpec;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+
+public class MongoDBRouterClusterImpl extends DynamicClusterImpl implements MongoDBRouterCluster {
+
+    @Override
+    public void init() {
+        super.init();
+        subscribeToChildren(this, MongoDBRouter.RUNNING, new SensorEventListener<Boolean>() {
+            @Override public void onEvent(SensorEvent<Boolean> event) {
+                setAnyRouter();
+            }
+        });
+    }
+    
+    @Override
+    public void start(Collection<? extends Location> locations) {
+        super.start(locations);
+        addPolicy(PolicySpec.create(MemberTrackingPolicy.class)
+                .displayName("Router cluster membership tracker")
+                .configure("group", this));
+    }
+    
+    public static class MemberTrackingPolicy extends AbstractMembershipTrackingPolicy {
+        @Override protected void onEntityEvent(EventType type, Entity member) {
+            ((MongoDBRouterClusterImpl)super.entity).setAnyRouter();
+        }
+        @Override protected void onEntityRemoved(Entity member) {
+            ((MongoDBRouterClusterImpl)super.entity).setAnyRouter();
+        }
+        @Override protected void onEntityChange(Entity member) {
+            ((MongoDBRouterClusterImpl)super.entity).setAnyRouter();
+        }
+    }
+    
+    protected void setAnyRouter() {
+        setAttribute(MongoDBRouterCluster.ANY_ROUTER, Iterables.tryFind(getRouters(), 
+                EntityPredicates.attributeEqualTo(Startable.SERVICE_UP, true)).orNull());
+
+        setAttribute(
+                MongoDBRouterCluster.ANY_RUNNING_ROUTER, 
+                Iterables.tryFind(getRouters(), EntityPredicates.attributeEqualTo(MongoDBRouter.RUNNING, true))
+                .orNull());
+    }
+    
+    @Override
+    public Collection<MongoDBRouter> getRouters() {
+        return ImmutableList.copyOf(Iterables.filter(getMembers(), MongoDBRouter.class));
+    }
+    
+    @Override
+    protected EntitySpec<?> getMemberSpec() {
+        if (super.getMemberSpec() != null)
+            return super.getMemberSpec();
+        return EntitySpec.create(MongoDBRouter.class);
+    }
+
+    @Override
+    public MongoDBRouter getAnyRouter() {
+        return getAttribute(MongoDBRouterCluster.ANY_ROUTER);
+    }
+    
+    @Override
+    public MongoDBRouter getAnyRunningRouter() {
+        return getAttribute(MongoDBRouterCluster.ANY_RUNNING_ROUTER);
+    }
+ 
+}
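
setAnyRouter() above selects a member with Iterables.tryFind(...).orNull(),
falling back to null when nothing matches the predicate. The same idiom in plain
Guava, with entities replaced by strings for illustration:

    import com.google.common.base.Predicates;
    import com.google.common.collect.ImmutableList;
    import com.google.common.collect.Iterables;

    public class TryFindExample {
        public static void main(String[] args) {
            Iterable<String> members = ImmutableList.of("router-a", "router-b");
            // Optional.orNull() yields the match, or null if none was found.
            String any = Iterables.tryFind(members, Predicates.containsPattern("-b")).orNull();
            System.out.println(any); // router-b
        }
    }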

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterDriver.java
new file mode 100644
index 0000000..3c7a30c
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterDriver.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import brooklyn.entity.basic.SoftwareProcessDriver;
+
+public interface MongoDBRouterDriver extends SoftwareProcessDriver {
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterImpl.java
new file mode 100644
index 0000000..cbbc6b8
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterImpl.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBClientSupport;
+
+import brooklyn.entity.basic.SoftwareProcessImpl;
+import brooklyn.event.feed.function.FunctionFeed;
+import brooklyn.event.feed.function.FunctionPollConfig;
+
+import com.google.common.base.Functions;
+
+public class MongoDBRouterImpl extends SoftwareProcessImpl implements MongoDBRouter {
+    
+    private volatile FunctionFeed functionFeed;
+
+    @Override
+    public Class<?> getDriverInterface() {
+        return MongoDBRouterDriver.class;
+    }
+
+    @Override
+    protected void connectSensors() {
+        super.connectSensors();
+        functionFeed = FunctionFeed.builder()
+                .entity(this)
+                .poll(new FunctionPollConfig<Boolean, Boolean>(RUNNING)
+                        .period(5, TimeUnit.SECONDS)
+                        .callable(new Callable<Boolean>() {
+                            @Override
+                            public Boolean call() throws Exception {
+                                MongoDBClientSupport clientSupport = MongoDBClientSupport.forServer(MongoDBRouterImpl.this);
+                                return clientSupport.ping();
+                            }
+                        })
+                        .onException(Functions.<Boolean>constant(false)))
+                .poll(new FunctionPollConfig<Boolean, Boolean>(SERVICE_UP)
+                        .period(5, TimeUnit.SECONDS)
+                        .callable(new Callable<Boolean>() {
+                            @Override
+                            public Boolean call() throws Exception {
+                                // TODO: This is the same as in AbstractMongoDBSshDriver.isRunning.
+                                // This feels like the right place, but it could be made more consistent
+                                // across the different MongoDB types by using the FunctionFeed throughout.
+                                MongoDBClientSupport clientSupport = MongoDBClientSupport.forServer(MongoDBRouterImpl.this);
+                                return clientSupport.ping() && MongoDBRouterImpl.this.getAttribute(SHARD_COUNT) > 0;
+                            }
+                        })
+                        .onException(Functions.<Boolean>constant(false)))
+                .poll(new FunctionPollConfig<Integer, Integer>(SHARD_COUNT)
+                        .period(5, TimeUnit.SECONDS)
+                        .callable(new Callable<Integer>() {
+                            public Integer call() throws Exception {
+                                MongoDBClientSupport clientSupport = MongoDBClientSupport.forServer(MongoDBRouterImpl.this);
+                                return (int) clientSupport.getShardCount();
+                            }    
+                        })
+                        .onException(Functions.<Integer>constant(-1)))
+                .build();
+    }
+
+    @Override
+    protected void disconnectSensors() {
+        super.disconnectSensors();
+        if (functionFeed != null) functionFeed.stop();
+    }
+}
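
The FunctionFeed above polls every five seconds and maps any exception to a safe
default via onException(Functions.constant(...)). Outside Brooklyn the same shape
can be sketched with a plain ScheduledExecutorService; ping() here is a stand-in
for MongoDBClientSupport.ping(), not the real client call.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class PollingSketch {
        static final AtomicBoolean RUNNING = new AtomicBoolean(false);

        static boolean ping() { return true; } // stand-in for the real health probe

        public static void main(String[] args) {
            ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
            executor.scheduleAtFixedRate(new Runnable() {
                @Override public void run() {
                    boolean value;
                    try {
                        value = ping();
                    } catch (Exception e) {
                        value = false; // equivalent of onException(Functions.constant(false))
                    }
                    RUNNING.set(value);
                }
            }, 0, 5, TimeUnit.SECONDS);
        }
    }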

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterSshDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterSshDriver.java
new file mode 100644
index 0000000..422b9ac
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBRouterSshDriver.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import org.apache.brooklyn.entity.nosql.mongodb.AbstractMongoDBSshDriver;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.location.basic.SshMachineLocation;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableList;
+
+public class MongoDBRouterSshDriver extends AbstractMongoDBSshDriver implements MongoDBRouterDriver {
+    
+    private static final Logger LOG = LoggerFactory.getLogger(MongoDBRouterSshDriver.class);
+
+    public MongoDBRouterSshDriver(MongoDBRouterImpl entity, SshMachineLocation machine) {
+        super(entity, machine);
+    }
+    
+    @Override
+    public void launch() {
+        String configdb = Joiner.on(",").join(getEntity().getConfig(MongoDBRouter.CONFIG_SERVERS));
+        ImmutableList.Builder<String> argsBuilder = getArgsBuilderWithDefaults(MongoDBRouterImpl.class.cast(getEntity()))
+                .add("--configdb", configdb);
+        
+        String args = Joiner.on(" ").join(argsBuilder.build());
+        String command = String.format("%s/bin/mongos %s > out.log 2> err.log < /dev/null", getExpandedInstallDir(), args);
+        LOG.info(command);
+        newScript(LAUNCHING)
+                .updateTaskAndFailOnNonZeroResultCode()
+                .body.append(command).execute();
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardCluster.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardCluster.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardCluster.java
new file mode 100644
index 0000000..edf7d7a
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardCluster.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import brooklyn.entity.group.DynamicCluster;
+import brooklyn.entity.proxying.ImplementedBy;
+
+@ImplementedBy(MongoDBShardClusterImpl.class)
+public interface MongoDBShardCluster extends DynamicCluster {
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardClusterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardClusterImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardClusterImpl.java
new file mode 100644
index 0000000..7eb2571
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardClusterImpl.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import java.net.UnknownHostException;
+import java.util.Collection;
+import java.util.Set;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBClientSupport;
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBReplicaSet;
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBServer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.entity.Entity;
+import brooklyn.entity.group.DynamicClusterImpl;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.event.SensorEvent;
+import brooklyn.event.SensorEventListener;
+import brooklyn.location.Location;
+import brooklyn.util.exceptions.Exceptions;
+import brooklyn.util.text.Strings;
+import brooklyn.util.time.Duration;
+import brooklyn.util.time.Time;
+
+import com.google.common.base.Stopwatch;
+import com.google.common.collect.Sets;
+
+public class MongoDBShardClusterImpl extends DynamicClusterImpl implements MongoDBShardCluster {
+
+    private static final Logger LOG = LoggerFactory.getLogger(MongoDBShardClusterImpl.class);
+    
+    // TODO: Need to use attributes for this in order to support brooklyn restart 
+    private Set<Entity> addedMembers = Sets.newConcurrentHashSet();
+
+    // TODO: Need to use attributes for this in order to support brooklyn restart 
+    private Set<Entity> addingMembers = Sets.newConcurrentHashSet();
+
+    /**
+     * For shard addition and removal.
+     * Used for retrying.
+     * 
+     * TODO Should use ExecutionManager.
+     */
+    private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
+
+    @Override
+    protected EntitySpec<?> getMemberSpec() {
+        EntitySpec<?> result = super.getMemberSpec();
+        if (result == null)
+            result = EntitySpec.create(MongoDBReplicaSet.class);
+        result.configure(DynamicClusterImpl.INITIAL_SIZE, getConfig(MongoDBShardedDeployment.SHARD_REPLICASET_SIZE));
+        return result;
+    }
+
+    @Override
+    public void start(Collection<? extends Location> locations) {
+        subscribeToMembers(this, Startable.SERVICE_UP, new SensorEventListener<Boolean>() {
+            public void onEvent(SensorEvent<Boolean> event) {
+                addShards();
+            }
+        });
+
+        super.start(locations);
+        
+        MongoDBRouterCluster routers = getParent().getAttribute(MongoDBShardedDeployment.ROUTER_CLUSTER);
+        subscribe(routers, MongoDBRouterCluster.ANY_RUNNING_ROUTER, new SensorEventListener<MongoDBRouter>() {
+            public void onEvent(SensorEvent<MongoDBRouter> event) {
+                if (event.getValue() != null)
+                    addShards();
+            }
+        });
+    }
+
+    @Override
+    public void stop() {
+        // TODO Note that after this the executor will no longer run tasks, even if the cluster is restarted.
+        executor.shutdownNow();
+        super.stop();
+    }
+    
+    @Override
+    public void onManagementStopped() {
+        super.onManagementStopped();
+        executor.shutdownNow();
+    }
+
+    protected void addShards() {
+        MongoDBRouter router = getParent().getAttribute(MongoDBShardedDeployment.ROUTER_CLUSTER).getAttribute(MongoDBRouterCluster.ANY_RUNNING_ROUTER);
+        if (router == null) {
+            if (LOG.isTraceEnabled()) LOG.trace("Not adding shards because no running router in {}", this);
+            return;
+        }
+        
+        for (Entity member : this.getMembers()) {
+            if (Boolean.TRUE.equals(member.getAttribute(Startable.SERVICE_UP)) && !addingMembers.contains(member)) {
+                LOG.info("{} adding shard {}", new Object[] {MongoDBShardClusterImpl.this, member});
+                addingMembers.add(member);
+                addShardAsync(member);
+            }
+        }
+    }
+    
+    protected void addShardAsync(final Entity replicaSet) {
+        final Duration timeout = Duration.minutes(20);
+        final Stopwatch stopwatch = Stopwatch.createStarted();
+        final AtomicInteger attempts = new AtomicInteger();
+        
+        // TODO Don't use executor, use ExecutionManager; but following pattern in MongoDBReplicaSetImpl for now.
+        executor.submit(new Runnable() {
+            @Override
+            public void run() {
+                boolean reschedule;
+                MongoDBRouter router = getParent().getAttribute(MongoDBShardedDeployment.ROUTER_CLUSTER).getAttribute(MongoDBRouterCluster.ANY_RUNNING_ROUTER);
+                if (router == null) {
+                    LOG.debug("Rescheduling adding shard {} because no running router for cluster {}", replicaSet, this);
+                    reschedule = true;
+                } else {
+                    MongoDBClientSupport client;
+                    try {
+                        client = MongoDBClientSupport.forServer(router);
+                    } catch (UnknownHostException e) {
+                        throw Exceptions.propagate(e);
+                    }
+                    
+                    MongoDBServer primary = replicaSet.getAttribute(MongoDBReplicaSet.PRIMARY_ENTITY);
+                    if (primary != null) {
+                        String addr = String.format("%s:%d", primary.getAttribute(MongoDBServer.SUBNET_HOSTNAME), primary.getAttribute(MongoDBServer.PORT));
+                        String replicaSetURL = ((MongoDBReplicaSet) replicaSet).getName() + "/" + addr;
+                        boolean added = client.addShardToRouter(replicaSetURL);
+                        if (added) {
+                            LOG.info("{} added shard {} via {}", new Object[] {MongoDBShardClusterImpl.this, replicaSetURL, router});
+                            addedMembers.add(replicaSet);
+                            reschedule = false;
+                        } else {
+                            LOG.debug("Rescheduling addition of shard {} because add failed via router {}", replicaSetURL, router);
+                            reschedule = true;
+                        }
+                    } else {
+                        LOG.debug("Rescheduling addition of shard {} because primary is null", replicaSet);
+                        reschedule = true;
+                    }
+                }
+                
+                if (reschedule) {
+                    int numAttempts = attempts.incrementAndGet();
+                    if (timeout.toMilliseconds() > stopwatch.elapsed(TimeUnit.MILLISECONDS)) {
+                        executor.schedule(this, 3, TimeUnit.SECONDS);
+                    } else {
+                        LOG.warn("Timeout after {} attempts ({}) adding shard {}; aborting", 
+                                new Object[] {numAttempts, Time.makeTimeStringRounded(stopwatch), replicaSet});
+                        addingMembers.remove(replicaSet);
+                    }
+                }
+            }
+        });
+    }
+}
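
For illustration (not part of this commit): addShardAsync registers each replica set with a running router using MongoDB's "replicaSetName/host:port" shard address form. A minimal sketch with hypothetical values:

    // hypothetical values; mirrors the address construction in addShardAsync above
    String name = "BrooklynCluster";                          // ((MongoDBReplicaSet) replicaSet).getName()
    String addr = String.format("%s:%d", "10.0.0.5", 27017);  // primary's SUBNET_HOSTNAME and PORT
    String replicaSetURL = name + "/" + addr;                 // "BrooklynCluster/10.0.0.5:27017"
    // client.addShardToRouter(replicaSetURL) then corresponds roughly to the
    // mongo shell's sh.addShard("BrooklynCluster/10.0.0.5:27017")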

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeployment.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeployment.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeployment.java
new file mode 100644
index 0000000..3383887
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeployment.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import org.apache.brooklyn.catalog.Catalog;
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBReplicaSet;
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBServer;
+
+import brooklyn.config.ConfigKey;
+import brooklyn.entity.Entity;
+import brooklyn.entity.Group;
+import brooklyn.entity.basic.ConfigKeys;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.entity.trait.Startable;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.Sensors;
+import brooklyn.util.flags.SetFromFlag;
+import brooklyn.util.time.Duration;
+
+import com.google.common.reflect.TypeToken;
+
+@Catalog(name="MongoDB Sharded Deployment",
+        description="MongoDB (from \"humongous\") is a scalable, high-performance, open source NoSQL database",
+        iconUrl="classpath:///mongodb-logo.png")
+@ImplementedBy(MongoDBShardedDeploymentImpl.class)
+public interface MongoDBShardedDeployment extends Entity, Startable {
+    @SetFromFlag("configClusterSize")
+    ConfigKey<Integer> CONFIG_CLUSTER_SIZE = ConfigKeys.newIntegerConfigKey("mongodb.config.cluster.size", 
+            "Number of config servers", 3);
+    
+    @SetFromFlag("initialRouterClusterSize")
+    ConfigKey<Integer> INITIAL_ROUTER_CLUSTER_SIZE = ConfigKeys.newIntegerConfigKey("mongodb.router.cluster.initial.size", 
+            "Initial number of routers (mongos)", 0);
+    
+    @SetFromFlag("initialShardClusterSize")
+    ConfigKey<Integer> INITIAL_SHARD_CLUSTER_SIZE = ConfigKeys.newIntegerConfigKey("mongodb.shard.cluster.initial.size", 
+            "Initial number of shards (replicasets)", 2);
+    
+    @SetFromFlag("shardReplicaSetSize")
+    ConfigKey<Integer> SHARD_REPLICASET_SIZE = ConfigKeys.newIntegerConfigKey("mongodb.shard.replicaset.size", 
+            "Number of servers (mongod) in each shard (replicaset)", 3);
+    
+    @SetFromFlag("routerUpTimeout")
+    ConfigKey<Duration> ROUTER_UP_TIMEOUT = ConfigKeys.newConfigKey(Duration.class, "mongodb.router.up.timeout", 
+            "Maximum time to wait for the routers to become available before adding the shards", Duration.FIVE_MINUTES);
+    
+    @SetFromFlag("coLocatedRouterGroup")
+    ConfigKey<Group> CO_LOCATED_ROUTER_GROUP = ConfigKeys.newConfigKey(Group.class, "mongodb.colocated.router.group", 
+            "Group to be monitored for the addition of new CoLocatedMongoDBRouter entities");
+    
+    @SuppressWarnings("serial")
+    ConfigKey<EntitySpec<?>> MONGODB_ROUTER_SPEC = ConfigKeys.newConfigKey(
+            new TypeToken<EntitySpec<?>>() {},
+            "mongodb.router.spec", 
+            "Spec for Router instances",
+            EntitySpec.create(MongoDBRouter.class));
+
+    @SuppressWarnings("serial")
+    ConfigKey<EntitySpec<?>> MONGODB_REPLICA_SET_SPEC = ConfigKeys.newConfigKey(
+            new TypeToken<EntitySpec<?>>() {},
+            "mongodb.replicaset.spec", 
+            "Spec for Replica Set",
+            EntitySpec.create(MongoDBReplicaSet.class)
+                    .configure(MongoDBReplicaSet.MEMBER_SPEC, EntitySpec.create(MongoDBServer.class)));
+
+    @SuppressWarnings("serial")
+    ConfigKey<EntitySpec<?>> MONGODB_CONFIG_SERVER_SPEC = ConfigKeys.newConfigKey(
+            new TypeToken<EntitySpec<?>>() {},
+            "mongodb.configserver.spec", 
+            "Spec for Config Server instances",
+            EntitySpec.create(MongoDBConfigServer.class));
+
+    AttributeSensor<MongoDBConfigServerCluster> CONFIG_SERVER_CLUSTER = Sensors.newSensor(
+            MongoDBConfigServerCluster.class, "mongodbshardeddeployment.configservers", "Config servers");
+    AttributeSensor<MongoDBRouterCluster> ROUTER_CLUSTER = Sensors.newSensor(
+            MongoDBRouterCluster.class, "mongodbshardeddeployment.routers", "Routers");
+    
+    AttributeSensor<MongoDBShardCluster> SHARD_CLUSTER = Sensors.newSensor(
+            MongoDBShardCluster.class, "mongodbshardeddeployment.shards", "Shards");
+    
+    MongoDBConfigServerCluster getConfigCluster();
+    
+    MongoDBRouterCluster getRouterCluster();
+    
+    MongoDBShardCluster getShardCluster();
+}
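
A minimal usage sketch (not part of this commit; app is a hypothetical application entity, and the values shown are the defaults declared above):

    MongoDBShardedDeployment deployment = app.addChild(EntitySpec.create(MongoDBShardedDeployment.class)
            .configure(MongoDBShardedDeployment.CONFIG_CLUSTER_SIZE, 3)
            .configure(MongoDBShardedDeployment.INITIAL_SHARD_CLUSTER_SIZE, 2)
            .configure(MongoDBShardedDeployment.SHARD_REPLICASET_SIZE, 3));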

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentImpl.java
new file mode 100644
index 0000000..74f0623
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentImpl.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb.sharding;
+
+import static brooklyn.event.basic.DependentConfiguration.attributeWhenReady;
+
+import java.util.Collection;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.enricher.Enrichers;
+import brooklyn.entity.Entity;
+import brooklyn.entity.Group;
+import brooklyn.entity.basic.AbstractEntity;
+import brooklyn.entity.basic.Attributes;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.basic.Lifecycle;
+import brooklyn.entity.basic.ServiceStateLogic;
+import brooklyn.entity.basic.ServiceStateLogic.ServiceNotUpLogic;
+import brooklyn.entity.group.AbstractMembershipTrackingPolicy;
+import brooklyn.entity.group.DynamicCluster;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.location.Location;
+import brooklyn.policy.PolicySpec;
+import brooklyn.util.exceptions.Exceptions;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+
+public class MongoDBShardedDeploymentImpl extends AbstractEntity implements MongoDBShardedDeployment {
+    
+    @SuppressWarnings("unused")
+    private static final Logger LOG = LoggerFactory.getLogger(MongoDBShardedDeploymentImpl.class);
+    
+    @Override
+    public void init() {
+        super.init();
+        
+        setAttribute(CONFIG_SERVER_CLUSTER, addChild(EntitySpec.create(MongoDBConfigServerCluster.class)
+                .configure(MongoDBConfigServerCluster.MEMBER_SPEC, getConfig(MONGODB_CONFIG_SERVER_SPEC))
+                .configure(DynamicCluster.INITIAL_SIZE, getConfig(CONFIG_CLUSTER_SIZE))));
+        setAttribute(ROUTER_CLUSTER, addChild(EntitySpec.create(MongoDBRouterCluster.class)
+                .configure(MongoDBRouterCluster.MEMBER_SPEC, getConfig(MONGODB_ROUTER_SPEC))
+                .configure(DynamicCluster.INITIAL_SIZE, getConfig(INITIAL_ROUTER_CLUSTER_SIZE))
+                .configure(MongoDBRouter.CONFIG_SERVERS, attributeWhenReady(getAttribute(CONFIG_SERVER_CLUSTER), MongoDBConfigServerCluster.CONFIG_SERVER_ADDRESSES))));
+        setAttribute(SHARD_CLUSTER, addChild(EntitySpec.create(MongoDBShardCluster.class)
+                .configure(MongoDBShardCluster.MEMBER_SPEC, getConfig(MONGODB_REPLICA_SET_SPEC))
+                .configure(DynamicCluster.INITIAL_SIZE, getConfig(INITIAL_SHARD_CLUSTER_SIZE))));
+        addEnricher(Enrichers.builder()
+                .propagating(MongoDBConfigServerCluster.CONFIG_SERVER_ADDRESSES)
+                .from(getAttribute(CONFIG_SERVER_CLUSTER))
+                .build());
+        
+        ServiceNotUpLogic.updateNotUpIndicator(this, Attributes.SERVICE_STATE_ACTUAL, "stopped");
+    }
+
+    @Override
+    public void start(Collection<? extends Location> locations) {
+        ServiceStateLogic.setExpectedState(this, Lifecycle.STARTING);
+        try {
+            final MongoDBRouterCluster routers = getAttribute(ROUTER_CLUSTER);
+            final MongoDBShardCluster shards = getAttribute(SHARD_CLUSTER);
+            List<DynamicCluster> clusters = ImmutableList.of(getAttribute(CONFIG_SERVER_CLUSTER), routers, shards);
+            Entities.invokeEffectorList(this, clusters, Startable.START, ImmutableMap.of("locations", locations))
+                .get();
+
+            if (getConfigRaw(MongoDBShardedDeployment.CO_LOCATED_ROUTER_GROUP, true).isPresent()) {
+                addPolicy(PolicySpec.create(ColocatedRouterTrackingPolicy.class)
+                        .displayName("Co-located router tracker")
+                        .configure("group", (Group)getConfig(MongoDBShardedDeployment.CO_LOCATED_ROUTER_GROUP)));
+            }
+            ServiceNotUpLogic.clearNotUpIndicator(this, Attributes.SERVICE_STATE_ACTUAL);
+            ServiceStateLogic.setExpectedState(this, Lifecycle.RUNNING);
+        } catch (Exception e) {
+            ServiceStateLogic.setExpectedState(this, Lifecycle.ON_FIRE);
+            // no need to log here; the effector invocation should do that
+            throw Exceptions.propagate(e);
+        }
+    }
+
+    public static class ColocatedRouterTrackingPolicy extends AbstractMembershipTrackingPolicy {
+        @Override
+        protected void onEntityAdded(Entity member) {
+            MongoDBRouterCluster cluster = entity.getAttribute(ROUTER_CLUSTER);
+            cluster.addMember(member.getAttribute(CoLocatedMongoDBRouter.ROUTER));
+        }
+        @Override
+        protected void onEntityRemoved(Entity member) {
+            MongoDBRouterCluster cluster = entity.getAttribute(ROUTER_CLUSTER);
+            cluster.removeMember(member.getAttribute(CoLocatedMongoDBRouter.ROUTER));
+        }
+    }
+
+    @Override
+    public void stop() {
+        ServiceStateLogic.setExpectedState(this, Lifecycle.STOPPING);
+        try {
+            Entities.invokeEffectorList(this, ImmutableList.of(getAttribute(CONFIG_SERVER_CLUSTER), getAttribute(ROUTER_CLUSTER), 
+                    getAttribute(SHARD_CLUSTER)), Startable.STOP).get();
+        } catch (Exception e) {
+            ServiceStateLogic.setExpectedState(this, Lifecycle.ON_FIRE);
+            throw Exceptions.propagate(e);
+        }
+        ServiceStateLogic.setExpectedState(this, Lifecycle.STOPPED);
+        ServiceNotUpLogic.updateNotUpIndicator(this, Attributes.SERVICE_STATE_ACTUAL, "stopped");
+    }
+    
+    @Override
+    public void restart() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public MongoDBConfigServerCluster getConfigCluster() {
+        return getAttribute(CONFIG_SERVER_CLUSTER);
+    }
+
+    @Override
+    public MongoDBRouterCluster getRouterCluster() {
+        return getAttribute(ROUTER_CLUSTER);
+    }
+
+    @Override
+    public MongoDBShardCluster getShardCluster() {
+        return getAttribute(SHARD_CLUSTER);
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisCluster.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisCluster.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisCluster.java
new file mode 100644
index 0000000..26f4f1c
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisCluster.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.redis;
+
+import org.apache.brooklyn.catalog.Catalog;
+import brooklyn.entity.Entity;
+import brooklyn.entity.group.DynamicCluster;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.entity.trait.Startable;
+
+/**
+ * A cluster of {@link RedisStore}s with one master and a group of slaves.
+ *
+ * The slaves are contained in a {@link DynamicCluster} which can be resized by a policy if required.
+ *
+ * TODO add sensors with aggregated Redis statistics from cluster
+ */
+@Catalog(name="Redis Cluster", description="Redis is an open-source, networked, in-memory, key-value data store with optional durability", iconUrl="classpath:///redis-logo.png")
+@ImplementedBy(RedisClusterImpl.class)
+public interface RedisCluster extends Entity, Startable {
+    
+    public RedisStore getMaster();
+    
+    public DynamicCluster getSlaves();
+}
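
A minimal usage sketch (not part of this commit; app and location are hypothetical):

    RedisCluster cluster = app.addChild(EntitySpec.create(RedisCluster.class));
    cluster.start(ImmutableList.of(location));
    RedisStore master = cluster.getMaster();      // the single master
    DynamicCluster slaves = cluster.getSlaves();  // resizable cluster of RedisSlave instances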

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisClusterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisClusterImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisClusterImpl.java
new file mode 100644
index 0000000..39c9dbe
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisClusterImpl.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.redis;
+
+import java.util.Collection;
+
+import brooklyn.enricher.Enrichers;
+import brooklyn.entity.basic.AbstractEntity;
+import brooklyn.entity.basic.Lifecycle;
+import brooklyn.entity.basic.ServiceStateLogic;
+import brooklyn.entity.basic.ServiceStateLogic.ComputeServiceIndicatorsFromChildrenAndMembers;
+import brooklyn.entity.basic.ServiceStateLogic.ServiceProblemsLogic;
+import brooklyn.entity.group.DynamicCluster;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.Sensors;
+import brooklyn.location.Location;
+import brooklyn.util.collections.QuorumCheck.QuorumChecks;
+import brooklyn.util.exceptions.Exceptions;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+
+public class RedisClusterImpl extends AbstractEntity implements RedisCluster {
+
+    private static final AttributeSensor<RedisStore> MASTER = Sensors.newSensor(RedisStore.class, "redis.master");
+    private static final AttributeSensor<DynamicCluster> SLAVES = Sensors.newSensor(DynamicCluster.class, "redis.slaves");
+
+    public RedisClusterImpl() {
+    }
+
+    @Override
+    public RedisStore getMaster() {
+        return getAttribute(MASTER);
+    }
+    
+    @Override
+    public DynamicCluster getSlaves() {
+        return getAttribute(SLAVES);
+    }
+
+    @Override
+    public void init() {
+        super.init();
+
+        RedisStore master = addChild(EntitySpec.create(RedisStore.class));
+        setAttribute(MASTER, master);
+
+        DynamicCluster slaves = addChild(EntitySpec.create(DynamicCluster.class)
+                .configure(DynamicCluster.MEMBER_SPEC, EntitySpec.create(RedisSlave.class).configure(RedisSlave.MASTER, master)));
+        setAttribute(SLAVES, slaves);
+
+        addEnricher(Enrichers.builder()
+                .propagating(RedisStore.HOSTNAME, RedisStore.ADDRESS, RedisStore.SUBNET_HOSTNAME, RedisStore.SUBNET_ADDRESS, RedisStore.REDIS_PORT)
+                .from(master)
+                .build());
+    }
+
+    @Override
+    protected void initEnrichers() {
+        super.initEnrichers();
+        ServiceStateLogic.newEnricherFromChildrenUp()
+                .checkChildrenOnly()
+                .requireUpChildren(QuorumChecks.all())
+                .configure(ComputeServiceIndicatorsFromChildrenAndMembers.IGNORE_ENTITIES_WITH_THESE_SERVICE_STATES, ImmutableSet.<Lifecycle>of())
+                .addTo(this);
+    }
+    
+    @Override
+    public void start(Collection<? extends Location> locations) {
+        ServiceStateLogic.setExpectedState(this, Lifecycle.STARTING);
+        ServiceProblemsLogic.clearProblemsIndicator(this, START);
+        try {
+            doStart(locations);
+            ServiceStateLogic.setExpectedState(this, Lifecycle.RUNNING);
+        } catch (Exception e) {
+            ServiceProblemsLogic.updateProblemsIndicator(this, START, "Start failed with error: "+e);
+            ServiceStateLogic.setExpectedState(this, Lifecycle.ON_FIRE);
+            throw Exceptions.propagate(e);
+        }
+    }
+
+    private void doStart(Collection<? extends Location> locations) {
+        RedisStore master = getMaster();
+        master.invoke(RedisStore.START, ImmutableMap.<String, Object>of("locations", ImmutableList.copyOf(locations))).getUnchecked();
+
+        DynamicCluster slaves = getSlaves();
+        slaves.invoke(DynamicCluster.START, ImmutableMap.<String, Object>of("locations", ImmutableList.copyOf(locations))).getUnchecked();
+    }
+
+    @Override
+    public void stop() {
+        ServiceStateLogic.setExpectedState(this, Lifecycle.STOPPING);
+        try {
+            doStop();
+            ServiceStateLogic.setExpectedState(this, Lifecycle.STOPPED);
+        } catch (Exception e) {
+            ServiceProblemsLogic.updateProblemsIndicator(this, STOP, "Stop failed with error: "+e);
+            ServiceStateLogic.setExpectedState(this, Lifecycle.ON_FIRE);
+            throw Exceptions.propagate(e);
+        }
+    }
+
+    private void doStop() {
+        getSlaves().invoke(DynamicCluster.STOP, ImmutableMap.<String, Object>of()).getUnchecked();
+        getMaster().invoke(RedisStore.STOP, ImmutableMap.<String, Object>of()).getUnchecked();
+    }
+
+    @Override
+    public void restart() {
+        throw new UnsupportedOperationException();
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisShard.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisShard.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisShard.java
new file mode 100644
index 0000000..09d71b3
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisShard.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.redis;
+
+import brooklyn.entity.Entity;
+import brooklyn.entity.proxying.ImplementedBy;
+
+@ImplementedBy(RedisShardImpl.class)
+public interface RedisShard extends Entity {
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisShardImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisShardImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisShardImpl.java
new file mode 100644
index 0000000..87396f5
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisShardImpl.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.redis;
+
+import brooklyn.entity.basic.AbstractEntity;
+
+public class RedisShardImpl extends AbstractEntity implements RedisShard {
+    public RedisShardImpl() {
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisSlave.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisSlave.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisSlave.java
new file mode 100644
index 0000000..af91beb
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisSlave.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.redis;
+
+import brooklyn.config.ConfigKey;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.event.basic.BasicConfigKey;
+import brooklyn.util.flags.SetFromFlag;
+
+/**
+ * A {@link RedisStore} configured as a slave.
+ */
+@ImplementedBy(RedisSlaveImpl.class)
+public interface RedisSlave extends RedisStore {
+
+    @SetFromFlag("master")
+    ConfigKey<RedisStore> MASTER = new BasicConfigKey<RedisStore>(RedisStore.class, "redis.master", "Redis master");
+
+    @SetFromFlag("redisConfigTemplateUrl")
+    ConfigKey<String> REDIS_CONFIG_TEMPLATE_URL = new BasicConfigKey<String>(
+            String.class, "redis.config.templateUrl", "Template file (in freemarker format) for the redis.conf config file", 
+            "classpath://org/apache/brooklyn/entity/nosql/redis/slave.conf");
+
+    RedisStore getMaster();
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisSlaveImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisSlaveImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisSlaveImpl.java
new file mode 100644
index 0000000..b58ce7d
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisSlaveImpl.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.redis;
+
+
+/**
+ * A {@link RedisStore} configured as a slave.
+ */
+public class RedisSlaveImpl extends RedisStoreImpl implements RedisSlave {
+
+    public RedisSlaveImpl() {
+    }
+
+    @Override
+    public RedisStore getMaster() {
+        return getConfig(MASTER);
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisStore.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisStore.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisStore.java
new file mode 100644
index 0000000..8d2cef1
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisStore.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.redis;
+
+import org.apache.brooklyn.catalog.Catalog;
+import brooklyn.config.ConfigKey;
+import brooklyn.entity.basic.ConfigKeys;
+import brooklyn.entity.basic.SoftwareProcess;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
+import brooklyn.event.basic.PortAttributeSensorAndConfigKey;
+import brooklyn.event.basic.Sensors;
+import brooklyn.util.flags.SetFromFlag;
+
+/**
+ * An entity that represents a Redis key-value store service.
+ */
+@Catalog(name="Redis Server", description="Redis is an open-source, networked, in-memory, key-value data store with optional durability", iconUrl="classpath:///redis-logo.png")
+@ImplementedBy(RedisStoreImpl.class)
+public interface RedisStore extends SoftwareProcess {
+
+    @SetFromFlag("version")
+    ConfigKey<String> SUGGESTED_VERSION =
+            ConfigKeys.newConfigKeyWithDefault(SoftwareProcess.SUGGESTED_VERSION, "2.8.4");
+
+    @SetFromFlag("downloadUrl")
+    BasicAttributeSensorAndConfigKey<String> DOWNLOAD_URL = new BasicAttributeSensorAndConfigKey<String>(
+            SoftwareProcess.DOWNLOAD_URL, "http://download.redis.io/releases/redis-${version}.tar.gz");
+
+    @SetFromFlag("redisPort")
+    PortAttributeSensorAndConfigKey REDIS_PORT = new PortAttributeSensorAndConfigKey("redis.port", "Redis port number", "6379+");
+
+    @SetFromFlag("redisConfigTemplateUrl")
+    ConfigKey<String> REDIS_CONFIG_TEMPLATE_URL = ConfigKeys.newConfigKey(
+            "redis.config.templateUrl", "Template file (in freemarker format) for the redis.conf config file", 
+            "classpath://org/apache/brooklyn/entity/nosql/redis/redis.conf");
+
+    AttributeSensor<Integer> UPTIME = Sensors.newIntegerSensor("redis.uptime", "Redis uptime in seconds");
+
+    // See http://redis.io/commands/info for details of all information available
+    AttributeSensor<Integer> TOTAL_CONNECTIONS_RECEIVED = Sensors.newIntegerSensor("redis.connections.received.total", "Total number of connections accepted by the server");
+    AttributeSensor<Integer> TOTAL_COMMANDS_PROCESSED = Sensors.newIntegerSensor("redis.commands.processed.total", "Total number of commands processed by the server");
+    AttributeSensor<Integer> EXPIRED_KEYS = Sensors.newIntegerSensor("redis.keys.expired", "Total number of key expiration events");
+    AttributeSensor<Integer> EVICTED_KEYS = Sensors.newIntegerSensor("redis.keys.evicted", "Number of keys evicted due to the maxmemory limit");
+    AttributeSensor<Integer> KEYSPACE_HITS = Sensors.newIntegerSensor("redis.keyspace.hits", "Number of successful lookups of keys in the main dictionary");
+    AttributeSensor<Integer> KEYSPACE_MISSES = Sensors.newIntegerSensor("redis.keyspace.misses", "Number of failed lookups of keys in the main dictionary");
+
+    String getAddress();
+
+    Integer getRedisPort();
+
+}
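
A hedged usage sketch (not part of this commit; app is hypothetical) showing the keys above being overridden via their flag names:

    RedisStore redis = app.addChild(EntitySpec.create(RedisStore.class)
            .configure("version", "2.8.4")      // SUGGESTED_VERSION flag
            .configure("redisPort", "6380+"));  // REDIS_PORT flag: first free port at or above 6380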

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisStoreDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisStoreDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisStoreDriver.java
new file mode 100644
index 0000000..ba77cfd
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisStoreDriver.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.redis;
+
+import brooklyn.entity.basic.SoftwareProcessDriver;
+
+public interface RedisStoreDriver extends SoftwareProcessDriver {
+
+    String getRunDir();
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisStoreImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisStoreImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisStoreImpl.java
new file mode 100644
index 0000000..f556bcf
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisStoreImpl.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.redis;
+
+import java.util.concurrent.TimeUnit;
+
+import javax.annotation.Nullable;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.entity.basic.SoftwareProcessImpl;
+import brooklyn.event.feed.ssh.SshFeed;
+import brooklyn.event.feed.ssh.SshPollConfig;
+import brooklyn.event.feed.ssh.SshPollValue;
+import brooklyn.event.feed.ssh.SshValueFunctions;
+import brooklyn.location.Location;
+import brooklyn.location.MachineLocation;
+import brooklyn.location.basic.SshMachineLocation;
+
+import com.google.common.base.Function;
+import com.google.common.base.Functions;
+import com.google.common.base.Optional;
+import com.google.common.base.Predicates;
+import com.google.common.base.Splitter;
+import com.google.common.collect.Iterables;
+
+/**
+ * An entity that represents a Redis key-value store service.
+ */
+public class RedisStoreImpl extends SoftwareProcessImpl implements RedisStore {
+    @SuppressWarnings("unused")
+    private static final Logger LOG = LoggerFactory.getLogger(RedisStoreImpl.class);
+
+    private transient SshFeed sshFeed;
+
+    public RedisStoreImpl() {
+    }
+
+    @Override
+    protected void connectSensors() {
+        super.connectSensors();
+
+        connectServiceUpIsRunning();
+
+        // Find an SshMachineLocation for the UPTIME feed
+        Optional<Location> location = Iterables.tryFind(getLocations(), Predicates.instanceOf(SshMachineLocation.class));
+        if (!location.isPresent()) throw new IllegalStateException("Could not find SshMachineLocation in list of locations");
+        SshMachineLocation machine = (SshMachineLocation) location.get();
+        String statsCommand = getDriver().getRunDir() + "/bin/redis-cli -p " + getRedisPort() + " info stats";
+
+        sshFeed = SshFeed.builder()
+                .entity(this)
+                .machine(machine)
+                .period(5, TimeUnit.SECONDS)
+                .poll(new SshPollConfig<Integer>(UPTIME)
+                        .command(getDriver().getRunDir() + "/bin/redis-cli -p " + getRedisPort() + " info server")
+                        .onFailureOrException(Functions.constant(-1))
+                        .onSuccess(infoFunction("uptime_in_seconds")))
+                .poll(new SshPollConfig<Integer>(TOTAL_CONNECTIONS_RECEIVED)
+                        .command(statsCommand)
+                        .onFailureOrException(Functions.constant(-1))
+                        .onSuccess(infoFunction("total_connections_received")))
+                .poll(new SshPollConfig<Integer>(TOTAL_COMMANDS_PROCESSED)
+                        .command(statsCommand)
+                        .onFailureOrException(Functions.constant(-1))
+                        .onSuccess(infoFunction("total_commands_processed")))
+                .poll(new SshPollConfig<Integer>(EXPIRED_KEYS)
+                        .command(statsCommand)
+                        .onFailureOrException(Functions.constant(-1))
+                        .onSuccess(infoFunction("expired_keys")))
+                .poll(new SshPollConfig<Integer>(EVICTED_KEYS)
+                        .command(statsCommand)
+                        .onFailureOrException(Functions.constant(-1))
+                        .onSuccess(infoFunction("evicted_keys")))
+                .poll(new SshPollConfig<Integer>(KEYSPACE_HITS)
+                        .command(statsCommand)
+                        .onFailureOrException(Functions.constant(-1))
+                        .onSuccess(infoFunction("keyspace_hits")))
+                .poll(new SshPollConfig<Integer>(KEYSPACE_MISSES)
+                        .command(statsCommand)
+                        .onFailureOrException(Functions.constant(-1))
+                        .onSuccess(infoFunction("keyspace_misses")))
+                .build();
+    }
+
+    /**
+     * Create a {@link Function} to retrieve a particular field value from a {@code redis-cli info}
+     * command.
+     * 
+     * @param field the info field to retrieve and convert
+     * @return a new function that converts a {@link SshPollValue} to an {@link Integer}
+     */
+    private static Function<SshPollValue, Integer> infoFunction(final String field) {
+        return Functions.compose(new Function<String, Integer>() {
+            @Override
+            public Integer apply(@Nullable String input) {
+                Optional<String> line = Iterables.tryFind(Splitter.on('\n').split(input), Predicates.containsPattern(field + ":"));
+                if (line.isPresent()) {
+                    String data = line.get().trim();
+                    int colon = data.indexOf(":");
+                    return Integer.parseInt(data.substring(colon + 1));
+                } else {
+                    throw new IllegalStateException("Data for field "+field+" not found: "+input);
+                }
+            }
+        }, SshValueFunctions.stdout());
+    }
+
+    @Override
+    public void disconnectSensors() {
+        disconnectServiceUpIsRunning();
+        if (sshFeed != null) sshFeed.stop();
+        super.disconnectSensors();
+    }
+
+    @Override
+    public Class<?> getDriverInterface() {
+        return RedisStoreDriver.class;
+    }
+
+    @Override
+    public RedisStoreDriver getDriver() {
+        return (RedisStoreDriver) super.getDriver();
+    }
+
+    @Override
+    public String getAddress() {
+        MachineLocation machine = getMachineOrNull();
+        return (machine != null) ? machine.getAddress().getHostAddress() : null;
+    }
+
+    @Override
+    public Integer getRedisPort() {
+        return getAttribute(RedisStore.REDIS_PORT);
+    }
+
+}
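
For illustration (not part of this commit): infoFunction extracts a single integer field from "redis-cli ... info" output. A worked example of the parse it performs, on a hypothetical line:

    // stdout contains lines such as "total_connections_received:42"
    String data = "total_connections_received:42".trim();
    int colon = data.indexOf(":");
    int value = Integer.parseInt(data.substring(colon + 1));  // 42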

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisStoreSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisStoreSshDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisStoreSshDriver.java
new file mode 100644
index 0000000..c362e4e
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/redis/RedisStoreSshDriver.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.redis;
+
+import static java.lang.String.format;
+
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.entity.basic.AbstractSoftwareProcessSshDriver;
+import brooklyn.entity.basic.Entities;
+import brooklyn.location.Location;
+import brooklyn.location.basic.SshMachineLocation;
+import brooklyn.util.collections.MutableMap;
+import brooklyn.util.os.Os;
+import brooklyn.util.ssh.BashCommands;
+
+import com.google.common.collect.ImmutableList;
+
+/**
+ * Start a {@link RedisStore} in a {@link Location} accessible over ssh.
+ */
+public class RedisStoreSshDriver extends AbstractSoftwareProcessSshDriver implements RedisStoreDriver {
+
+    private static final Logger LOG = LoggerFactory.getLogger(RedisStoreSshDriver.class);
+
+    public RedisStoreSshDriver(RedisStoreImpl entity, SshMachineLocation machine) {
+        super(entity, machine);
+    }
+
+    @Override
+    public void preInstall() {
+        resolver = Entities.newDownloader(this);
+        setExpandedInstallDir(Os.mergePaths(getInstallDir(), resolver.getUnpackedDirectoryName(format("redis-%s", getVersion()))));
+    }
+
+    @Override
+    public void install() {
+        List<String> urls = resolver.getTargets();
+        String saveAs = resolver.getFilename();
+
+        MutableMap<String, String> installGccPackageFlags = MutableMap.of(
+                "onlyifmissing", "gcc",
+                "yum", "gcc",
+                "apt", "gcc",
+                "port", null);
+        MutableMap<String, String> installMakePackageFlags = MutableMap.of(
+                "onlyifmissing", "make",
+                "yum", "make",
+                "apt", "make",
+                "port", null);
+
+        List<String> commands = ImmutableList.<String>builder()
+                .addAll(BashCommands.commandsToDownloadUrlsAs(urls, saveAs))
+                .add(BashCommands.INSTALL_TAR)
+                .add(BashCommands.INSTALL_CURL)
+                .add(BashCommands.installPackage(installGccPackageFlags, "redis-prerequisites-gcc"))
+                .add(BashCommands.installPackage(installMakePackageFlags, "redis-prerequisites-make"))
+                .add("tar xzfv " + saveAs)
+                .add(format("cd redis-%s", getVersion()))
+                .add("pushd deps")
+                .add("make lua hiredis linenoise")
+                .add("popd")
+                .add("make clean && make")
+                .build();
+
+        newScript(INSTALLING)
+                .failOnNonZeroResultCode()
+                .body.append(commands).execute();
+    }
+
+    @Override
+    public void customize() {
+        newScript(MutableMap.of("usePidFile", false), CUSTOMIZING)
+                .failOnNonZeroResultCode()
+                .body.append(
+                        format("cd %s", getExpandedInstallDir()),
+                        "make install PREFIX="+getRunDir())
+                .execute();
+
+        copyTemplate(getEntity().getConfig(RedisStore.REDIS_CONFIG_TEMPLATE_URL), "redis.conf");
+    }
+
+    @Override
+    public void launch() {
+        // TODO Should we redirect stdout/stderr: format(" >> %s/console 2>&1 </dev/null &", getRunDir())
+        newScript(MutableMap.of("usePidFile", false), LAUNCHING)
+                .failOnNonZeroResultCode()
+                .body.append("./bin/redis-server redis.conf")
+                .execute();
+    }
+
+    @Override
+    public boolean isRunning() {
+        return newScript(MutableMap.of("usePidFile", false), CHECK_RUNNING)
+                .body.append("./bin/redis-cli -p " + getEntity().getAttribute(RedisStore.REDIS_PORT) + " ping > /dev/null")
+                .execute() == 0;
+    }
+
+    /**
+     * Stops redis by issuing a {@code shutdown} through redis-cli.
+     */
+    @Override
+    public void stop() {
+        int exitCode = newScript(MutableMap.of("usePidFile", false), STOPPING)
+                .body.append("./bin/redis-cli -p " + getEntity().getAttribute(RedisStore.REDIS_PORT) + " shutdown")
+                .execute();
+        // TODO: Good enough? Will cause warnings when trying to stop a server that is already not running.
+        if (exitCode != 0) {
+            LOG.warn("Unexpected exit code when stopping {}: {}", entity, exitCode);
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakCluster.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakCluster.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakCluster.java
new file mode 100644
index 0000000..99df1a2
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakCluster.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.riak;
+
+import java.net.URI;
+import java.util.Map;
+
+import org.apache.brooklyn.catalog.Catalog;
+import brooklyn.config.ConfigKey;
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.Attributes;
+import brooklyn.entity.basic.ConfigKeys;
+import brooklyn.entity.group.DynamicCluster;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.Sensors;
+import brooklyn.util.flags.SetFromFlag;
+import brooklyn.util.time.Duration;
+
+import com.google.common.reflect.TypeToken;
+
+@Catalog(name="Riak Cluster", description="Riak is a distributed NoSQL key-value data store that offers "
+        + "extremely high availability, fault tolerance, operational simplicity and scalability.")
+@ImplementedBy(RiakClusterImpl.class)
+public interface RiakCluster extends DynamicCluster {
+
+    @SuppressWarnings("serial")
+    AttributeSensor<Map<Entity, String>> RIAK_CLUSTER_NODES = Sensors.newSensor(
+            new TypeToken<Map<Entity, String>>() {}, 
+            "riak.cluster.nodes", "Names of all active Riak nodes in the cluster <Entity,Riak Name>");
+
+    @SetFromFlag("delayBeforeAdvertisingCluster")
+    ConfigKey<Duration> DELAY_BEFORE_ADVERTISING_CLUSTER = ConfigKeys.newConfigKey(Duration.class, "riak.cluster.delayBeforeAdvertisingCluster", "Delay after cluster is started before checking and advertising its availability", Duration.seconds(2 * 60));
+
+    AttributeSensor<Boolean> IS_CLUSTER_INIT = Sensors.newBooleanSensor("riak.cluster.isClusterInit", "Flag to determine if the cluster was already initialized");
+
+    AttributeSensor<Boolean> IS_FIRST_NODE_SET = Sensors.newBooleanSensor("riak.cluster.isFirstNodeSet", "Flag to determine if the first node has been set");
+
+    AttributeSensor<String> NODE_LIST = Sensors.newStringSensor("riak.cluster.nodeList", "List of nodes (including ports), comma separated");
+
+    AttributeSensor<String> NODE_LIST_PB_PORT = Sensors.newStringSensor("riak.cluster.nodeListPbPort", "List of nodes (including ports for riak db clients), comma separated");
+
+    AttributeSensor<URI> RIAK_CONSOLE_URI = Attributes.MAIN_URI;
+
+    AttributeSensor<Integer> NODE_GETS_1MIN_PER_NODE = Sensors.newIntegerSensor("riak.node.gets.1m.perNode", "Gets in the last minute, averaged across cluster");
+    AttributeSensor<Integer> NODE_PUTS_1MIN_PER_NODE = Sensors.newIntegerSensor("riak.node.puts.1m.perNode", "Puts in the last minute, averaged across cluster");
+    AttributeSensor<Integer> NODE_OPS_1MIN_PER_NODE = Sensors.newIntegerSensor("riak.node.ops.1m.perNode", "Sum of node gets and puts in the last minute, averaged across cluster");
+
+}
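
For orientation, an interface like the one above is normally consumed by building an EntitySpec against it, through which its config keys (and, at runtime, its sensors) are exercised. Below is a hedged sketch of a caller, using the mixed package names exactly as they stand mid-rename; the application class and the chosen values are illustrative, not part of this commit:

    import brooklyn.entity.basic.AbstractApplication;
    import brooklyn.entity.proxying.EntitySpec;
    import brooklyn.util.time.Duration;

    import org.apache.brooklyn.entity.nosql.riak.RiakCluster;

    public class RiakClusterExampleApp extends AbstractApplication {
        @Override
        public void init() {
            // A three-node cluster (INITIAL_SIZE is inherited from the cluster
            // parent interfaces); wait three minutes instead of the default two
            // before the cluster checks and advertises its availability.
            addChild(EntitySpec.create(RiakCluster.class)
                    .configure(RiakCluster.INITIAL_SIZE, 3)
                    .configure(RiakCluster.DELAY_BEFORE_ADVERTISING_CLUSTER, Duration.minutes(3)));
        }
    }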


[10/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-1.2.yaml
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-1.2.yaml b/software/nosql/src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-1.2.yaml
deleted file mode 100644
index 045bb45..0000000
--- a/software/nosql/src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-1.2.yaml
+++ /dev/null
@@ -1,644 +0,0 @@
-[#ftl]
-#
-# Cassandra storage config YAML 
-
-# NOTE:
-#   See http://wiki.apache.org/cassandra/StorageConfiguration for
-#   full explanations of configuration directives
-# /NOTE
-
-# The name of the cluster. This is mainly used to prevent machines in
-# one logical cluster from joining another.
-cluster_name: '${entity.clusterName}'
-
-# This defines the number of tokens randomly assigned to this node on the ring
-# The more tokens, relative to other nodes, the larger the proportion of data
-# that this node will store. You probably want all nodes to have the same number
-# of tokens assuming they have equal hardware capability.
-#
-# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
-# and will use the initial_token as described below.
-#
-# Specifying initial_token will override this setting.
-#
-# If you already have a cluster with 1 token per node, and wish to migrate to 
-# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
-num_tokens: ${entity.numTokensPerNode?c}
-
-# If you haven't specified num_tokens, or have set it to the default of 1 then
-# you should always specify InitialToken when setting up a production
-# cluster for the first time, and often when adding capacity later.
-# The principle is that each node should be given an equal slice of
-# the token ring; see http://wiki.apache.org/cassandra/Operations
-# for more details.
-#
-# If blank, Cassandra will request a token bisecting the range of
-# the heaviest-loaded existing node.  If there is no load information
-# available, such as is the case with a new cluster, it will pick
-# a random token, which will lead to hot spots.
-initial_token: ${entity.tokensAsString}
-
-# See http://wiki.apache.org/cassandra/HintedHandoff
-hinted_handoff_enabled: true
-# this defines the maximum amount of time a dead host will have hints
-# generated.  After it has been dead this long, hints will be dropped.
-max_hint_window_in_ms: 10800000 # 3 hours
-# throttle in KBs per second, per delivery thread
-hinted_handoff_throttle_in_kb: 1024
-# Number of threads with which to deliver hints;
-# Consider increasing this number when you have multi-dc deployments, since
-# cross-dc handoff tends to be slower
-max_hints_delivery_threads: 2
-
-# The following setting populates the page cache on memtable flush and compaction
-# WARNING: Enable this setting only when the whole node's data fits in memory.
-# Defaults to: false
-# populate_io_cache_on_flush: false
-
-# authentication backend, implementing IAuthenticator; used to identify users
-authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
-
-# authorization backend, implementing IAuthorizer; used to limit access/provide permissions
-authorizer: org.apache.cassandra.auth.AllowAllAuthorizer
-
-# The partitioner is responsible for distributing rows (by key) across
-# nodes in the cluster.  Any IPartitioner may be used, including your
-# own as long as it is on the classpath.  Out of the box, Cassandra
-# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner
-# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}.
-# 
-# - RandomPartitioner distributes rows across the cluster evenly by md5.
-#   This is the default prior to 1.2 and is retained for compatibility.
-# - Murmur3Partitioner is similar to RandomPartioner but uses Murmur3_128
-#   Hash Function instead of md5.  When in doubt, this is the best option.
-# - ByteOrderedPartitioner orders rows lexically by key bytes.  BOP allows
-#   scanning rows in key order, but the ordering can generate hot spots
-#   for sequential insertion workloads.
-# - OrderPreservingPartitioner is an obsolete form of BOP, that stores
-#   keys in a less-efficient format and only works with keys that are
-#   UTF8-encoded Strings.
-# - CollatingOPP collates according to EN,US rules rather than lexical byte
-#   ordering.  Use this as an example if you need custom collation.
-#
-# See http://wiki.apache.org/cassandra/Operations for more on
-# partitioners and token selection.
-partitioner: org.apache.cassandra.dht.Murmur3Partitioner
-
-# directories where Cassandra should store data on disk.
-data_file_directories:
-    - ${driver.runDir}/data
-
-# commit log
-commitlog_directory: ${driver.runDir}/commitlog
-
-# policy for data disk failures:
-# stop: shut down gossip and Thrift, leaving the node effectively dead, but
-#       still inspectable via JMX.
-# best_effort: stop using the failed disk and respond to requests based on
-#              remaining available sstables.  This means you WILL see obsolete
-#              data at CL.ONE!
-# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
-disk_failure_policy: stop
-
-# Maximum size of the key cache in memory.
-#
-# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
-# minimum, sometimes more. The key cache is fairly tiny for the amount of
-# time it saves, so it's worthwhile to use it at large numbers.
-# The row cache saves even more time, but must store the whole values of
-# its rows, so it is extremely space-intensive. It's best to only use the
-# row cache if you have hot rows or static rows.
-#
-# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
-#
-# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
-key_cache_size_in_mb:
-
-# Duration in seconds after which Cassandra should
-# save the key cache. Caches are saved to saved_caches_directory as
-# specified in this configuration file.
-#
-# Saved caches greatly improve cold-start speeds, and saving them is relatively cheap in
-# terms of I/O for the key cache. Row cache saving is much more expensive and
-# has limited use.
-#
-# Default is 14400 or 4 hours.
-key_cache_save_period: 14400
-
-# Number of keys from the key cache to save
-# Disabled by default, meaning all keys are going to be saved
-# key_cache_keys_to_save: 100
-
-# Maximum size of the row cache in memory.
-# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
-#
-# Default value is 0, to disable row caching.
-row_cache_size_in_mb: 0
-
-# Duration in seconds after which Cassandra should
-# save the row cache. Caches are saved to saved_caches_directory as specified
-# in this configuration file.
-#
-# Saved caches greatly improve cold-start speeds, and saving them is relatively cheap in
-# terms of I/O for the key cache. Row cache saving is much more expensive and
-# has limited use.
-#
-# Default is 0 to disable saving the row cache.
-row_cache_save_period: 0
-
-# Number of keys from the row cache to save
-# Disabled by default, meaning all keys are going to be saved
-# row_cache_keys_to_save: 100
-
-# The provider for the row cache to use.
-#
-# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider
-#
-# SerializingCacheProvider serialises the contents of the row and stores
-# it in native memory, i.e., off the JVM Heap. Serialized rows take
-# significantly less memory than "live" rows in the JVM, so you can cache
-# more rows in a given memory footprint.  And storing the cache off-heap
-# means you can use smaller heap sizes, reducing the impact of GC pauses.
-#
-# It is also valid to specify the fully-qualified class name to a class
-# that implements org.apache.cassandra.cache.IRowCacheProvider.
-#
-# Defaults to SerializingCacheProvider
-row_cache_provider: SerializingCacheProvider
-
-# saved caches
-saved_caches_directory: ${driver.runDir}/saved_caches
-
-# commitlog_sync may be either "periodic" or "batch." 
-# When in batch mode, Cassandra won't ack writes until the commit log
-# has been fsynced to disk.  It will wait up to
-# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
-# performing the sync.
-#
-# commitlog_sync: batch
-# commitlog_sync_batch_window_in_ms: 50
-#
-# the other option is "periodic" where writes may be acked immediately
-# and the CommitLog is simply synced every commitlog_sync_period_in_ms
-# milliseconds.
-commitlog_sync: periodic
-commitlog_sync_period_in_ms: 10000
-
-# The size of the individual commitlog file segments.  A commitlog
-# segment may be archived, deleted, or recycled once all the data
-# in it (potentially from each columnfamily in the system) has been 
-# flushed to sstables.  
-#
-# The default size is 32, which is almost always fine, but if you are
-# archiving commitlog segments (see commitlog_archiving.properties),
-# then you probably want a finer granularity of archiving; 8 or 16 MB
-# is reasonable.
-commitlog_segment_size_in_mb: 32
-
-# any class that implements the SeedProvider interface and has a
-# constructor that takes a Map<String, String> of parameters will do.
-seed_provider:
-    # Addresses of hosts that are deemed contact points. 
-    # Cassandra nodes use this list of hosts to find each other and learn
-    # the topology of the ring.  You must change this if you are running
-    # multiple nodes!
-    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
-      parameters:
-          # seeds is actually a comma-delimited list of addresses.
-          # Ex: "<ip1>,<ip2>,<ip3>"
-          - seeds: "${entity.seeds}"
-
-# emergency pressure valve: each time heap usage after a full (CMS)
-# garbage collection is above this fraction of the max, Cassandra will
-# flush the largest memtables.  
-#
-# Set to 1.0 to disable.  Setting this lower than
-# CMSInitiatingOccupancyFraction is not likely to be useful.
-#
-# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY:
-# it is most effective under light to moderate load, or read-heavy
-# workloads; under truly massive write load, it will often be too
-# little, too late.
-flush_largest_memtables_at: 0.75
-
-# emergency pressure valve #2: the first time heap usage after a full
-# (CMS) garbage collection is above this fraction of the max,
-# Cassandra will reduce cache maximum _capacity_ to the given fraction
-# of the current _size_.  Should usually be set substantially above
-# flush_largest_memtables_at, since that will have less long-term
-# impact on the system.  
-# 
-# Set to 1.0 to disable.  Setting this lower than
-# CMSInitiatingOccupancyFraction is not likely to be useful.
-reduce_cache_sizes_at: 0.85
-reduce_cache_capacity_to: 0.6
-
-# For workloads with more data than can fit in memory, Cassandra's
-# bottleneck will be reads that need to fetch data from
-# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
-# order to allow the operations to enqueue low enough in the stack
-# that the OS and drives can reorder them.
-#
-# On the other hand, since writes are almost never IO bound, the ideal
-# number of "concurrent_writes" is dependent on the number of cores in
-# your system; (8 * number_of_cores) is a good rule of thumb.
-concurrent_reads: 32
-concurrent_writes: 32
-
-# Total memory to use for memtables.  Cassandra will flush the largest
-# memtable when this much memory is used.
-# If omitted, Cassandra will set it to 1/3 of the heap.
-# memtable_total_space_in_mb: 2048
-
-# Total space to use for commitlogs.  Since commitlog segments are
-# mmapped, and hence use up address space, the default size is 32
-# on 32-bit JVMs, and 1024 on 64-bit JVMs.
-#
-# If space gets above this value (it will round up to the next nearest
-# segment multiple), Cassandra will flush every dirty CF in the oldest
-# segment and remove it.  So a small total commitlog space will tend
-# to cause more flush activity on less-active columnfamilies.
-# commitlog_total_space_in_mb: 4096
-
-# This sets the amount of memtable flush writer threads.  These will
-# be blocked by disk io, and each one will hold a memtable in memory
-# while blocked. If you have a large heap and many data directories,
-# you can increase this value for better flush performance.
-# By default this will be set to the amount of data directories defined.
-#memtable_flush_writers: 1
-
-# the number of full memtables to allow pending flush, that is,
-# waiting for a writer thread.  At a minimum, this should be set to
-# the maximum number of secondary indexes created on a single CF.
-memtable_flush_queue_size: 4
-
-# Whether to, when doing sequential writing, fsync() at intervals in
-# order to force the operating system to flush the dirty
-# buffers. Enable this to avoid sudden dirty buffer flushing from
-# impacting read latencies. Almost always a good idea on SSD:s; not
-# necessarily on platters.
-trickle_fsync: false
-trickle_fsync_interval_in_kb: 10240
-
-# TCP port, for commands and data
-storage_port: ${entity.gossipPort?c}
-
-# SSL port, for encrypted communication.  Unused unless enabled in
-# encryption_options
-ssl_storage_port: ${entity.sslGossipPort?c}
-
-# Address to bind to and tell other Cassandra nodes to connect to. You
-# _must_ change this if you want multiple nodes to be able to
-# communicate!
-# 
-# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
-# will always do the Right Thing *if* the node is properly configured
-# (hostname, name resolution, etc), and the Right Thing is to use the
-# address associated with the hostname (it might not be).
-#
-# Setting this to 0.0.0.0 is always wrong.
-listen_address: ${entity.listenAddress}
-
-# Address to broadcast to other Cassandra nodes
-# Leaving this blank will set it to the same value as listen_address
-broadcast_address: ${entity.broadcastAddress}
-
-# Whether to start the native transport server.
-# Currently, only the thrift server is started by default because the native
-# transport is considered beta.
-# Please note that the address on which the native transport is bound is the
-# same as the rpc_address. The port however is different and specified below.
-start_native_transport: true
-# port for the CQL native transport to listen for clients on
-native_transport_port: ${entity.nativeTransportPort?c}
-# The minimum and maximum threads for handling requests when the native
-# transport is used. The meaning of these is similar to that of
-# rpc_min_threads and rpc_max_threads, though the defaults differ slightly and
-# are the ones below:
-#native_transport_min_threads: 16
-#native_transport_max_threads: 128
-
-
-# Whether to start the thrift rpc server.
-start_rpc: true
-# The address to bind the Thrift RPC service to -- clients connect
-# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
-# you want Thrift to listen on all interfaces.
-# 
-# Leaving this blank has the same effect it does for ListenAddress,
-# (i.e. it will be based on the configured hostname of the node).
-rpc_address: ${entity.rpcAddress}
-# port for Thrift to listen for clients on
-rpc_port: ${entity.thriftPort?c}
-
-# enable or disable keepalive on rpc connections
-rpc_keepalive: true
-
-# Cassandra provides three out-of-the-box options for the RPC Server:
-#
-# sync  -> One thread per thrift connection. For a very large number of clients, memory
-#          will be your limiting factor. On a 64 bit JVM, 128KB is the minimum stack size
-#          per thread, and that will correspond to your use of virtual memory (but physical memory
-#          may be limited depending on use of stack space).
-#
-# hsha  -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
-#          asynchronously using a small number of threads that does not vary with the amount
-#          of thrift clients (and thus scales well to many clients). The rpc requests are still
-#          synchronous (one thread per active request).
-#
-# The default is sync because on Windows hsha is about 30% slower.  On Linux,
-# sync/hsha performance is about the same, with hsha of course using less memory.
-#
-# Alternatively, you can provide your own RPC server by providing the fully-qualified class name
-# of an o.a.c.t.TServerFactory that can create an instance of it.
-rpc_server_type: sync
-
-# Uncomment rpc_min|max_thread to set request pool size limits.
-#
-# Regardless of your choice of RPC server (see above), the number of maximum requests in the
-# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
-# RPC server, it also dictates the number of clients that can be connected at all).
-#
-# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
-# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
-# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
-#
-# rpc_min_threads: 16
-# rpc_max_threads: 2048
-
-# uncomment to set socket buffer sizes on rpc connections
-# rpc_send_buff_size_in_bytes:
-# rpc_recv_buff_size_in_bytes:
-
-# Frame size for thrift (maximum field length).
-thrift_framed_transport_size_in_mb: 15
-
-# The max length of a thrift message, including all fields and
-# internal thrift overhead.
-thrift_max_message_length_in_mb: 16
-
-# Set to true to have Cassandra create a hard link to each sstable
-# flushed or streamed locally in a backups/ subdirectory of the
-# Keyspace data.  Removing these links is the operator's
-# responsibility.
-incremental_backups: false
-
-# Whether or not to take a snapshot before each compaction.  Be
-# careful using this option, since Cassandra won't clean up the
-# snapshots for you.  Mostly useful if you're paranoid when there
-# is a data format change.
-snapshot_before_compaction: false
-
-# Whether or not a snapshot is taken of the data before keyspace truncation
-# or dropping of column families. The STRONGLY advised default of true 
-# should be used to provide data safety. If you set this flag to false, you will
-# lose data on truncation or drop.
-auto_snapshot: true
-
-# Add column indexes to a row after its contents reach this size.
-# Increase if your column values are large, or if you have a very large
-# number of columns.  The competing causes are: Cassandra has to
-# deserialize this much of the row to read a single column, so you want
-# it to be small - at least if you do many partial-row reads - but all
-# the index data is read for each access, so you don't want to generate
-# that wastefully either.
-column_index_size_in_kb: 64
-
-# Size limit for rows being compacted in memory.  Larger rows will spill
-# over to disk and use a slower two-pass compaction process.  A message
-# will be logged specifying the row key.
-in_memory_compaction_limit_in_mb: 64
-
-# Number of simultaneous compactions to allow, NOT including
-# validation "compactions" for anti-entropy repair.  Simultaneous
-# compactions can help preserve read performance in a mixed read/write
-# workload, by mitigating the tendency of small sstables to accumulate
-# during a single long-running compaction. The default is usually
-# fine and if you experience problems with compaction running too
-# slowly or too fast, you should look at
-# compaction_throughput_mb_per_sec first.
-#
-# concurrent_compactors defaults to the number of cores.
-# Uncomment to make compaction mono-threaded, the pre-0.8 default.
-#concurrent_compactors: 1
-
-# Multi-threaded compaction. When enabled, each compaction will use
-# up to one thread per core, plus one thread per sstable being merged.
-# This is usually only useful for SSD-based hardware: otherwise, 
-# your concern is usually to get compaction to do LESS i/o (see:
-# compaction_throughput_mb_per_sec), not more.
-multithreaded_compaction: false
-
-# Throttles compaction to the given total throughput across the entire
-# system. The faster you insert data, the faster you need to compact in
-# order to keep the sstable count down, but in general, setting this to
-# 16 to 32 times the rate you are inserting data is more than sufficient.
-# Setting this to 0 disables throttling. Note that this accounts for all types
-# of compaction, including validation compaction.
-compaction_throughput_mb_per_sec: 16
-
-# Track cached row keys during compaction, and re-cache their new
-# positions in the compacted sstable.  Disable if you use really large
-# key caches.
-compaction_preheat_key_cache: true
-
-# Throttles all outbound streaming file transfers on this node to the
-# given total throughput in Mbps. This is necessary because Cassandra does
-# mostly sequential IO when streaming data during bootstrap or repair, which
-# can lead to saturating the network connection and degrading rpc performance.
-# When unset, the default is 400 Mbps or 50 MB/s.
-# stream_throughput_outbound_megabits_per_sec: 400
-
-# How long the coordinator should wait for read operations to complete
-read_request_timeout_in_ms: 10000
-# How long the coordinator should wait for seq or index scans to complete
-range_request_timeout_in_ms: 10000
-# How long the coordinator should wait for writes to complete
-write_request_timeout_in_ms: 10000
-# How long the coordinator should wait for truncates to complete
-# (This can be much longer, because unless auto_snapshot is disabled
-# we need to flush first so we can snapshot before removing the data.)
-truncate_request_timeout_in_ms: 60000
-# The default timeout for other, miscellaneous operations
-request_timeout_in_ms: 10000
-
-# Enable operation timeout information exchange between nodes to accurately
-# measure request timeouts. If disabled, cassandra will assume the request
-# was forwarded to the replica instantly by the coordinator.
-#
-# Warning: before enabling this property make sure ntp is installed
-# and the times are synchronized between the nodes.
-cross_node_timeout: false
-
-# Enable socket timeout for streaming operation.
-# When a timeout occurs during streaming, streaming is retried from the start
-# of the current file. This *can* involve re-streaming an important amount of
-# data, so you should avoid setting the value too low.
-# Default value is 0, which never times out streams.
-# streaming_socket_timeout_in_ms: 0
-
-# phi value that must be reached for a host to be marked down.
-# most users should never need to adjust this.
-# phi_convict_threshold: 8
-
-# endpoint_snitch -- Set this to a class that implements
-# IEndpointSnitch.  The snitch has two functions:
-# - it teaches Cassandra enough about your network topology to route
-#   requests efficiently
-# - it allows Cassandra to spread replicas around your cluster to avoid
-#   correlated failures. It does this by grouping machines into
-#   "datacenters" and "racks."  Cassandra will do its best not to have
-#   more than one replica on the same "rack" (which may not actually
-#   be a physical location)
-#
-# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
-# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
-# ARE PLACED.
-#
-# Out of the box, Cassandra provides
-#  - SimpleSnitch:
-#    Treats Strategy order as proximity. This improves cache locality
-#    when disabling read repair, which can further improve throughput.
-#    Only appropriate for single-datacenter deployments.
-#  - PropertyFileSnitch:
-#    Proximity is determined by rack and data center, which are
-#    explicitly configured in cassandra-topology.properties.
-#  - GossipingPropertyFileSnitch
-#    The rack and datacenter for the local node are defined in
-#    cassandra-rackdc.properties and propagated to other nodes via gossip.  If
-#    cassandra-topology.properties exists, it is used as a fallback, allowing
-#    migration from the PropertyFileSnitch.
-#  - RackInferringSnitch:
-#    Proximity is determined by rack and data center, which are
-#    assumed to correspond to the 3rd and 2nd octet of each node's
-#    IP address, respectively.  Unless this happens to match your
-#    deployment conventions (as it did Facebook's), this is best used
-#    as an example of writing a custom Snitch class.
-#  - Ec2Snitch:
-#    Appropriate for EC2 deployments in a single Region.  Loads Region
-#    and Availability Zone information from the EC2 API. The Region is
-#    treated as the Datacenter, and the Availability Zone as the rack.
-#    Only private IPs are used, so this will not work across multiple
-#    Regions.
-#  - Ec2MultiRegionSnitch:
-#    Uses public IPs as broadcast_address to allow cross-region
-#    connectivity.  (Thus, you should set seed addresses to the public
-#    IP as well.) You will need to open the storage_port or
-#    ssl_storage_port on the public IP firewall.  (For intra-Region
-#    traffic, Cassandra will switch to the private IP after
-#    establishing a connection.)
-#
-# You can use a custom Snitch by setting this to the full class name
-# of the snitch, which will be assumed to be on your classpath.
-endpoint_snitch: ${driver.endpointSnitchName}
-
-# controls how often to perform the more expensive part of host score
-# calculation
-dynamic_snitch_update_interval_in_ms: 100 
-# controls how often to reset all host scores, allowing a bad host to
-# possibly recover
-dynamic_snitch_reset_interval_in_ms: 600000
-# if set greater than zero and read_repair_chance is < 1.0, this will allow
-# 'pinning' of replicas to hosts in order to increase cache capacity.
-# The badness threshold will control how much worse the pinned host has to be
-# before the dynamic snitch will prefer other replicas over it.  This is
-# expressed as a double which represents a percentage.  Thus, a value of
-# 0.2 means Cassandra would continue to prefer the static snitch values
-# until the pinned host was 20% worse than the fastest.
-dynamic_snitch_badness_threshold: 0.1
-
-# request_scheduler -- Set this to a class that implements
-# RequestScheduler, which will schedule incoming client requests
-# according to the specific policy. This is useful for multi-tenancy
-# with a single Cassandra cluster.
-# NOTE: This is specifically for requests from the client and does
-# not affect inter node communication.
-# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
-# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
-# client requests to a node with a separate queue for each
-# request_scheduler_id. The scheduler is further customized by
-# request_scheduler_options as described below.
-request_scheduler: org.apache.cassandra.scheduler.NoScheduler
-
-# Scheduler Options vary based on the type of scheduler
-# NoScheduler - Has no options
-# RoundRobin
-#  - throttle_limit -- The throttle_limit is the number of in-flight
-#                      requests per client.  Requests beyond 
-#                      that limit are queued up until
-#                      running requests can complete.
-#                      The value of 80 here is twice the number of
-#                      concurrent_reads + concurrent_writes.
-#  - default_weight -- default_weight is optional and allows for
-#                      overriding the default which is 1.
-#  - weights -- Weights are optional and will default to 1 or the
-#               overridden default_weight. The weight translates into how
-#               many requests are handled during each turn of the
-#               RoundRobin, based on the scheduler id.
-#
-# request_scheduler_options:
-#    throttle_limit: 80
-#    default_weight: 5
-#    weights:
-#      Keyspace1: 1
-#      Keyspace2: 5
-
-# request_scheduler_id -- An identifer based on which to perform
-# the request scheduling. Currently the only valid option is keyspace.
-# request_scheduler_id: keyspace
-
-# index_interval controls the sampling of entries from the primary
-# row index in terms of space versus time.  The larger the interval,
-# the smaller and less effective the sampling will be.  In technical
-# terms, the interval corresponds to the number of index entries that
-# are skipped between taking each sample.  All the sampled entries
-# must fit in memory.  Generally, a value between 128 and 512 here
-# coupled with a large key cache size on CFs results in the best trade
-# offs.  This value is not often changed, however if you have many
-# very small rows (many to an OS page), then increasing this will
-# often lower memory usage without an impact on performance.
-index_interval: 128
-
-# Enable or disable inter-node encryption
-# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
-# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
-# suite for authentication, key exchange and encryption of the actual data transfers.
-# NOTE: No custom encryption options are enabled at the moment
-# The available internode options are: all, none, dc, rack
-#
-# If set to dc cassandra will encrypt the traffic between the DCs
-# If set to rack cassandra will encrypt the traffic between the racks
-#
-# The passwords used in these options must match the passwords used when generating
-# the keystore and truststore.  For instructions on generating these files, see:
-# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
-#
-server_encryption_options:
-    internode_encryption: none
-    keystore: conf/.keystore
-    keystore_password: cassandra
-    truststore: conf/.truststore
-    truststore_password: cassandra
-    # More advanced defaults below:
-    # protocol: TLS
-    # algorithm: SunX509
-    # store_type: JKS
-    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
-
-# enable or disable client/server encryption.
-client_encryption_options:
-    enabled: false
-    keystore: conf/.keystore
-    keystore_password: cassandra
-    # More advanced defaults below:
-    # protocol: TLS
-    # algorithm: SunX509
-    # store_type: JKS
-    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
-
-# internode_compression controls whether traffic between nodes is
-# compressed.
-# can be:  all  - all traffic is compressed
-#          dc   - traffic between different datacenters is compressed
-#          none - nothing is compressed.
-internode_compression: all
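
A note on the config templates being moved here: the "[#ftl]" header marks these YAML files as FreeMarker templates, and the "?c" in expressions such as ${entity.gossipPort?c} is FreeMarker's "computer format" built-in. It matters because a locale-aware number formatter could otherwise insert grouping separators and break the YAML. A minimal sketch of the effect, assuming the freemarker library on the classpath (the template string and model values here are illustrative, not taken from the templates above):

    import freemarker.template.Configuration;
    import freemarker.template.Template;

    import java.io.StringReader;
    import java.io.StringWriter;
    import java.util.Collections;

    public class FtlNumberDemo {
        public static void main(String[] args) throws Exception {
            Configuration cfg = new Configuration(Configuration.VERSION_2_3_21);
            // Without ?c a locale-aware formatter may render 10800000 as "10,800,000",
            // which is not a valid YAML integer; ?c forces the plain form.
            Template t = new Template("demo",
                    new StringReader("storage_port: ${port?c}"), cfg);
            StringWriter out = new StringWriter();
            t.process(Collections.singletonMap("port", 7000), out);
            System.out.println(out);  // prints: storage_port: 7000
        }
    }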

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-2.0.yaml
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-2.0.yaml b/software/nosql/src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-2.0.yaml
deleted file mode 100644
index 518379c..0000000
--- a/software/nosql/src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-2.0.yaml
+++ /dev/null
@@ -1,688 +0,0 @@
-[#ftl]
-#
-# Cassandra storage config YAML 
-
-# NOTE:
-#   See http://wiki.apache.org/cassandra/StorageConfiguration for
-#   full explanations of configuration directives
-# /NOTE
-
-# The name of the cluster. This is mainly used to prevent machines in
-# one logical cluster from joining another.
-cluster_name: '${entity.clusterName}'
-
-# This defines the number of tokens randomly assigned to this node on the ring
-# The more tokens, relative to other nodes, the larger the proportion of data
-# that this node will store. You probably want all nodes to have the same number
-# of tokens assuming they have equal hardware capability.
-#
-# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
-# and will use the initial_token as described below.
-#
-# Specifying initial_token will override this setting.
-#
-# If you already have a cluster with 1 token per node, and wish to migrate to 
-# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
-num_tokens: ${entity.numTokensPerNode?c}
-
-# initial_token allows you to specify tokens manually.  While you can use it with
-# vnodes (num_tokens > 1, above) -- in which case you should provide a 
-# comma-separated list -- it's primarily used when adding nodes to legacy clusters 
-# that do not have vnodes enabled.
-# initial_token: ${entity.tokensAsString}
-
-# May either be "true" or "false" to enable globally, or contain a list
-# of data centers to enable per-datacenter.
-# hinted_handoff_enabled: DC1,DC2
-# See http://wiki.apache.org/cassandra/HintedHandoff
-hinted_handoff_enabled: true
-# this defines the maximum amount of time a dead host will have hints
-# generated.  After it has been dead this long, new hints for it will not be
-# created until it has been seen alive and gone down again.
-max_hint_window_in_ms: 10800000 # 3 hours
-# Maximum throttle in KBs per second, per delivery thread.  This will be
-# reduced proportionally to the number of nodes in the cluster.  (If there
-# are two nodes in the cluster, each delivery thread will use the maximum
-# rate; if there are three, each will throttle to half of the maximum,
-# since we expect two nodes to be delivering hints simultaneously.)
-hinted_handoff_throttle_in_kb: 1024
-# Number of threads with which to deliver hints;
-# Consider increasing this number when you have multi-dc deployments, since
-# cross-dc handoff tends to be slower
-max_hints_delivery_threads: 2
-
-# Maximum throttle in KBs per second, total. This will be
-# reduced proportionally to the number of nodes in the cluster.
-batchlog_replay_throttle_in_kb: 1024
-
-# Authentication backend, implementing IAuthenticator; used to identify users
-# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator,
-# PasswordAuthenticator}.
-#
-# - AllowAllAuthenticator performs no checks - set it to disable authentication.
-# - PasswordAuthenticator relies on username/password pairs to authenticate
-#   users. It keeps usernames and hashed passwords in system_auth.credentials table.
-#   Please increase system_auth keyspace replication factor if you use this authenticator.
-authenticator: AllowAllAuthenticator
-
-# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
-# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer,
-# CassandraAuthorizer}.
-#
-# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
-# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
-#   increase system_auth keyspace replication factor if you use this authorizer.
-authorizer: AllowAllAuthorizer
-
-# Validity period for permissions cache (fetching permissions can be an
-# expensive operation depending on the authorizer, CassandraAuthorizer is
-# one example). Defaults to 2000, set to 0 to disable.
-# Will be disabled automatically for AllowAllAuthorizer.
-permissions_validity_in_ms: 2000
-
-# The partitioner is responsible for distributing groups of rows (by
-# partition key) across nodes in the cluster.  You should leave this
-# alone for new clusters.  The partitioner can NOT be changed without
-# reloading all data, so when upgrading you should set this to the
-# same partitioner you were already using.
-#
-# Besides Murmur3Partitioner, partitioners included for backwards
-# compatibility include RandomPartitioner, ByteOrderedPartitioner, and
-# OrderPreservingPartitioner.
-#
-partitioner: org.apache.cassandra.dht.Murmur3Partitioner
-
-# Directories where Cassandra should store data on disk.  Cassandra
-# will spread data evenly across them, subject to the granularity of
-# the configured compaction strategy.
-data_file_directories:
-    - ${driver.runDir}/data
-
-# commit log
-commitlog_directory: ${driver.runDir}/commitlog
-
-# policy for data disk failures:
-# stop_paranoid: shut down gossip and Thrift even for single-sstable errors.
-# stop: shut down gossip and Thrift, leaving the node effectively dead, but
-#       can still be inspected via JMX.
-# best_effort: stop using the failed disk and respond to requests based on
-#              remaining available sstables.  This means you WILL see obsolete
-#              data at CL.ONE!
-# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
-disk_failure_policy: stop
-
-# policy for commit disk failures:
-# stop: shut down gossip and Thrift, leaving the node effectively dead, but
-#       can still be inspected via JMX.
-# stop_commit: shutdown the commit log, letting writes collect but 
-#              continuing to service reads, as in pre-2.0.5 Cassandra
-# ignore: ignore fatal errors and let the batches fail
-commit_failure_policy: stop
-
-# Maximum size of the key cache in memory.
-#
-# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
-# minimum, sometimes more. The key cache is fairly tiny for the amount of
-# time it saves, so it's worthwhile to use it at large numbers.
-# The row cache saves even more time, but must contain the entire row,
-# so it is extremely space-intensive. It's best to only use the
-# row cache if you have hot rows or static rows.
-#
-# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
-#
-# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
-key_cache_size_in_mb:
-
-# Duration in seconds after which Cassandra should
-# save the key cache. Caches are saved to saved_caches_directory as
-# specified in this configuration file.
-#
-# Saved caches greatly improve cold-start speeds, and saving them is relatively cheap in
-# terms of I/O for the key cache. Row cache saving is much more expensive and
-# has limited use.
-#
-# Default is 14400 or 4 hours.
-key_cache_save_period: 14400
-
-# Number of keys from the key cache to save
-# Disabled by default, meaning all keys are going to be saved
-# key_cache_keys_to_save: 100
-
-# Maximum size of the row cache in memory.
-# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
-#
-# Default value is 0, to disable row caching.
-row_cache_size_in_mb: 0
-
-# Duration in seconds after which Cassandra should
-# save the row cache. Caches are saved to saved_caches_directory as specified
-# in this configuration file.
-#
-# Saved caches greatly improve cold-start speeds, and saving them is relatively cheap in
-# terms of I/O for the key cache. Row cache saving is much more expensive and
-# has limited use.
-#
-# Default is 0 to disable saving the row cache.
-row_cache_save_period: 0
-
-# Number of keys from the row cache to save
-# Disabled by default, meaning all keys are going to be saved
-# row_cache_keys_to_save: 100
-
-# The off-heap memory allocator.  Affects storage engine metadata as
-# well as caches.  Experiments show that JEMAlloc saves some memory
-# than the native GCC allocator (i.e., JEMalloc is more
-# fragmentation-resistant).
-# 
-# Supported values are: NativeAllocator, JEMallocAllocator
-#
-# If you intend to use JEMallocAllocator you have to install JEMalloc as library and
-# modify cassandra-env.sh as directed in the file.
-#
-# Defaults to NativeAllocator
-# memory_allocator: NativeAllocator
-
-# saved caches
-saved_caches_directory: ${driver.runDir}/saved_caches
-
-# commitlog_sync may be either "periodic" or "batch." 
-# When in batch mode, Cassandra won't ack writes until the commit log
-# has been fsynced to disk.  It will wait up to
-# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
-# performing the sync.
-#
-# commitlog_sync: batch
-# commitlog_sync_batch_window_in_ms: 50
-#
-# the other option is "periodic" where writes may be acked immediately
-# and the CommitLog is simply synced every commitlog_sync_period_in_ms
-# milliseconds.  By default this allows 1024*(CPU cores) pending
-# entries on the commitlog queue.  If you are writing very large blobs,
-# you should reduce that; 16*cores works reasonably well for 1MB blobs.
-# It should be at least as large as the concurrent_writes setting.
-commitlog_sync: periodic
-commitlog_sync_period_in_ms: 10000
-# commitlog_periodic_queue_size:
-
-# The size of the individual commitlog file segments.  A commitlog
-# segment may be archived, deleted, or recycled once all the data
-# in it (potentially from each columnfamily in the system) has been
-# flushed to sstables.  
-#
-# The default size is 32, which is almost always fine, but if you are
-# archiving commitlog segments (see commitlog_archiving.properties),
-# then you probably want a finer granularity of archiving; 8 or 16 MB
-# is reasonable.
-commitlog_segment_size_in_mb: 32
-
-# any class that implements the SeedProvider interface and has a
-# constructor that takes a Map<String, String> of parameters will do.
-seed_provider:
-    # Addresses of hosts that are deemed contact points. 
-    # Cassandra nodes use this list of hosts to find each other and learn
-    # the topology of the ring.  You must change this if you are running
-    # multiple nodes!
-    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
-      parameters:
-          # seeds is actually a comma-delimited list of addresses.
-          # Ex: "<ip1>,<ip2>,<ip3>"
-          - seeds: "${entity.seeds}"
-
-# For workloads with more data than can fit in memory, Cassandra's
-# bottleneck will be reads that need to fetch data from
-# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
-# order to allow the operations to enqueue low enough in the stack
-# that the OS and drives can reorder them.
-#
-# On the other hand, since writes are almost never IO bound, the ideal
-# number of "concurrent_writes" is dependent on the number of cores in
-# your system; (8 * number_of_cores) is a good rule of thumb.
-concurrent_reads: 32
-concurrent_writes: 32
-
-# Total memory to use for sstable-reading buffers.  Defaults to
-# the smaller of 1/4 of heap or 512MB.
-# file_cache_size_in_mb: 512
-
-# Total memory to use for memtables.  Cassandra will flush the largest
-# memtable when this much memory is used.
-# If omitted, Cassandra will set it to 1/4 of the heap.
-# memtable_total_space_in_mb: 2048
-
-# Total space to use for commitlogs.  Since commitlog segments are
-# mmapped, and hence use up address space, the default size is 32
-# on 32-bit JVMs, and 1024 on 64-bit JVMs.
-#
-# If space gets above this value (it will round up to the next nearest
-# segment multiple), Cassandra will flush every dirty CF in the oldest
-# segment and remove it.  So a small total commitlog space will tend
-# to cause more flush activity on less-active columnfamilies.
-# commitlog_total_space_in_mb: 4096
-
-# This sets the amount of memtable flush writer threads.  These will
-# be blocked by disk io, and each one will hold a memtable in memory
-# while blocked. If you have a large heap and many data directories,
-# you can increase this value for better flush performance.
-# By default this will be set to the amount of data directories defined.
-#memtable_flush_writers: 1
-
-# the number of full memtables to allow pending flush, that is,
-# waiting for a writer thread.  At a minimum, this should be set to
-# the maximum number of secondary indexes created on a single CF.
-memtable_flush_queue_size: 4
-
-# Whether to, when doing sequential writing, fsync() at intervals in
-# order to force the operating system to flush the dirty
-# buffers. Enable this to avoid sudden dirty buffer flushing from
-# impacting read latencies. Almost always a good idea on SSDs; not
-# necessarily on platters.
-trickle_fsync: false
-trickle_fsync_interval_in_kb: 10240
-
-# TCP port, for commands and data
-storage_port: ${entity.gossipPort?c}
-
-# SSL port, for encrypted communication.  Unused unless enabled in
-# encryption_options
-ssl_storage_port: ${entity.sslGossipPort?c}
-
-# Address to bind to and tell other Cassandra nodes to connect to. You
-# _must_ change this if you want multiple nodes to be able to
-# communicate!
-# 
-# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
-# will always do the Right Thing _if_ the node is properly configured
-# (hostname, name resolution, etc), and the Right Thing is to use the
-# address associated with the hostname (it might not be).
-#
-# Setting this to 0.0.0.0 is always wrong.
-listen_address: ${entity.listenAddress}
-
-# Address to broadcast to other Cassandra nodes
-# Leaving this blank will set it to the same value as listen_address
-broadcast_address: ${entity.broadcastAddress}
-
-# Internode authentication backend, implementing IInternodeAuthenticator;
-# used to allow/disallow connections from peer nodes.
-# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
-
-# Whether to start the native transport server.
-# Please note that the address on which the native transport is bound is the
-# same as the rpc_address. The port however is different and specified below.
-start_native_transport: true
-# port for the CQL native transport to listen for clients on
-native_transport_port: ${entity.nativeTransportPort?c}
-# The maximum threads for handling requests when the native transport is used.
-# This is similar to rpc_max_threads though the default differs slightly (and
-# there is no native_transport_min_threads, idle threads will always be stopped
-# after 30 seconds).
-# native_transport_max_threads: 128
-#
-# The maximum size of allowed frame. Frames (requests) larger than this will
-# be rejected as invalid. The default is 256MB.
-# native_transport_max_frame_size_in_mb: 256
-
-# Whether to start the thrift rpc server.
-start_rpc: true
-
-# The address to bind the Thrift RPC service and native transport
-# server -- clients connect here.
-#
-# Leaving this blank has the same effect it does for ListenAddress,
-# (i.e. it will be based on the configured hostname of the node).
-#
-# Note that unlike ListenAddress above, it is allowed to specify 0.0.0.0
-# here if you want to listen on all interfaces, but that will break clients 
-# that rely on node auto-discovery.
-rpc_address: ${entity.rpcAddress}
-# port for Thrift to listen for clients on
-rpc_port: ${entity.thriftPort?c}
-
-# enable or disable keepalive on rpc connections
-rpc_keepalive: true
-
-# Cassandra provides two out-of-the-box options for the RPC Server:
-#
-# sync  -> One thread per thrift connection. For a very large number of clients, memory
-#          will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
-#          per thread, and that will correspond to your use of virtual memory (but physical memory
-#          may be limited depending on use of stack space).
-#
-# hsha  -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
-#          asynchronously using a small number of threads that does not vary with the amount
-#          of thrift clients (and thus scales well to many clients). The rpc requests are still
-#          synchronous (one thread per active request).
-#
-# The default is sync because on Windows hsha is about 30% slower.  On Linux,
-# sync/hsha performance is about the same, with hsha of course using less memory.
-#
-# Alternatively, you can provide your own RPC server by providing the fully-qualified class name
-# of an o.a.c.t.TServerFactory that can create an instance of it.
-rpc_server_type: sync
-
-# Uncomment rpc_min|max_thread to set request pool size limits.
-#
-# Regardless of your choice of RPC server (see above), the number of maximum requests in the
-# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
-# RPC server, it also dictates the number of clients that can be connected at all).
-#
-# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
-# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
-# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
-#
-# rpc_min_threads: 16
-# rpc_max_threads: 2048
-
-# uncomment to set socket buffer sizes on rpc connections
-# rpc_send_buff_size_in_bytes:
-# rpc_recv_buff_size_in_bytes:
-
-# Uncomment to set socket buffer size for internode communication
-# Note that when setting this, the buffer size is limited by net.core.wmem_max
-# and when not setting it it is defined by net.ipv4.tcp_wmem
-# See:
-# /proc/sys/net/core/wmem_max
-# /proc/sys/net/core/rmem_max
-# /proc/sys/net/ipv4/tcp_wmem
-# /proc/sys/net/ipv4/tcp_rmem
-# and: man tcp
-# internode_send_buff_size_in_bytes:
-# internode_recv_buff_size_in_bytes:
-
-# Frame size for thrift (maximum message length).
-thrift_framed_transport_size_in_mb: 15
-
-# Set to true to have Cassandra create a hard link to each sstable
-# flushed or streamed locally in a backups/ subdirectory of the
-# keyspace data.  Removing these links is the operator's
-# responsibility.
-incremental_backups: false
-
-# Whether or not to take a snapshot before each compaction.  Be
-# careful using this option, since Cassandra won't clean up the
-# snapshots for you.  Mostly useful if you're paranoid when there
-# is a data format change.
-snapshot_before_compaction: false
-
-# Whether or not a snapshot is taken of the data before keyspace truncation
-# or dropping of column families. The STRONGLY advised default of true 
-# should be used to provide data safety. If you set this flag to false, you will
-# lose data on truncation or drop.
-auto_snapshot: true
-
-# When executing a scan, within or across a partition, we need to keep the
-# tombstones seen in memory so we can return them to the coordinator, which
-# will use them to make sure other replicas also know about the deleted rows.
-# With workloads that generate a lot of tombstones, this can cause performance
-# problems and even exhaust the server heap.
-# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
-# Adjust the thresholds here if you understand the dangers and want to
-# scan more tombstones anyway.  These thresholds may also be adjusted at runtime
-# using the StorageService mbean.
-tombstone_warn_threshold: 1000
-tombstone_failure_threshold: 100000
-
-# Add column indexes to a row after its contents reach this size.
-# Increase if your column values are large, or if you have a very large
-# number of columns.  The competing causes are: Cassandra has to
-# deserialize this much of the row to read a single column, so you want
-# it to be small - at least if you do many partial-row reads - but all
-# the index data is read for each access, so you don't want to generate
-# that wastefully either.
-column_index_size_in_kb: 64
-
-
-# Log WARN on any batch size exceeding this value. 5kb per batch by default.
-# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
-batch_size_warn_threshold_in_kb: 5
-
-# Size limit for rows being compacted in memory.  Larger rows will spill
-# over to disk and use a slower two-pass compaction process.  A message
-# will be logged specifying the row key.
-in_memory_compaction_limit_in_mb: 64
-
-# Number of simultaneous compactions to allow, NOT including
-# validation "compactions" for anti-entropy repair.  Simultaneous
-# compactions can help preserve read performance in a mixed read/write
-# workload, by mitigating the tendency of small sstables to accumulate
-# during a single long-running compaction. The default is usually
-# fine and if you experience problems with compaction running too
-# slowly or too fast, you should look at
-# compaction_throughput_mb_per_sec first.
-#
-# concurrent_compactors defaults to the number of cores.
-# Uncomment to make compaction mono-threaded, the pre-0.8 default.
-#concurrent_compactors: 1
-
-# Multi-threaded compaction. When enabled, each compaction will use
-# up to one thread per core, plus one thread per sstable being merged.
-# This is usually only useful for SSD-based hardware: otherwise, 
-# your concern is usually to get compaction to do LESS i/o (see:
-# compaction_throughput_mb_per_sec), not more.
-multithreaded_compaction: false
-
-# Throttles compaction to the given total throughput across the entire
-# system. The faster you insert data, the faster you need to compact in
-# order to keep the sstable count down, but in general, setting this to
-# 16 to 32 times the rate you are inserting data is more than sufficient.
-# Setting this to 0 disables throttling. Note that this accounts for all types
-# of compaction, including validation compaction.
-compaction_throughput_mb_per_sec: 16
-
-# Track cached row keys during compaction, and re-cache their new
-# positions in the compacted sstable.  Disable if you use really large
-# key caches.
-compaction_preheat_key_cache: true
-
-# Throttles all outbound streaming file transfers on this node to the
-# given total throughput in Mbps. This is necessary because Cassandra does
-# mostly sequential IO when streaming data during bootstrap or repair, which
-# can lead to saturating the network connection and degrading rpc performance.
-# When unset, the default is 200 Mbps or 25 MB/s.
-# stream_throughput_outbound_megabits_per_sec: 200
-
-# How long the coordinator should wait for read operations to complete
-read_request_timeout_in_ms: 5000
-# How long the coordinator should wait for seq or index scans to complete
-range_request_timeout_in_ms: 10000
-# How long the coordinator should wait for writes to complete
-write_request_timeout_in_ms: 2000
-# How long a coordinator should continue to retry a CAS operation
-# that contends with other proposals for the same row
-cas_contention_timeout_in_ms: 1000
-# How long the coordinator should wait for truncates to complete
-# (This can be much longer, because unless auto_snapshot is disabled
-# we need to flush first so we can snapshot before removing the data.)
-truncate_request_timeout_in_ms: 60000
-# The default timeout for other, miscellaneous operations
-request_timeout_in_ms: 10000
-
-# Enable operation timeout information exchange between nodes to accurately
-# measure request timeouts.  If disabled, replicas will assume that requests
-# were forwarded to them instantly by the coordinator, which means that
-# under overload conditions we will waste that much extra time processing 
-# already-timed-out requests.
-#
-# Warning: before enabling this property make sure ntp is installed
-# and the times are synchronized between the nodes.
-cross_node_timeout: false
-
-# Enable socket timeout for streaming operation.
-# When a timeout occurs during streaming, streaming is retried from the start
-# of the current file. This _can_ involve re-streaming a significant amount of
-# data, so you should avoid setting the value too low.
-# Default value is 0, which never times out streams.
-# streaming_socket_timeout_in_ms: 0
-
-# phi value that must be reached for a host to be marked down.
-# most users should never need to adjust this.
-# phi_convict_threshold: 8
-
-# endpoint_snitch -- Set this to a class that implements
-# IEndpointSnitch.  The snitch has two functions:
-# - it teaches Cassandra enough about your network topology to route
-#   requests efficiently
-# - it allows Cassandra to spread replicas around your cluster to avoid
-#   correlated failures. It does this by grouping machines into
-#   "datacenters" and "racks."  Cassandra will do its best not to have
-#   more than one replica on the same "rack" (which may not actually
-#   be a physical location)
-#
-# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
-# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
-# ARE PLACED.
-#
-# Out of the box, Cassandra provides
-#  - SimpleSnitch:
-#    Treats Strategy order as proximity. This can improve cache
-#    locality when disabling read repair.  Only appropriate for
-#    single-datacenter deployments.
-#  - GossipingPropertyFileSnitch
-#    This should be your go-to snitch for production use.  The rack
-#    and datacenter for the local node are defined in
-#    cassandra-rackdc.properties and propagated to other nodes via
-#    gossip.  If cassandra-topology.properties exists, it is used as a
-#    fallback, allowing migration from the PropertyFileSnitch.
-#  - PropertyFileSnitch:
-#    Proximity is determined by rack and data center, which are
-#    explicitly configured in cassandra-topology.properties.
-#  - Ec2Snitch:
-#    Appropriate for EC2 deployments in a single Region. Loads Region
-#    and Availability Zone information from the EC2 API. The Region is
-#    treated as the datacenter, and the Availability Zone as the rack.
-#    Only private IPs are used, so this will not work across multiple
-#    Regions.
-#  - Ec2MultiRegionSnitch:
-#    Uses public IPs as broadcast_address to allow cross-region
-#    connectivity.  (Thus, you should set seed addresses to the public
-#    IP as well.) You will need to open the storage_port or
-#    ssl_storage_port on the public IP firewall.  (For intra-Region
-#    traffic, Cassandra will switch to the private IP after
-#    establishing a connection.)
-#  - RackInferringSnitch:
-#    Proximity is determined by rack and data center, which are
-#    assumed to correspond to the 3rd and 2nd octet of each node's IP
-#    address, respectively.  Unless this happens to match your
-#    deployment conventions, this is best used as an example of
-#    writing a custom Snitch class and is provided in that spirit.
-#
-# You can use a custom Snitch by setting this to the full class name
-# of the snitch, which will be assumed to be on your classpath.
-endpoint_snitch: ${driver.endpointSnitchName}
-
-# controls how often to perform the more expensive part of host score
-# calculation
-dynamic_snitch_update_interval_in_ms: 100 
-# controls how often to reset all host scores, allowing a bad host to
-# possibly recover
-dynamic_snitch_reset_interval_in_ms: 600000
-# if set greater than zero and read_repair_chance is < 1.0, this will allow
-# 'pinning' of replicas to hosts in order to increase cache capacity.
-# The badness threshold will control how much worse the pinned host has to be
-# before the dynamic snitch will prefer other replicas over it.  This is
-# expressed as a double which represents a percentage.  Thus, a value of
-# 0.2 means Cassandra would continue to prefer the static snitch values
-# until the pinned host was 20% worse than the fastest.
-dynamic_snitch_badness_threshold: 0.1
-
-# request_scheduler -- Set this to a class that implements
-# RequestScheduler, which will schedule incoming client requests
-# according to the specific policy. This is useful for multi-tenancy
-# with a single Cassandra cluster.
-# NOTE: This is specifically for requests from the client and does
-# not affect inter node communication.
-# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
-# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
-# client requests to a node with a separate queue for each
-# request_scheduler_id. The scheduler is further customized by
-# request_scheduler_options as described below.
-request_scheduler: org.apache.cassandra.scheduler.NoScheduler
-
-# Scheduler Options vary based on the type of scheduler
-# NoScheduler - Has no options
-# RoundRobin
-#  - throttle_limit -- The throttle_limit is the number of in-flight
-#                      requests per client.  Requests beyond 
-#                      that limit are queued up until
-#                      running requests can complete.
-#                      The value of 80 here is twice the number of
-#                      concurrent_reads + concurrent_writes.
-#  - default_weight -- default_weight is optional and allows for
-#                      overriding the default which is 1.
-#  - weights -- Weights are optional and will default to 1 or the
-#               overridden default_weight. The weight translates into how
-#               many requests are handled during each turn of the
-#               RoundRobin, based on the scheduler id.
-#
-# request_scheduler_options:
-#    throttle_limit: 80
-#    default_weight: 5
-#    weights:
-#      Keyspace1: 1
-#      Keyspace2: 5
-
-# request_scheduler_id -- An identifier based on which to perform
-# the request scheduling. Currently the only valid option is keyspace.
-# request_scheduler_id: keyspace
-
-# Enable or disable inter-node encryption
-# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
-# users generate their own keys) and TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
-# suite for authentication, key exchange and encryption of the actual data transfers.
-# Use the DHE/ECDHE ciphers if running in FIPS 140 compliant mode.
-# NOTE: No custom encryption options are enabled at the moment
-# The available internode options are : all, none, dc, rack
-#
-# If set to dc cassandra will encrypt the traffic between the DCs
-# If set to rack cassandra will encrypt the traffic between the racks
-#
-# The passwords used in these options must match the passwords used when generating
-# the keystore and truststore.  For instructions on generating these files, see:
-# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
-#
-server_encryption_options:
-    internode_encryption: none
-    keystore: conf/.keystore
-    keystore_password: cassandra
-    truststore: conf/.truststore
-    truststore_password: cassandra
-    # More advanced defaults below:
-    # protocol: TLS
-    # algorithm: SunX509
-    # store_type: JKS
-    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-    # require_client_auth: false
-
-# enable or disable client/server encryption.
-client_encryption_options:
-    enabled: false
-    keystore: conf/.keystore
-    keystore_password: cassandra
-    # require_client_auth: false
-    # Set truststore and truststore_password if require_client_auth is true
-    # truststore: conf/.truststore
-    # truststore_password: cassandra
-    # More advanced defaults below:
-    # protocol: TLS
-    # algorithm: SunX509
-    # store_type: JKS
-    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-
-# internode_compression controls whether traffic between nodes is
-# compressed.
-# can be:  all  - all traffic is compressed
-#          dc   - traffic between different datacenters is compressed
-#          none - nothing is compressed.
-internode_compression: all
-
-# Enable or disable tcp_nodelay for inter-dc communication.
-# Disabling it will result in larger (but fewer) network packets being sent,
-# reducing overhead from the TCP protocol itself, at the cost of increasing
-# latency if you block for cross-datacenter responses.
-inter_dc_tcp_nodelay: false
-
-# Enable or disable kernel page cache preheating from contents of the key cache after compaction.
-# When enabled it would preheat only first "page" (4KB) of each row to optimize
-# for sequential access. Note: This could be harmful for fat rows, see CASSANDRA-4937
-# for further details on that topic.
-preheat_kernel_page_cache: false
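
As a rough worked example of the 16-32x compaction-throughput guidance above
(the ~1 MB/s insert rate is a hypothetical figure, not taken from this
configuration):

    # A node sustaining ~1 MB/s of incoming writes would, at 16-32x that
    # rate, want a throttle of roughly 16-32 MB/s; the default of 16
    # corresponds to the lower bound:
    compaction_throughput_mb_per_sec: 16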

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar b/software/nosql/src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar
deleted file mode 100644
index b1c1b94..0000000
Binary files a/software/nosql/src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.txt
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.txt b/software/nosql/src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.txt
deleted file mode 100644
index 205b18d..0000000
--- a/software/nosql/src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-The file cassandra-multicloud-snitch.jar is a snitch implementation
-that handles Cassandra datacenters in different cloud providers.
-
-The source code for cassandra-multicloud-snitch.jar is in sandbox/cassandra-multicloud-snitch.
-
-The source will be contributed to the Cassandra project; when it is available in the
-Cassandra distro (and when we no longer need to support backwards compatibility for
-older Cassandra versions), then we can delete it from Brooklyn.
-
-The jar can be uploaded to a Cassandra node as part of deployment, if
-this multi-cloud snitch is desired.
-
-Under Apache conventions, binary files are not part of the source
-release. If you are using the source release, you may add this file
-by copying it from the master repository, which is accessible on the
-web at https://github.com/apache/incubator-brooklyn

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-rackdc.properties
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-rackdc.properties b/software/nosql/src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-rackdc.properties
deleted file mode 100644
index 8fc323a..0000000
--- a/software/nosql/src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-rackdc.properties
+++ /dev/null
@@ -1,6 +0,0 @@
-# See http://www.datastax.com/docs/1.1/cluster_architecture/replication
-# Note publicip/privateip added for use by custom MultiCloudSnitch
-dc=${entity.datacenterName}
-rack=${entity.rackName}
-publicip=${entity.publicIp}
-privateip=${entity.privateIp}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/couchbase/pillowfight.yaml
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/couchbase/pillowfight.yaml b/software/nosql/src/main/resources/brooklyn/entity/nosql/couchbase/pillowfight.yaml
deleted file mode 100644
index 01f4027..0000000
--- a/software/nosql/src/main/resources/brooklyn/entity/nosql/couchbase/pillowfight.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-services:
-- type: brooklyn.entity.basic.VanillaSoftwareProcess
-  name: CBC Pillowfight
-  launch.command: |
-    sudo wget -O/etc/apt/sources.list.d/couchbase.list http://packages.couchbase.com/ubuntu/couchbase-ubuntu1204.list
-    sudo wget -O- http://packages.couchbase.com/ubuntu/couchbase.key | sudo apt-key add -
-    sudo apt-get update
-    sudo apt-get install -y libcouchbase2-libevent libcouchbase-dev libcouchbase2-bin
-  provisioning.properties:
-    # CentOS requires a different launch command, see below
-    osFamily: ubuntu
-  checkRunning.command: ""
-  stop.command: ""
-    
-  brooklyn.config:
-    base_url: http://127.0.0.1:8091/
-    
-  brooklyn.initializers:
-  - type: brooklyn.entity.software.ssh.SshCommandEffector
-    brooklyn.config:
-      name: pillow_fight
-      description: runs cbc pillowfight
-      command: |
-        cbc-pillowfight -U ${base_url}${bucket} \
-          `if [ -n "$username" ]; then echo -u $username; fi` \
-          `if [ -n "$password" ]; then echo -P $password; fi` \
-          `if [ -n "$num_cycles" ]; then echo -c $num_cycles; fi` \
-          `if [ -n "$min_size" ]; then echo -m $min_size; fi` \
-          `if [ -n "$max_size" ]; then echo -M $max_size; fi` \
-          `if [ -n "$ratio" ]; then echo -r $ratio; fi`
-      parameters:
-        base_url:
-          description: base URL (http or couchbases) and list of hosts/port to connect to, including trailing slash
-          defaultValue: $brooklyn:config("base_url")
-        bucket:
-          description: bucket to use
-          defaultValue: default
-        username:
-          description: username to authenticate to the bucket
-        password:
-          description: password to authenticate to the bucket
-        num_cycles:
-          description: number of iterations to run
-          defaultValue: 1
-        min_size:
-          description: minimum payload size
-          defaultValue: 50
-        max_size:
-          description: maximum payload size
-          defaultValue: 5120
-        ratio:
-          description: "specify SET/GET command ratio (default: 33, i.e. 33% SETs and 67% GETs)"
-          defaultValue: 33
-
-# For CentOS, use the following launch command:
-#  launch.command: |
-#    sudo wget -O/etc/yum.repos.d/couchbase.repo http://packages.couchbase.com/rpm/couchbase-centos55-x86_64.repo
-#    sudo yum check-update
-#    sudo yum install -y libcouchbase2-libevent libcouchbase-devel libcouchbase2-bin
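
For illustration, with the parameter defaults above and a hypothetical
base_url, the pillow_fight effector's command template reduces to a single
cbc-pillowfight invocation (the host and bucket values here are invented):

    # bucket=default, num_cycles=1, min_size=50, max_size=5120, ratio=33;
    # username/password are unset, so the -u/-P flags are omitted:
    #   cbc-pillowfight -U http://10.0.0.5:8091/default -c 1 -m 50 -M 5120 -r 33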

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/couchdb/couch.ini
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/couchdb/couch.ini b/software/nosql/src/main/resources/brooklyn/entity/nosql/couchdb/couch.ini
deleted file mode 100644
index fe58a0c..0000000
--- a/software/nosql/src/main/resources/brooklyn/entity/nosql/couchdb/couch.ini
+++ /dev/null
@@ -1,17 +0,0 @@
-[#ftl]
-#
-[couchdb]
-database_dir = ${driver.runDir}
-view_index_dir = ${driver.runDir}
-uri_file = ${driver.runDir}/couch.uri
-
-[httpd]
-port = ${entity.httpPort?c}
-bind_address = 0.0.0.0
-
-[ssl]
-port = ${entity.httpsPort?c}
-
-[log]
-file = ${driver.runDir}/couch.log
-level = info
\ No newline at end of file
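
The [#ftl] header marks this as a FreeMarker template, and the ?c builtin
renders numbers without locale grouping separators (5984, not 5,984). A
sketch of a rendered fragment, using CouchDB's usual default port as an
invented example value:

    [httpd]
    port = 5984
    bind_address = 0.0.0.0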

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/couchdb/couch.uri
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/couchdb/couch.uri b/software/nosql/src/main/resources/brooklyn/entity/nosql/couchdb/couch.uri
deleted file mode 100644
index 0997fc7..0000000
--- a/software/nosql/src/main/resources/brooklyn/entity/nosql/couchdb/couch.uri
+++ /dev/null
@@ -1,2 +0,0 @@
-[#ftl]
-http://${driver.hostname}:${entity.httpPort?c}/
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/mongodb/default-mongod.conf
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/mongodb/default-mongod.conf b/software/nosql/src/main/resources/brooklyn/entity/nosql/mongodb/default-mongod.conf
deleted file mode 100644
index e7f02fd..0000000
--- a/software/nosql/src/main/resources/brooklyn/entity/nosql/mongodb/default-mongod.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-# Default configuration for a mongod process. The use of noprealloc and smallfiles
-# means this configuration file should not be used in a production environment.
-
-quiet = false
-
-noprealloc = true
-smallfiles = true

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/mongodb/default.conf
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/mongodb/default.conf b/software/nosql/src/main/resources/brooklyn/entity/nosql/mongodb/default.conf
deleted file mode 100644
index c3c279f..0000000
--- a/software/nosql/src/main/resources/brooklyn/entity/nosql/mongodb/default.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-# Default Brooklyn configuration for a MongoDB process.
-quiet = false

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/redis/redis.conf
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/redis/redis.conf b/software/nosql/src/main/resources/brooklyn/entity/nosql/redis/redis.conf
deleted file mode 100644
index 0554eb2..0000000
--- a/software/nosql/src/main/resources/brooklyn/entity/nosql/redis/redis.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-# Redis configuration file
-
-# Start as daemon
-daemonize yes
-pidfile ${driver.runDir}/pid.txt
-
-# Set port and optional bind address
-port ${entity.redisPort?c}
-# bind ${entity.address}
-
-# Configure logging
-loglevel verbose
-logfile ${driver.runDir}/redis.log

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/brooklyn/entity/nosql/redis/slave.conf
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/brooklyn/entity/nosql/redis/slave.conf b/software/nosql/src/main/resources/brooklyn/entity/nosql/redis/slave.conf
deleted file mode 100644
index 3a9e64a..0000000
--- a/software/nosql/src/main/resources/brooklyn/entity/nosql/redis/slave.conf
+++ /dev/null
@@ -1,16 +0,0 @@
-# Redis configuration file
-
-# Start as daemon
-daemonize yes
-pidfile ${driver.runDir}/pid.txt
-
-# Set port and optional bind address
-port ${entity.redisPort?c}
-# bind ${entity.address}
-
-# Slave configuration
-slaveof ${entity.master.address} ${entity.master.redisPort?c}
-
-# Configure logging
-loglevel verbose
-logfile ${driver.runDir}/redis.log
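
The slaveof directive above is templated from the master entity's address and
port; a sketch of the rendered line, with an invented address and the default
Redis port:

    slaveof 10.0.0.20 6379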


[08/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-2.0.yaml
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-2.0.yaml b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-2.0.yaml
new file mode 100644
index 0000000..518379c
--- /dev/null
+++ b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-2.0.yaml
@@ -0,0 +1,688 @@
+[#ftl]
+#
+# Cassandra storage config YAML 
+
+# NOTE:
+#   See http://wiki.apache.org/cassandra/StorageConfiguration for
+#   full explanations of configuration directives
+# /NOTE
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+cluster_name: '${entity.clusterName}'
+
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+#
+# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
+# and will use the initial_token as described below.
+#
+# Specifying initial_token will override this setting.
+#
+# If you already have a cluster with 1 token per node, and wish to migrate to 
+# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
+num_tokens: ${entity.numTokensPerNode?c}
+
+# initial_token allows you to specify tokens manually.  While you can use it with
+# vnodes (num_tokens > 1, above) -- in which case you should provide a 
+# comma-separated list -- it's primarily used when adding nodes to legacy clusters 
+# that do not have vnodes enabled.
+# initial_token: ${entity.tokensAsString}
+
+# May either be "true" or "false" to enable globally, or contain a list
+# of data centers to enable per-datacenter.
+# hinted_handoff_enabled: DC1,DC2
+# See http://wiki.apache.org/cassandra/HintedHandoff
+hinted_handoff_enabled: true
+# this defines the maximum amount of time a dead host will have hints
+# generated.  After it has been dead this long, new hints for it will not be
+# created until it has been seen alive and gone down again.
+max_hint_window_in_ms: 10800000 # 3 hours
+# Maximum throttle in KBs per second, per delivery thread.  This will be
+# reduced proportionally to the number of nodes in the cluster.  (If there
+# are two nodes in the cluster, each delivery thread will use the maximum
+# rate; if there are three, each will throttle to half of the maximum,
+# since we expect two nodes to be delivering hints simultaneously.)
+hinted_handoff_throttle_in_kb: 1024
+# Number of threads with which to deliver hints;
+# Consider increasing this number when you have multi-dc deployments, since
+# cross-dc handoff tends to be slower
+max_hints_delivery_threads: 2
+
+# Maximum throttle in KBs per second, total. This will be
+# reduced proportionally to the number of nodes in the cluster.
+batchlog_replay_throttle_in_kb: 1024
+
+# Authentication backend, implementing IAuthenticator; used to identify users
+# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator,
+# PasswordAuthenticator}.
+#
+# - AllowAllAuthenticator performs no checks - set it to disable authentication.
+# - PasswordAuthenticator relies on username/password pairs to authenticate
+#   users. It keeps usernames and hashed passwords in system_auth.credentials table.
+#   Please increase system_auth keyspace replication factor if you use this authenticator.
+authenticator: AllowAllAuthenticator
+
+# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer,
+# CassandraAuthorizer}.
+#
+# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
+# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
+#   increase system_auth keyspace replication factor if you use this authorizer.
+authorizer: AllowAllAuthorizer
+
+# Validity period for permissions cache (fetching permissions can be an
+# expensive operation depending on the authorizer, CassandraAuthorizer is
+# one example). Defaults to 2000, set to 0 to disable.
+# Will be disabled automatically for AllowAllAuthorizer.
+permissions_validity_in_ms: 2000
+
+# The partitioner is responsible for distributing groups of rows (by
+# partition key) across nodes in the cluster.  You should leave this
+# alone for new clusters.  The partitioner can NOT be changed without
+# reloading all data, so when upgrading you should set this to the
+# same partitioner you were already using.
+#
+# Besides Murmur3Partitioner, partitioners included for backwards
+# compatibility include RandomPartitioner, ByteOrderedPartitioner, and
+# OrderPreservingPartitioner.
+#
+partitioner: org.apache.cassandra.dht.Murmur3Partitioner
+
+# Directories where Cassandra should store data on disk.  Cassandra
+# will spread data evenly across them, subject to the granularity of
+# the configured compaction strategy.
+data_file_directories:
+    - ${driver.runDir}/data
+
+# commit log
+commitlog_directory: ${driver.runDir}/commitlog
+
+# policy for data disk failures:
+# stop_paranoid: shut down gossip and Thrift even for single-sstable errors.
+# stop: shut down gossip and Thrift, leaving the node effectively dead, but
+#       can still be inspected via JMX.
+# best_effort: stop using the failed disk and respond to requests based on
+#              remaining available sstables.  This means you WILL see obsolete
+#              data at CL.ONE!
+# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
+disk_failure_policy: stop
+
+# policy for commit disk failures:
+# stop: shut down gossip and Thrift, leaving the node effectively dead, but
+#       can still be inspected via JMX.
+# stop_commit: shutdown the commit log, letting writes collect but 
+#              continuing to service reads, as in pre-2.0.5 Cassandra
+# ignore: ignore fatal errors and let the batches fail
+commit_failure_policy: stop
+
+# Maximum size of the key cache in memory.
+#
+# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
+# minimum, sometimes more. The key cache is fairly tiny for the amount of
+# time it saves, so it's worthwhile to use it at large numbers.
+# The row cache saves even more time, but must contain the entire row,
+# so it is extremely space-intensive. It's best to only use the
+# row cache if you have hot rows or static rows.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
+key_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the key cache. Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and saving is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 14400 or 4 hours.
+key_cache_save_period: 14400
+
+# Number of keys from the key cache to save
+# Disabled by default, meaning all keys are going to be saved
+# key_cache_keys_to_save: 100
+
+# Maximum size of the row cache in memory.
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is 0, to disable row caching.
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should
+# save the row cache. Caches are saved to saved_caches_directory as specified
+# in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and saving is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save
+# Disabled by default, meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# The off-heap memory allocator.  Affects storage engine metadata as
+# well as caches.  Experiments show that JEMalloc uses less memory
+# than the native GCC allocator (i.e., JEMalloc is more
+# fragmentation-resistant).
+# 
+# Supported values are: NativeAllocator, JEMallocAllocator
+#
+# If you intend to use JEMallocAllocator you have to install JEMalloc as a library and
+# modify cassandra-env.sh as directed in the file.
+#
+# Defaults to NativeAllocator
+# memory_allocator: NativeAllocator
+
+# saved caches
+saved_caches_directory: ${driver.runDir}/saved_caches
+
+# commitlog_sync may be either "periodic" or "batch." 
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk.  It will wait up to
+# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
+# performing the sync.
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 50
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.  By default this allows 1024*(CPU cores) pending
+# entries on the commitlog queue.  If you are writing very large blobs,
+# you should reduce that; 16*cores works reasonably well for 1MB blobs.
+# It should be at least as large as the concurrent_writes setting.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+# commitlog_periodic_queue_size:
+
+# The size of the individual commitlog file segments.  A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables.  
+#
+# The default size is 32, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+commitlog_segment_size_in_mb: 32
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map<String, String> of parameters will do.
+seed_provider:
+    # Addresses of hosts that are deemed contact points. 
+    # Cassandra nodes use this list of hosts to find each other and learn
+    # the topology of the ring.  You must change this if you are running
+    # multiple nodes!
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          # seeds is actually a comma-delimited list of addresses.
+          # Ex: "<ip1>,<ip2>,<ip3>"
+          - seeds: "${entity.seeds}"
+
+# For workloads with more data than can fit in memory, Cassandra's
+# bottleneck will be reads that need to fetch data from
+# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
+# order to allow the operations to enqueue low enough in the stack
+# that the OS and drives can reorder them.
+#
+# On the other hand, since writes are almost never IO bound, the ideal
+# number of "concurrent_writes" is dependent on the number of cores in
+# your system; (8 * number_of_cores) is a good rule of thumb.
+concurrent_reads: 32
+concurrent_writes: 32
+
+# Total memory to use for sstable-reading buffers.  Defaults to
+# the smaller of 1/4 of heap or 512MB.
+# file_cache_size_in_mb: 512
+
+# Total memory to use for memtables.  Cassandra will flush the largest
+# memtable when this much memory is used.
+# If omitted, Cassandra will set it to 1/4 of the heap.
+# memtable_total_space_in_mb: 2048
+
+# Total space to use for commitlogs.  Since commitlog segments are
+# mmapped, and hence use up address space, the default size is 32
+# on 32-bit JVMs, and 1024 on 64-bit JVMs.
+#
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Cassandra will flush every dirty CF in the oldest
+# segment and remove it.  So a small total commitlog space will tend
+# to cause more flush activity on less-active columnfamilies.
+# commitlog_total_space_in_mb: 4096
+
+# This sets the number of memtable flush writer threads.  These will
+# be blocked by disk io, and each one will hold a memtable in memory
+# while blocked. If you have a large heap and many data directories,
+# you can increase this value for better flush performance.
+# By default this will be set to the number of data directories defined.
+#memtable_flush_writers: 1
+
+# the number of full memtables to allow pending flush, that is,
+# waiting for a writer thread.  At a minimum, this should be set to
+# the maximum number of secondary indexes created on a single CF.
+memtable_flush_queue_size: 4
+
+# Whether to, when doing sequential writing, fsync() at intervals in
+# order to force the operating system to flush the dirty
+# buffers. Enable this to avoid sudden dirty buffer flushing from
+# impacting read latencies. Almost always a good idea on SSDs; not
+# necessarily on platters.
+trickle_fsync: false
+trickle_fsync_interval_in_kb: 10240
+
+# TCP port, for commands and data
+storage_port: ${entity.gossipPort?c}
+
+# SSL port, for encrypted communication.  Unused unless enabled in
+# encryption_options
+ssl_storage_port: ${entity.sslGossipPort?c}
+
+# Address to bind to and tell other Cassandra nodes to connect to. You
+# _must_ change this if you want multiple nodes to be able to
+# communicate!
+# 
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing _if_ the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting this to 0.0.0.0 is always wrong.
+listen_address: ${entity.listenAddress}
+
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+broadcast_address: ${entity.broadcastAddress}
+
+# Internode authentication backend, implementing IInternodeAuthenticator;
+# used to allow/disallow connections from peer nodes.
+# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
+
+# Whether to start the native transport server.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+start_native_transport: true
+# port for the CQL native transport to listen for clients on
+native_transport_port: ${entity.nativeTransportPort?c}
+# The maximum threads for handling requests when the native transport is used.
+# This is similar to rpc_max_threads though the default differs slightly (and
+# there is no native_transport_min_threads, idle threads will always be stopped
+# after 30 seconds).
+# native_transport_max_threads: 128
+#
+# The maximum size of allowed frame. Frame (requests) larger than this will
+# be rejected as invalid. The default is 256MB.
+# native_transport_max_frame_size_in_mb: 256
+
+# Whether to start the thrift rpc server.
+start_rpc: true
+
+# The address to bind the Thrift RPC service and native transport
+# server -- clients connect here.
+#
+# Leaving this blank has the same effect it does for ListenAddress,
+# (i.e. it will be based on the configured hostname of the node).
+#
+# Note that unlike ListenAddress above, it is allowed to specify 0.0.0.0
+# here if you want to listen on all interfaces, but that will break clients 
+# that rely on node auto-discovery.
+rpc_address: ${entity.rpcAddress}
+# port for Thrift to listen for clients on
+rpc_port: ${entity.thriftPort?c}
+
+# enable or disable keepalive on rpc connections
+rpc_keepalive: true
+
+# Cassandra provides two out-of-the-box options for the RPC Server:
+#
+# sync  -> One thread per thrift connection. For a very large number of clients, memory
+#          will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
+#          per thread, and that will correspond to your use of virtual memory (but physical memory
+#          may be limited depending on use of stack space).
+#
+# hsha  -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
+#          asynchronously using a small number of threads that does not vary with the number
+#          of thrift clients (and thus scales well to many clients). The rpc requests are still
+#          synchronous (one thread per active request).
+#
+# The default is sync because on Windows hsha is about 30% slower.  On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+#
+# Alternatively, you can provide your own RPC server by giving the fully-qualified class name
+# of an o.a.c.t.TServerFactory that can create an instance of it.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max_thread to set request pool size limits.
+#
+# Regardless of your choice of RPC server (see above), the number of maximum requests in the
+# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
+# RPC server, it also dictates the number of clients that can be connected at all).
+#
+# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
+# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
+# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.wmem_max
+# and when not setting it, it is defined by net.ipv4.tcp_wmem
+# See:
+# /proc/sys/net/core/wmem_max
+# /proc/sys/net/core/rmem_max
+# /proc/sys/net/ipv4/tcp_wmem
+# /proc/sys/net/ipv4/tcp_rmem
+# and: man tcp
+# internode_send_buff_size_in_bytes:
+# internode_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum message length).
+thrift_framed_transport_size_in_mb: 15
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# keyspace data.  Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction.  Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you.  Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true 
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+auto_snapshot: true
+
+# When executing a scan, within or across a partition, we need to keep the
+# tombstones seen in memory so we can return them to the coordinator, which
+# will use them to make sure other replicas also know about the deleted rows.
+# With workloads that generate a lot of tombstones, this can cause performance
+# problems and even exhaust the server heap.
+# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
+# Adjust the thresholds here if you understand the dangers and want to
+# scan more tombstones anyway.  These thresholds may also be adjusted at runtime
+# using the StorageService mbean.
+tombstone_warn_threshold: 1000
+tombstone_failure_threshold: 100000
+
+# Add column indexes to a row after its contents reach this size.
+# Increase if your column values are large, or if you have a very large
+# number of columns.  The competing causes are: Cassandra has to
+# deserialize this much of the row to read a single column, so you want
+# it to be small - at least if you do many partial-row reads - but all
+# the index data is read for each access, so you don't want to generate
+# that wastefully either.
+column_index_size_in_kb: 64
+
+
+# Log WARN on any batch size exceeding this value. 5kb per batch by default.
+# Caution should be taken when increasing this threshold, as it can lead to node instability.
+batch_size_warn_threshold_in_kb: 5
+
+# Size limit for rows being compacted in memory.  Larger rows will spill
+# over to disk and use a slower two-pass compaction process.  A message
+# will be logged specifying the row key.
+in_memory_compaction_limit_in_mb: 64
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair.  Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long-running compaction. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# concurrent_compactors defaults to the number of cores.
+# Uncomment to make compaction mono-threaded, the pre-0.8 default.
+#concurrent_compactors: 1
+
+# Multi-threaded compaction. When enabled, each compaction will use
+# up to one thread per core, plus one thread per sstable being merged.
+# This is usually only useful for SSD-based hardware: otherwise, 
+# your concern is usually to get compaction to do LESS i/o (see:
+# compaction_throughput_mb_per_sec), not more.
+multithreaded_compaction: false
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this accounts for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: 16
+
+# Track cached row keys during compaction, and re-cache their new
+# positions in the compacted sstable.  Disable if you use really large
+# key caches.
+compaction_preheat_key_cache: true
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 200 Mbps or 25 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 200
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 5000
+# How long the coordinator should wait for seq or index scans to complete
+range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 2000
+# How long a coordinator should continue to retry a CAS operation
+# that contends with other proposals for the same row
+cas_contention_timeout_in_ms: 1000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because unless auto_snapshot is disabled
+# we need to flush first so we can snapshot before removing the data.)
+truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+request_timeout_in_ms: 10000
+
+# Enable operation timeout information exchange between nodes to accurately
+# measure request timeouts.  If disabled, replicas will assume that requests
+# were forwarded to them instantly by the coordinator, which means that
+# under overload conditions we will waste that much extra time processing 
+# already-timed-out requests.
+#
+# Warning: before enabling this property make sure ntp is installed
+# and the times are synchronized between the nodes.
+cross_node_timeout: false
+
+# Enable socket timeout for streaming operation.
+# When a timeout occurs during streaming, streaming is retried from the start
+# of the current file. This _can_ involve re-streaming a significant amount of
+# data, so you should avoid setting the value too low.
+# Default value is 0, which never times out streams.
+# streaming_socket_timeout_in_ms: 0
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch.  The snitch has two functions:
+# - it teaches Cassandra enough about your network topology to route
+#   requests efficiently
+# - it allows Cassandra to spread replicas around your cluster to avoid
+#   correlated failures. It does this by grouping machines into
+#   "datacenters" and "racks."  Cassandra will do its best not to have
+#   more than one replica on the same "rack" (which may not actually
+#   be a physical location)
+#
+# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
+# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
+# ARE PLACED.
+#
+# Out of the box, Cassandra provides
+#  - SimpleSnitch:
+#    Treats Strategy order as proximity. This can improve cache
+#    locality when disabling read repair.  Only appropriate for
+#    single-datacenter deployments.
+#  - GossipingPropertyFileSnitch
+#    This should be your go-to snitch for production use.  The rack
+#    and datacenter for the local node are defined in
+#    cassandra-rackdc.properties and propagated to other nodes via
+#    gossip.  If cassandra-topology.properties exists, it is used as a
+#    fallback, allowing migration from the PropertyFileSnitch.
+#  - PropertyFileSnitch:
+#    Proximity is determined by rack and data center, which are
+#    explicitly configured in cassandra-topology.properties.
+#  - Ec2Snitch:
+#    Appropriate for EC2 deployments in a single Region. Loads Region
+#    and Availability Zone information from the EC2 API. The Region is
+#    treated as the datacenter, and the Availability Zone as the rack.
+#    Only private IPs are used, so this will not work across multiple
+#    Regions.
+#  - Ec2MultiRegionSnitch:
+#    Uses public IPs as broadcast_address to allow cross-region
+#    connectivity.  (Thus, you should set seed addresses to the public
+#    IP as well.) You will need to open the storage_port or
+#    ssl_storage_port on the public IP firewall.  (For intra-Region
+#    traffic, Cassandra will switch to the private IP after
+#    establishing a connection.)
+#  - RackInferringSnitch:
+#    Proximity is determined by rack and data center, which are
+#    assumed to correspond to the 3rd and 2nd octet of each node's IP
+#    address, respectively.  Unless this happens to match your
+#    deployment conventions, this is best used as an example of
+#    writing a custom Snitch class and is provided in that spirit.
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: ${driver.endpointSnitchName}
+
+# controls how often to perform the more expensive part of host score
+# calculation
+dynamic_snitch_update_interval_in_ms: 100 
+# controls how often to reset all host scores, allowing a bad host to
+# possibly recover
+dynamic_snitch_reset_interval_in_ms: 600000
+# if set greater than zero and read_repair_chance is < 1.0, this will allow
+# 'pinning' of replicas to hosts in order to increase cache capacity.
+# The badness threshold will control how much worse the pinned host has to be
+# before the dynamic snitch will prefer other replicas over it.  This is
+# expressed as a double which represents a percentage.  Thus, a value of
+# 0.2 means Cassandra would continue to prefer the static snitch values
+# until the pinned host was 20% worse than the fastest.
+dynamic_snitch_badness_threshold: 0.1
+
+# request_scheduler -- Set this to a class that implements
+# RequestScheduler, which will schedule incoming client requests
+# according to the specific policy. This is useful for multi-tenancy
+# with a single Cassandra cluster.
+# NOTE: This is specifically for requests from the client and does
+# not affect inter node communication.
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
+# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
+# client requests to a node with a separate queue for each
+# request_scheduler_id. The scheduler is further customized by
+# request_scheduler_options as described below.
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
+
+# Scheduler Options vary based on the type of scheduler
+# NoScheduler - Has no options
+# RoundRobin
+#  - throttle_limit -- The throttle_limit is the number of in-flight
+#                      requests per client.  Requests beyond 
+#                      that limit are queued up until
+#                      running requests can complete.
+#                      The value of 80 here is twice the number of
+#                      concurrent_reads + concurrent_writes.
+#  - default_weight -- default_weight is optional and allows for
+#                      overriding the default which is 1.
+#  - weights -- Weights are optional and will default to 1 or the
+#               overridden default_weight. The weight translates into how
+#               many requests are handled during each turn of the
+#               RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+#    throttle_limit: 80
+#    default_weight: 5
+#    weights:
+#      Keyspace1: 1
+#      Keyspace2: 5
+
+# request_scheduler_id -- An identifier based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# Enable or disable inter-node encryption
+# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
+# users generate their own keys) and TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
+# suite for authentication, key exchange and encryption of the actual data transfers.
+# Use the DHE/ECDHE ciphers if running in FIPS 140 compliant mode.
+# NOTE: No custom encryption options are enabled at the moment
+# The available internode options are : all, none, dc, rack
+#
+# If set to dc cassandra will encrypt the traffic between the DCs
+# If set to rack cassandra will encrypt the traffic between the racks
+#
+# The passwords used in these options must match the passwords used when generating
+# the keystore and truststore.  For instructions on generating these files, see:
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
+#
+server_encryption_options:
+    internode_encryption: none
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    truststore: conf/.truststore
+    truststore_password: cassandra
+    # More advanced defaults below:
+    # protocol: TLS
+    # algorithm: SunX509
+    # store_type: JKS
+    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
+    # require_client_auth: false
+
+# enable or disable client/server encryption.
+client_encryption_options:
+    enabled: false
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    # require_client_auth: false
+    # Set truststore and truststore_password if require_client_auth is true
+    # truststore: conf/.truststore
+    # truststore_password: cassandra
+    # More advanced defaults below:
+    # protocol: TLS
+    # algorithm: SunX509
+    # store_type: JKS
+    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# can be:  all  - all traffic is compressed
+#          dc   - traffic between different datacenters is compressed
+#          none - nothing is compressed.
+internode_compression: all
+
+# Enable or disable tcp_nodelay for inter-dc communication.
+# Disabling it will result in larger (but fewer) network packets being sent,
+# reducing overhead from the TCP protocol itself, at the cost of increasing
+# latency if you block for cross-datacenter responses.
+inter_dc_tcp_nodelay: false
+
+# Enable or disable kernel page cache preheating from contents of the key cache after compaction.
+# When enabled it will preheat only the first "page" (4KB) of each row to optimize
+# for sequential access. Note: This could be harmful for fat rows, see CASSANDRA-4937
+# for further details on that topic.
+preheat_kernel_page_cache: false
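
For orientation, the ${entity.*} and ${driver.*} placeholders throughout the
template above are FreeMarker expressions that Brooklyn substitutes per node
at install time. A sketch of a few rendered lines, with invented values (the
ports shown are the usual Cassandra defaults):

    cluster_name: 'MyCluster'                     # entity.clusterName
    num_tokens: 256                               # entity.numTokensPerNode
    listen_address: 10.0.0.12                     # entity.listenAddress
    broadcast_address: 203.0.113.10               # entity.broadcastAddress
    storage_port: 7000                            # entity.gossipPort
    native_transport_port: 9042                   # entity.nativeTransportPort
    rpc_port: 9160                                # entity.thriftPort
    endpoint_snitch: GossipingPropertyFileSnitch  # driver.endpointSnitchName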

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar
new file mode 100644
index 0000000..b1c1b94
Binary files /dev/null and b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar differ

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.txt
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.txt b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.txt
new file mode 100644
index 0000000..205b18d
--- /dev/null
+++ b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.txt
@@ -0,0 +1,33 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+The file cassandra-multicloud-snitch.jar is a snitch implementation
+that handles Cassandra datacenters in different cloud providers.
+
+The source code for cassandra-multicloud-snitch.jar is in sandbox/cassandra-multicloud-snitch.
+
+The source will be contributed to the Cassandra project; once it is available in the
+Cassandra distro (and we no longer need to maintain backwards compatibility with
+older Cassandra versions), it can be deleted from Brooklyn.
+
+The jar can be uploaded to a Cassandra node as part of deployment, if
+this multi-cloud snitch is desired.
+
+Under Apache conventions, binary files are not part of the source
+release. If you are using the source release, you may add this file
+by copying it from the master repository, which is accessible on the
+web at https://github.com/apache/incubator-brooklyn
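
For illustration, a node using this snitch would point cassandra.yaml at the
custom class and take its datacenter/rack metadata from the
cassandra-rackdc.properties template below; the class name here is assumed
from the sandbox module, not confirmed by this commit:

    # cassandra.yaml (sketch)
    endpoint_snitch: brooklyn.entity.nosql.cassandra.customsnitch.MultiCloudSnitch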

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-rackdc.properties
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-rackdc.properties b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-rackdc.properties
new file mode 100644
index 0000000..8fc323a
--- /dev/null
+++ b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-rackdc.properties
@@ -0,0 +1,6 @@
+# See http://www.datastax.com/docs/1.1/cluster_architecture/replication
+# Note publicip/privateip added for use by custom MultiCloudSnitch
+dc=${entity.datacenterName}
+rack=${entity.rackName}
+publicip=${entity.publicIp}
+privateip=${entity.privateIp}
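
This is a FreeMarker template; Brooklyn substitutes the ${...} expressions
per node at deploy time. A hypothetical rendering for a node in AWS
us-east-1 (all values illustrative):

    dc=us-east-1
    rack=rack1
    publicip=54.210.0.10
    privateip=10.0.0.10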

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/couchbase/pillowfight.yaml
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/couchbase/pillowfight.yaml b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/couchbase/pillowfight.yaml
new file mode 100644
index 0000000..01f4027
--- /dev/null
+++ b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/couchbase/pillowfight.yaml
@@ -0,0 +1,77 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+services:
+- type: brooklyn.entity.basic.VanillaSoftwareProcess
+  name: CBC Pillowfight
+  launch.command: |
+    sudo wget -O/etc/apt/sources.list.d/couchbase.list http://packages.couchbase.com/ubuntu/couchbase-ubuntu1204.list
+    sudo wget -O- http://packages.couchbase.com/ubuntu/couchbase.key | sudo apt-key add -
+    sudo apt-get update
+    sudo apt-get install -y libcouchbase2-libevent libcouchbase-dev libcouchbase2-bin
+  provisioning.properties:
+    # CentOS requires a different launch command, see below
+    osFamily: ubuntu
+  checkRunning.command: ""
+  stop.command: ""
+    
+  brooklyn.config:
+    base_url: http://127.0.0.1:8091/
+    
+  brooklyn.initializers:
+  - type: brooklyn.entity.software.ssh.SshCommandEffector
+    brooklyn.config:
+      name: pillow_fight
+      description: runs cbc pillowfight
+      command: |
+        cbc-pillowfight -U ${base_url}${bucket} \
+          `if [ -n "$username" ]; then echo -u $username; fi` \
+          `if [ -n "$password" ]; then echo -P $password; fi` \
+          `if [ -n "$num_cycles" ]; then echo -c $num_cycles; fi` \
+          `if [ -n "$min_size" ]; then echo -m $min_size; fi` \
+          `if [ -n "$max_size" ]; then echo -M $max_size; fi` \
+          `if [ -n "$ratio" ]; then echo -r $ratio; fi`
+      parameters:
+        base_url:
+          description: base URL (http:// or couchbases://) and host(s)/port to connect to, including a trailing slash
+          defaultValue: $brooklyn:config("base_url")
+        bucket:
+          description: bucket to use
+          defaultValue: default
+        username:
+          description: username to authenticate to the bucket
+        password:
+          description: password to authenticate to the bucket
+        num_cycles:
+          description: number of iterations to run
+          defaultValue: 1
+        min_size:
+          description: minimum payload size
+          defaultValue: 50
+        max_size:
+          description: maximum payload size
+          defaultValue: 5120
+        ratio:
+          description: "specify SET/GET command ratio (default: 33, i.e. 33% SETs and 67% GETs)"
+          defaultValue: 33
+
+# For CentOS, use the following launch command:
+#  launch.command: |
+#    sudo wget -O/etc/yum.repos.d/couchbase.repo http://packages.couchbase.com/rpm/couchbase-centos55-x86_64.repo
+#    sudo yum check-update
+#    sudo yum install -y libcouchbase2-libevent libcouchbase-devel libcouchbase2-bin
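
Once deployed, the pillow_fight effector defined above can be invoked like
any Brooklyn effector, e.g. through the REST API; a sketch, with placeholder
application/entity IDs and the default console port and credentials assumed:

    curl -u admin:password -X POST -H "Content-Type: application/json" \
         -d '{"bucket": "default", "num_cycles": "100", "ratio": "50"}' \
         "http://localhost:8081/v1/applications/<appId>/entities/<entityId>/effectors/pillow_fight"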

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/couchdb/couch.ini
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/couchdb/couch.ini b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/couchdb/couch.ini
new file mode 100644
index 0000000..692fc8b
--- /dev/null
+++ b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/couchdb/couch.ini
@@ -0,0 +1,17 @@
+[#ftl]
+#
+[couchdb]
+database_dir = ${driver.runDir}
+view_index_dir = ${driver.runDir}
+uri_file = ${driver.runDir}/couch.uri
+
+[httpd]
+port = ${entity.httpPort?c}
+bind_address = 0.0.0.0
+
+[ssl]
+port = ${entity.httpsPort?c}
+
+[log]
+file = ${driver.runDir}/couch.log
+level = info
\ No newline at end of file
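
The [#ftl] header marks the file as a FreeMarker template, and the ?c
built-in prints numbers in "computer" format so ports are rendered without
locale grouping (5984 rather than 5,984). A hypothetical rendering of the
[httpd] section:

    [httpd]
    port = 5984
    bind_address = 0.0.0.0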

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/couchdb/couch.uri
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/couchdb/couch.uri b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/couchdb/couch.uri
new file mode 100644
index 0000000..0997fc7
--- /dev/null
+++ b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/couchdb/couch.uri
@@ -0,0 +1,2 @@
+[#ftl]
+http://${driver.hostname}:${entity.httpPort?c}/
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/mongodb/default-mongod.conf
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/mongodb/default-mongod.conf b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/mongodb/default-mongod.conf
new file mode 100644
index 0000000..e7f02fd
--- /dev/null
+++ b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/mongodb/default-mongod.conf
@@ -0,0 +1,7 @@
+# Default configuration for a mongod process. The use of noprealloc and smallfiles
+# means this configuration file should not be used in a production environment.
+
+quiet = false
+
+noprealloc = true
+smallfiles = true

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/mongodb/default.conf
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/mongodb/default.conf b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/mongodb/default.conf
new file mode 100644
index 0000000..c3c279f
--- /dev/null
+++ b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/mongodb/default.conf
@@ -0,0 +1,2 @@
+# Default Brooklyn configuration for a MongoDB process.
+quiet = false

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/redis/redis.conf
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/redis/redis.conf b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/redis/redis.conf
new file mode 100644
index 0000000..0554eb2
--- /dev/null
+++ b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/redis/redis.conf
@@ -0,0 +1,13 @@
+# Redis configuration file
+
+# Start as daemon
+daemonize yes
+pidfile ${driver.runDir}/pid.txt
+
+# Set port and optional bind address
+port ${entity.redisPort?c}
+# bind ${entity.address}
+
+# Configure logging
+loglevel verbose
+logfile ${driver.runDir}/redis.log

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/redis/slave.conf
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/redis/slave.conf b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/redis/slave.conf
new file mode 100644
index 0000000..3a9e64a
--- /dev/null
+++ b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/redis/slave.conf
@@ -0,0 +1,16 @@
+# Redis configuration file
+
+# Start as daemon
+daemonize yes
+pidfile ${driver.runDir}/pid.txt
+
+# Set port and optional bind address
+port ${entity.redisPort?c}
+# bind ${entity.address}
+
+# Slave configuration
+slaveof ${entity.master.address} ${entity.master.redisPort?c}
+
+# Configure logging
+loglevel verbose
+logfile ${driver.runDir}/redis.log
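
The slaveof directive is templated from the master entity's address and
port; rendered, it is the standard Redis form (illustrative values):

    slaveof 10.0.0.5 6379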

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/app.config
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/app.config b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/app.config
new file mode 100644
index 0000000..7ee8a37
--- /dev/null
+++ b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/app.config
@@ -0,0 +1,353 @@
+%% Brooklyn note: file from 1.4.8 Mac install, with erlang section added, and ports templated
+
+%% -*- mode: erlang;erlang-indent-level: 4;indent-tabs-mode: nil -*-
+%% ex: ft=erlang ts=4 sw=4 et
+[
+ %% Riak Client APIs config
+ {riak_api, [
+            %% pb_backlog is the maximum length to which the queue of pending
+            %% connections may grow. If set, it must be an integer >= 0.
+            %% By default the value is 5. If you anticipate a huge number of
+            %% connections being initialised *simultaneously*, set this number
+            %% higher.
+            %% {pb_backlog, 64},
+             
+            %% pb is a list of IP addresses and TCP ports that the Riak 
+            %% Protocol Buffers interface will bind.
+            {pb, [ {"0.0.0.0", ${entity.riakPbPort?c} } ]}
+            ]},
+
+ %% Riak Core config
+ {riak_core, [
+              %% Default location of ringstate
+              {ring_state_dir, "./data/ring"},
+
+              %% Default ring creation size.  Make sure it is a power of 2,
+              %% e.g. 16, 32, 64, 128, 256, 512 etc
+              %{ring_creation_size, 64},
+
+              %% http is a list of IP addresses and TCP ports that the Riak
+              %% HTTP interface will bind.
+              {http, [ {"0.0.0.0", ${entity.riakWebPort?c} } ]},
+
+              %% https is a list of IP addresses and TCP ports that the Riak
+              %% HTTPS interface will bind.
+              %{https, [{ "0.0.0.0", ${entity.riakWebPort?c} }]},
+
+              %% Default cert and key locations for https can be overridden
+              %% with the ssl config variable, for example:
+              %{ssl, [
+              %       {certfile, "./etc/cert.pem"},
+              %       {keyfile, "./etc/key.pem"}
+              %      ]},
+
+              %% riak_handoff_port is the TCP port that Riak uses for
+              %% intra-cluster data handoff.
+              {handoff_port, ${entity.handoffListenerPort?c} },
+
+              %% To encrypt riak_core intra-cluster data handoff traffic,
+              %% uncomment the following line and edit its path to an
+              %% appropriate certfile and keyfile.  (This example uses a
+              %% single file with both items concatenated together.)
+              %{handoff_ssl_options, [{certfile, "/tmp/erlserver.pem"}]},
+
+              %% DTrace support
+              %% Do not enable 'dtrace_support' unless your Erlang/OTP
+              %% runtime is compiled to support DTrace.  DTrace is
+              %% available in R15B01 (supported by the Erlang/OTP
+              %% official source package) and in R14B04 via a custom
+              %% source repository & branch.
+              {dtrace_support, false},
+
+              %% Health Checks
+              %% If disabled, health checks registered by an application will
+              %% be ignored. NOTE: this option cannot be changed at runtime.
+              %% To re-enable, the setting must be changed and the node restarted.
+              %% NOTE: As of Riak 1.3.2, health checks are deprecated as they
+              %% may interfere with the new overload protection mechanisms.
+              %% If there is a good reason to re-enable them, you must uncomment
+              %% this line and also add an entry in the riak_kv section:
+              %%          {riak_kv, [ ..., {enable_health_checks, true}, ...]}
+              %% {enable_health_checks, true},
+
+              %% Platform-specific installation paths (substituted by rebar)
+              {platform_bin_dir, "./bin"},
+              {platform_data_dir, "./data"},
+              {platform_etc_dir, "./etc"},
+              {platform_lib_dir, "./lib"},
+              {platform_log_dir, "./log"}
+             ]},
+
+ %% Riak KV config
+ {riak_kv, [
+            %% Storage_backend specifies the Erlang module defining the storage
+            %% mechanism that will be used on this node.
+            {storage_backend, riak_kv_bitcask_backend},
+
+            %% raw_name is the first part of all URLs used by the Riak raw HTTP
+            %% interface.  See riak_web.erl and raw_http_resource.erl for
+            %% details.
+            %{raw_name, "riak"},
+
+            %% Enable active anti-entropy subsystem + optional debug messages:
+            %%   {anti_entropy, {on|off, []}},
+            %%   {anti_entropy, {on|off, [debug]}},
+            {anti_entropy, {on, []}},
+
+            %% Restrict how fast AAE can build hash trees. Building the tree
+            %% for a given partition requires a full scan over that partition's
+            %% data. Once built, trees stay built until they are expired.
+            %% Config is of the form:
+            %%   {num-builds, per-timespan-in-milliseconds}
+            %% Default is 1 build per hour.
+            {anti_entropy_build_limit, {1, 3600000}},
+
+            %% Determine how often hash trees are expired after being built.
+            %% Periodically expiring a hash tree ensures the on-disk hash tree
+            %% data stays consistent with the actual k/v backend data. It also
+            %% helps Riak identify silent disk failures and bit rot. However,
+            %% expiration is not needed for normal AAE operation and should be
+            %% infrequent for performance reasons. The time is specified in
+            %% milliseconds. The default is 1 week.
+            {anti_entropy_expire, 604800000},
+
+            %% Limit how many AAE exchanges/builds can happen concurrently.
+            {anti_entropy_concurrency, 2},
+
+            %% The tick determines how often the AAE manager looks for work
+            %% to do (building/expiring trees, triggering exchanges, etc).
+            %% The default is every 15 seconds. Lowering this value will
+            %% speed up the rate at which all replicas are synced across the cluster.
+            %% Increasing the value is not recommended.
+            {anti_entropy_tick, 15000},
+
+            %% The directory where AAE hash trees are stored.
+            {anti_entropy_data_dir, "./data/anti_entropy"},
+
+            %% The LevelDB options used by AAE to generate the LevelDB-backed
+            %% on-disk hashtrees.
+            {anti_entropy_leveldb_opts, [{write_buffer_size, 4194304},
+                                         {max_open_files, 20}]},
+
+            %% mapred_name is URL used to submit map/reduce requests to Riak.
+            {mapred_name, "mapred"},
+
+            %% mapred_2i_pipe indicates whether secondary-index
+            %% MapReduce inputs are queued in parallel via their own
+            %% pipe ('true'), or serially via a helper process
+            %% ('false' or undefined).  Set to 'false' or leave
+            %% undefined during a rolling upgrade from 1.0.
+            {mapred_2i_pipe, true},
+
+            %% Each of the following entries control how many Javascript
+            %% virtual machines are available for executing map, reduce,
+            %% pre- and post-commit hook functions.
+            {map_js_vm_count, 8 },
+            {reduce_js_vm_count, 6 },
+            {hook_js_vm_count, 2 },
+
+            %% js_max_vm_mem is the maximum amount of memory, in megabytes,
+            %% allocated to the Javascript VMs. If unset, the default is
+            %% 8MB.
+            {js_max_vm_mem, 8},
+
+            %% js_thread_stack is the maximum amount of thread stack, in megabytes,
+            %% allocated to the Javascript VMs. If unset, the default is 16MB.
+            %% NOTE: This is not the same as the C thread stack.
+            {js_thread_stack, 16},
+
+            %% js_source_dir should point to a directory containing Javascript
+            %% source files which will be loaded by Riak when it initializes
+            %% Javascript VMs.
+            %{js_source_dir, "/tmp/js_source"},
+
+            %% http_url_encoding determines how Riak treats URL encoded
+            %% buckets, keys, and links over the REST API. When set to 'on'
+            %% Riak always decodes encoded values sent as URLs and Headers.
+            %% Otherwise, Riak defaults to compatibility mode where links
+            %% are decoded, but buckets and keys are not. The compatibility
+            %% mode will be removed in a future release.
+            {http_url_encoding, on},
+
+            %% Switch to vnode-based vclocks rather than client ids.  This
+            %% significantly reduces the number of vclock entries.
+            %% Only set true if *all* nodes in the cluster are upgraded to 1.0
+            {vnode_vclocks, true},
+
+            %% This option toggles compatibility of keylisting with 1.0
+            %% and earlier versions.  Once a rolling upgrade to a version
+            %% > 1.0 is completed for a cluster, this should be set to
+            %% true for better control of memory usage during key listing
+            %% operations
+            {listkeys_backpressure, true},
+
+            %% This option specifies how many of each type of fsm may exist
+            %% concurrently.  This is for overload protection and is a new
+            %% mechanism that obsoletes 1.3's health checks. Note that this number
+            %% represents two potential processes, so +P in vm.args should be at 
+            %% least 3X the fsm_limit.
+            {fsm_limit, 50000},
+
+            %% Uncomment to make non-paginated results be sorted the
+            %% same way paginated results are: by term, then key.
+            %% In Riak 1.4.* before 1.4.4, all results were sorted this way
+            %% by default, which can adversely affect performance in some cases.
+            %% Setting this to true emulates that behavior.
+            %% {secondary_index_sort_default, true},
+
+            %% object_format controls which binary representation of a riak_object 
+            %% is stored on disk.
+            %% Current options are: v0, v1.
+            %% v0: Original erlang:term_to_binary format. Higher space overhead.
+            %% v1: New format for more compact storage of small values.
+            {object_format, v1}
+           ]},
+
+ %% Riak Search Config
+ {riak_search, [
+                %% To enable Search functionality set this 'true'.
+                {enabled, false}
+               ]},
+
+ %% Merge Index Config
+ {merge_index, [
+                %% The root dir to store search merge_index data
+                {data_root, "./data/merge_index"},
+
+                %% Size, in bytes, of the in-memory buffer.  When this
+                %% threshold has been reached the data is transformed
+                %% into a segment file which resides on disk.
+                {buffer_rollover_size, 1048576},
+
+                %% Over time the segment files need to be compacted.
+                %% This is the maximum number of segments that will be
+                %% compacted at once.  A lower value will lead to
+                %% quicker but more frequent compactions.
+                {max_compact_segments, 20}
+               ]},
+
+ %% Bitcask Config
+ {bitcask, [
+             %% Configure how Bitcask writes data to disk.
+             %%   erlang: Erlang's built-in file API
+             %%      nif: Direct calls to the POSIX C API
+             %%
+             %% The NIF mode provides higher throughput for certain
+             %% workloads, but has the potential to negatively impact
+             %% the Erlang VM, leading to higher worst-case latencies
+             %% and possible throughput collapse.
+             {io_mode, erlang},
+
+             {data_root, "./data/bitcask"}
+           ]},
+
+ %% eLevelDB Config
+ {eleveldb, [
+             {data_root, "./data/leveldb"}
+            ]},
+
+ %% Lager Config
+ {lager, [
+            %% What handlers to install with what arguments
+            %% The defaults for the logfiles are to rotate the files when
+            %% they reach 10Mb or at midnight, whichever comes first, and keep
+            %% the last 5 rotations. See the lager README for a description of
+            %% the time rotation format:
+            %% https://github.com/basho/lager/blob/master/README.org
+            %%
+            %% If you wish to disable rotation, you can either set the size to 0
+            %% and the rotation time to "", or instead specify a 2-tuple that only
+            %% consists of {Logfile, Level}.
+            %%
+            %% If you wish to have riak log messages to syslog, you can use a handler
+            %% like this:
+            %%   {lager_syslog_backend, ["riak", daemon, info]},
+            %%
+            {handlers, [ 
+                           {lager_file_backend, [ 
+                               {"./log/error.log", error, 10485760, "$D0", 5}, 
+                               {"./log/console.log", info, 10485760, "$D0", 5} 
+                           ]} 
+                       ] },
+
+            %% Whether to write a crash log, and where.
+            %% Commented/omitted/undefined means no crash logger.
+            {crash_log, "./log/crash.log"},
+
+            %% Maximum size in bytes of events in the crash log - defaults to 65536
+            {crash_log_msg_size, 65536},
+
+            %% Maximum size of the crash log in bytes, before it's rotated, set
+            %% to 0 to disable rotation - default is 0
+            {crash_log_size, 10485760},
+
+            %% What time to rotate the crash log - default is no time
+            %% rotation. See the lager README for a description of this format:
+            %% https://github.com/basho/lager/blob/master/README.org
+            {crash_log_date, "$D0"},
+
+            %% Number of rotated crash logs to keep, 0 means keep only the
+            %% current one - default is 0
+            {crash_log_count, 5},
+
+            %% Whether to redirect error_logger messages into lager - defaults to true
+            {error_logger_redirect, true},
+
+            %% maximum number of error_logger messages to handle in a second
+            %% lager 2.0.0 shipped with a limit of 50, which is a little low for riak's startup
+            {error_logger_hwm, 100}
+        ]},
+
+ %% riak_sysmon config
+ {riak_sysmon, [
+         %% To disable forwarding events of a particular type, use a
+         %% limit of 0.
+         {process_limit, 30},
+         {port_limit, 2},
+
+         %% Finding reasonable limits for a given workload is a matter
+         %% of experimentation.
+         %% NOTE: Enabling the 'gc_ms_limit' monitor (by setting non-zero)
+         %%       can cause performance problems on multi-CPU systems.
+         {gc_ms_limit, 0},
+         {heap_word_limit, 40111000},
+
+         %% Configure the following items to 'false' to disable logging
+         %% of that event type.
+         {busy_port, true},
+         {busy_dist_port, true}
+        ]},
+
+ %% SASL config
+ {sasl, [
+         {sasl_error_logger, false}
+        ]},
+
+ %% riak_control config
+ {riak_control, [
+                %% Set to false to disable the admin panel.
+                {enabled, true},
+
+                %% Authentication style used for access to the admin
+                %% panel. Valid styles are 'userlist' <TODO>.
+                {auth, userlist},
+
+                %% If auth is set to 'userlist' then this is the
+                %% list of usernames and passwords for access to the
+                %% admin panel.
+                {userlist, [{"user", "pass"}
+                           ]},
+
+                %% The admin panel is broken up into multiple
+                %% components, each of which is enabled or disabled
+                %% by one of these settings.
+                {admin, true}
+                ]},
+ 
+ %% erlang, constrain port range so we can open the internal firewall ports               
+ { kernel, [
+            {inet_dist_listen_min, ${entity.erlangPortRangeStart?c}},
+            {inet_dist_listen_max, ${entity.erlangPortRangeEnd?c}}
+          ]}
+
+].
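
The kernel section pins the Erlang distribution port range so Brooklyn can
open matching firewall ports; rendered with an illustrative range it would
read:

    { kernel, [
               {inet_dist_listen_min, 9080},
               {inet_dist_listen_max, 9100}
              ]}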

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak-cluster-with-solr.yaml
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak-cluster-with-solr.yaml b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak-cluster-with-solr.yaml
new file mode 100644
index 0000000..0d1e7c7
--- /dev/null
+++ b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak-cluster-with-solr.yaml
@@ -0,0 +1,35 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+name: Cluster Riak & Solr
+location:
+  jclouds:aws-ec2:us-east-1:
+    osFamily: centos
+    osVersionRegex: 6\..*
+services:
+- type: org.apache.brooklyn.entity.nosql.riak.RiakCluster
+  initialSize: 2
+  memberSpec:
+    $brooklyn:entitySpec:
+      type: org.apache.brooklyn.entity.nosql.riak.RiakNode
+      searchEnabled: true
+  brooklyn.config:
+    provisioning.properties:
+      minCores: 2
+      minRam: 6gb
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak-mac.conf
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak-mac.conf b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak-mac.conf
new file mode 100644
index 0000000..d123000
--- /dev/null
+++ b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak-mac.conf
@@ -0,0 +1,494 @@
+## Brooklyn note: file from 2.0.1 Mac install, with erlang section added, and ports templated
+
+## Where to emit the default log messages (typically at 'info'
+## severity):
+## off: disabled
+## file: the file specified by log.console.file
+## console: to standard output (seen when using `riak attach-direct`)
+## both: log.console.file and standard out.
+##
+## Default: file
+##
+## Acceptable values:
+##   - one of: off, file, console, both
+log.console = file
+
+## The severity level of the console log, default is 'info'.
+##
+## Default: info
+##
+## Acceptable values:
+##   - one of: debug, info, notice, warning, error, critical, alert, emergency, none
+log.console.level = info
+
+## When 'log.console' is set to 'file' or 'both', the file where
+## console messages will be logged.
+##
+## Default: $(platform_log_dir)/console.log
+##
+## Acceptable values:
+##   - the path to a file
+log.console.file = $(platform_log_dir)/console.log
+
+## The file where error messages will be logged.
+##
+## Default: $(platform_log_dir)/error.log
+##
+## Acceptable values:
+##   - the path to a file
+log.error.file = $(platform_log_dir)/error.log
+
+## When set to 'on', enables log output to syslog.
+##
+## Default: off
+##
+## Acceptable values:
+##   - on or off
+log.syslog = off
+
+## Whether to enable the crash log.
+##
+## Default: on
+##
+## Acceptable values:
+##   - on or off
+log.crash = on
+
+## If the crash log is enabled, the file where its messages will
+## be written.
+##
+## Default: $(platform_log_dir)/crash.log
+##
+## Acceptable values:
+##   - the path to a file
+log.crash.file = $(platform_log_dir)/crash.log
+
+## Maximum size in bytes of individual messages in the crash log
+##
+## Default: 64KB
+##
+## Acceptable values:
+##   - a byte size with units, e.g. 10GB
+log.crash.maximum_message_size = 64KB
+
+## Maximum size of the crash log in bytes, before it is rotated
+##
+## Default: 10MB
+##
+## Acceptable values:
+##   - a byte size with units, e.g. 10GB
+log.crash.size = 10MB
+
+## The schedule on which to rotate the crash log.  For more
+## information see:
+## https://github.com/basho/lager/blob/master/README.md#internal-log-rotation
+##
+## Default: $D0
+##
+## Acceptable values:
+##   - text
+log.crash.rotation = $D0
+
+## The number of rotated crash logs to keep. When set to
+## 'current', only the current open log file is kept.
+##
+## Default: 5
+##
+## Acceptable values:
+##   - an integer
+##   - the text "current"
+log.crash.rotation.keep = 5
+
+## Name of the Erlang node
+##
+## Default: riak@127.0.0.1
+##
+## Acceptable values:
+##   - text
+nodename = riak@${driver.hostname}
+
+## Cookie for distributed node communication.  All nodes in the
+## same cluster should use the same cookie or they will not be able to
+## communicate.
+##
+## Default: riak
+##
+## Acceptable values:
+##   - text
+distributed_cookie = riak
+
+## Sets the number of threads in async thread pool, valid range
+## is 0-1024. If thread support is available, the default is 64.
+## More information at: http://erlang.org/doc/man/erl.html
+##
+## Default: 64
+##
+## Acceptable values:
+##   - an integer
+erlang.async_threads = 64
+
+## The number of concurrent ports/sockets
+## Valid range is 1024-134217727
+##
+## Default: 65536
+##
+## Acceptable values:
+##   - an integer
+erlang.max_ports = 65536
+
+## Set scheduler forced wakeup interval. All run queues will be
+## scanned each Interval milliseconds. While there are sleeping
+## schedulers in the system, one scheduler will be woken for each
+## non-empty run queue found. An Interval of zero disables this
+## feature, which also is the default.
+## This feature is a workaround for long-executing native code, and
+## native code that does not bump reductions properly.
+## More information: http://www.erlang.org/doc/man/erl.html#+sfwi
+##
+## Acceptable values:
+##   - an integer
+## erlang.schedulers.force_wakeup_interval = 500
+
+## Enable or disable scheduler compaction of load. By default
+## scheduler compaction of load is enabled. When enabled, load
+## balancing will strive for a load distribution which causes as many
+## scheduler threads as possible to be fully loaded (i.e., not run out
+## of work). This is accomplished by migrating load (e.g. runnable
+## processes) into a smaller set of schedulers when schedulers
+## frequently run out of work. When disabled, the frequency with which
+## schedulers run out of work will not be taken into account by the
+## load balancing logic.
+## More information: http://www.erlang.org/doc/man/erl.html#+scl
+##
+## Acceptable values:
+##   - one of: true, false
+## erlang.schedulers.compaction_of_load = false
+
+## Enable or disable scheduler utilization balancing of load. By
+## default scheduler utilization balancing is disabled and instead
+## scheduler compaction of load is enabled which will strive for a
+## load distribution which causes as many scheduler threads as
+## possible to be fully loaded (i.e., not run out of work). When
+## scheduler utilization balancing is enabled the system will instead
+## try to balance scheduler utilization between schedulers. That is,
+## strive for equal scheduler utilization on all schedulers.
+## More information: http://www.erlang.org/doc/man/erl.html#+sub
+##
+## Acceptable values:
+##   - one of: true, false
+## erlang.schedulers.utilization_balancing = true
+
+## Number of partitions in the cluster (only valid when first
+## creating the cluster). Must be a power of 2, minimum 8 and maximum
+## 1024.
+##
+## Default: 64
+##
+## Acceptable values:
+##   - an integer
+## ring_size = 64
+
+## Number of concurrent node-to-node transfers allowed.
+##
+## Default: 2
+##
+## Acceptable values:
+##   - an integer
+## transfer_limit = 2
+
+## Default cert location for https can be overridden
+## with the ssl config variable, for example:
+##
+## Acceptable values:
+##   - the path to a file
+## ssl.certfile = $(platform_etc_dir)/cert.pem
+
+## Default key location for https can be overridden with the ssl
+## config variable, for example:
+##
+## Acceptable values:
+##   - the path to a file
+## ssl.keyfile = $(platform_etc_dir)/key.pem
+
+## Default signing authority location for https can be overridden
+## with the ssl config variable, for example:
+##
+## Acceptable values:
+##   - the path to a file
+## ssl.cacertfile = $(platform_etc_dir)/cacertfile.pem
+
+## DTrace support Do not enable 'dtrace' unless your Erlang/OTP
+## runtime is compiled to support DTrace.  DTrace is available in
+## R15B01 (supported by the Erlang/OTP official source package) and in
+## R14B04 via a custom source repository & branch.
+##
+## Default: off
+##
+## Acceptable values:
+##   - on or off
+dtrace = off
+
+## Platform-specific installation paths (substituted by rebar)
+##
+## Default: ./bin
+##
+## Acceptable values:
+##   - the path to a directory
+platform_bin_dir = ./bin
+
+##
+## Default: ./data
+##
+## Acceptable values:
+##   - the path to a directory
+platform_data_dir = ./data
+
+##
+## Default: ./etc
+##
+## Acceptable values:
+##   - the path to a directory
+platform_etc_dir = ./etc
+
+##
+## Default: ./lib
+##
+## Acceptable values:
+##   - the path to a directory
+platform_lib_dir = ./lib
+
+##
+## Default: ./log
+##
+## Acceptable values:
+##   - the path to a directory
+platform_log_dir = ./log
+
+## Enable consensus subsystem. Set to 'on' to enable the
+## consensus subsystem used for strongly consistent Riak operations.
+##
+## Default: off
+##
+## Acceptable values:
+##   - on or off
+## strong_consistency = on
+
+## listener.http.<name> is an IP address and TCP port that the Riak
+## HTTP interface will bind.
+##
+## Default: 127.0.0.1:8098
+##
+## Acceptable values:
+##   - an IP/port pair, e.g. 127.0.0.1:10011
+listener.http.internal = 0.0.0.0:${entity.riakWebPort?c}
+
+## listener.protobuf.<name> is an IP address and TCP port that the Riak
+## Protocol Buffers interface will bind.
+##
+## Default: 127.0.0.1:8087
+##
+## Acceptable values:
+##   - an IP/port pair, e.g. 127.0.0.1:10011
+listener.protobuf.internal = 0.0.0.0:${entity.riakPbPort?c}
+
+## The maximum length to which the queue of pending connections
+## may grow. If set, it must be an integer > 0. If you anticipate a
+## huge number of connections being initialized *simultaneously*, set
+## this number higher.
+##
+## Default: 128
+##
+## Acceptable values:
+##   - an integer
+## protobuf.backlog = 128
+
+## listener.https.<name> is an IP address and TCP port that the Riak
+## HTTPS interface will bind.
+##
+## Acceptable values:
+##   - an IP/port pair, e.g. 127.0.0.1:10011
+## listener.https.internal = 127.0.0.1:8098
+
+## How Riak will repair out-of-sync keys. Some features require
+## this to be set to 'active', including search.
+## * active: out-of-sync keys will be repaired in the background
+## * passive: out-of-sync keys are only repaired on read
+## * active-debug: like active, but outputs verbose debugging
+## information
+##
+## Default: active
+##
+## Acceptable values:
+##   - one of: active, passive, active-debug
+anti_entropy = active
+
+## Specifies the storage engine used for Riak's key-value data
+## and secondary indexes (if supported).
+##
+## Default: bitcask
+##
+## Acceptable values:
+##   - one of: bitcask, leveldb, memory, multi
+storage_backend = bitcask
+
+## Controls which binary representation of a riak value is stored
+## on disk.
+## * 0: Original erlang:term_to_binary format. Higher space overhead.
+## * 1: New format for more compact storage of small values.
+##
+## Default: 1
+##
+## Acceptable values:
+##   - the integer 1
+##   - the integer 0
+object.format = 1
+
+## Reading or writing objects bigger than this size will write a
+## warning in the logs.
+##
+## Default: 5MB
+##
+## Acceptable values:
+##   - a byte size with units, e.g. 10GB
+object.size.warning_threshold = 5MB
+
+## Writing an object bigger than this will send a failure to the
+## client.
+##
+## Default: 50MB
+##
+## Acceptable values:
+##   - a byte size with units, e.g. 10GB
+object.size.maximum = 50MB
+
+## Writing an object with more than this number of siblings will
+## generate a warning in the logs.
+##
+## Default: 25
+##
+## Acceptable values:
+##   - an integer
+object.siblings.warning_threshold = 25
+
+## Writing an object with more than this number of siblings will
+## send a failure to the client.
+##
+## Default: 100
+##
+## Acceptable values:
+##   - an integer
+object.siblings.maximum = 100
+
+## A path under which bitcask data files will be stored.
+##
+## Default: $(platform_data_dir)/bitcask
+##
+## Acceptable values:
+##   - the path to a directory
+bitcask.data_root = $(platform_data_dir)/bitcask
+
+## Configure how Bitcask writes data to disk.
+## erlang: Erlang's built-in file API
+## nif: Direct calls to the POSIX C API
+## The NIF mode provides higher throughput for certain
+## workloads, but has the potential to negatively impact
+## the Erlang VM, leading to higher worst-case latencies
+## and possible throughput collapse.
+##
+## Default: erlang
+##
+## Acceptable values:
+##   - one of: erlang, nif
+bitcask.io_mode = erlang
+
+## Set to 'off' to disable the admin panel.
+##
+## Default: off
+##
+## Acceptable values:
+##   - on or off
+riak_control = on
+
+## Authentication mode used for access to the admin panel.
+##
+## Default: off
+##
+## Acceptable values:
+##   - one of: off, userlist
+riak_control.auth.mode = off
+
+## If riak control's authentication mode (riak_control.auth.mode)
+## is set to 'userlist' then this is the list of usernames and
+## passwords for access to the admin panel.
+## To create users with given names, add entries of the format:
+## riak_control.auth.user.USERNAME.password = PASSWORD
+## replacing USERNAME with the desired username and PASSWORD with the
+## desired password for that user.
+##
+## Acceptable values:
+##   - text
+## riak_control.auth.user.admin.password = pass
+
+## This parameter defines the percentage of total server memory
+## to assign to LevelDB. LevelDB will dynamically adjust its internal
+## cache sizes to stay within this size.  The memory size can
+## alternately be assigned as a byte count via leveldb.maximum_memory
+## instead.
+##
+## Default: 70
+##
+## Acceptable values:
+##   - an integer
+leveldb.maximum_memory.percent = 70
+
+## To enable Search set this 'on'.
+##
+## Default: off
+##
+## Acceptable values:
+##   - on or off
+search = off
+
+## How long Riak will wait for Solr to start. The start sequence
+## will be tried twice. If both attempts time out, then the Riak node
+## will be shut down. This may need to be increased as more data is
+## indexed and Solr takes longer to start. Values lower than 1s will
+## be rounded up to the minimum 1s.
+##
+## Default: 30s
+##
+## Acceptable values:
+##   - a time duration with units, e.g. '10s' for 10 seconds
+search.solr.start_timeout = 30s
+
+## The port number which Solr binds to.
+## NOTE: Binds on every interface.
+##
+## Default: 8093
+##
+## Acceptable values:
+##   - an integer
+search.solr.port = ${entity.searchSolrPort?c}
+
+## The port number which Solr JMX binds to.
+## NOTE: Binds on every interface.
+##
+## Default: 8985
+##
+## Acceptable values:
+##   - an integer
+search.solr.jmx_port = ${entity.searchSolrJmxPort?c}
+
+## The options to pass to the Solr JVM.  Non-standard options,
+## i.e. -XX, may not be portable across JVM implementations.
+## E.g. -XX:+UseCompressedStrings
+##
+## Default: -d64 -Xms1g -Xmx1g -XX:+UseStringCache -XX:+UseCompressedOops
+##
+## Acceptable values:
+##   - text
+search.solr.jvm_options = -d64 -Xms1g -Xmx1g -XX:+UseStringCache -XX:+UseCompressedOops
+
+## erlang, constrain port range so we can open the internal firewall ports
+erlang.distribution.port_range.minimum = ${entity.erlangPortRangeStart?c}
+erlang.distribution.port_range.maximum = ${entity.erlangPortRangeEnd?c}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak-with-webapp-cluster.yaml
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak-with-webapp-cluster.yaml b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak-with-webapp-cluster.yaml
new file mode 100644
index 0000000..5e184ac
--- /dev/null
+++ b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak-with-webapp-cluster.yaml
@@ -0,0 +1,42 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+name: Riak Cluster with Webapp Cluster
+location: jclouds:softlayer:sjc01
+services:
+- type: org.apache.brooklyn.entity.nosql.riak.RiakCluster
+  initialSize: 2
+  id: cluster
+  brooklyn.config:
+    install.version: 2.0.0
+- type: brooklyn.entity.webapp.ControlledDynamicWebAppCluster
+  name: Web Cluster
+  brooklyn.config:
+    initialSize: 2
+    controlleddynamicwebappcluster.controllerSpec:
+      $brooklyn:entitySpec:
+        type: brooklyn.entity.proxy.nginx.NginxController
+        brooklyn.config:
+          member.sensor.hostname: "host.subnet.hostname"
+    wars.root: "https://s3-eu-west-1.amazonaws.com/brooklyn-clocker/brooklyn-example-hello-world-sql-webapp.war"
+    java.sysprops: 
+      brooklyn.example.riak.nodes: $brooklyn:component("cluster").attributeWhenReady("riak.cluster.nodeList")
+
+# Alternative URL for War file if available on classpath
+# "classpath://brooklyn-example-hello-world-sql-webapp.war"

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak-with-webapp.yaml
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak-with-webapp.yaml b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak-with-webapp.yaml
new file mode 100644
index 0000000..d2b08ff
--- /dev/null
+++ b/software/nosql/src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak-with-webapp.yaml
@@ -0,0 +1,36 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+name: Riak Cluster with Webapp
+location: aws-ec2:eu-west-1
+services:
+- type: org.apache.brooklyn.entity.nosql.riak.RiakCluster
+  initialSize: 2
+  id: cluster
+- type: brooklyn.entity.webapp.jboss.JBoss7Server
+  name: Web
+  brooklyn.config:
+    wars.root: "https://s3-eu-west-1.amazonaws.com/brooklyn-clocker/brooklyn-example-hello-world-sql-webapp.war"
+    java.sysprops: 
+      brooklyn.example.riak.nodes: $brooklyn:component("cluster").attributeWhenReady("riak.cluster.nodeList")
+  provisioning.properties:
+    osFamily: centos
+
+# Alternative URL for War file if available on classpath
+# "classpath://brooklyn-example-hello-world-sql-webapp.war"


[21/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayImpl.java
deleted file mode 100644
index ae82af4..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayImpl.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchbase;
-
-import brooklyn.config.render.RendererHints;
-import brooklyn.entity.basic.SoftwareProcessImpl;
-import brooklyn.event.feed.http.HttpFeed;
-import brooklyn.event.feed.http.HttpPollConfig;
-import brooklyn.event.feed.http.HttpValueFunctions;
-import brooklyn.location.access.BrooklynAccessUtils;
-
-import com.google.common.base.Functions;
-import com.google.common.net.HostAndPort;
-
-public class CouchbaseSyncGatewayImpl extends SoftwareProcessImpl implements CouchbaseSyncGateway {
-
-    private HttpFeed httpFeed;
-
-    @Override
-    public Class<CouchbaseSyncGatewayDriver> getDriverInterface() {
-        return CouchbaseSyncGatewayDriver.class;
-    }
-
-    @Override
-    protected void connectSensors() {
-        super.connectSensors();
-        connectServiceUpIsRunning();
-    }
-
-    @Override
-    protected void connectServiceUpIsRunning() {
-        HostAndPort hp = BrooklynAccessUtils.getBrooklynAccessibleAddress(this,
-                getAttribute(CouchbaseSyncGateway.ADMIN_REST_API_PORT));
-
-        String managementUri = String.format("http://%s:%s",
-                hp.getHostText(), hp.getPort());
-
-        setAttribute(MANAGEMENT_URL, managementUri);
-
-        httpFeed = HttpFeed.builder()
-                .entity(this)
-                .period(200)
-                .baseUri(managementUri)
-                .poll(new HttpPollConfig<Boolean>(SERVICE_UP)
-                        .onSuccess(HttpValueFunctions.responseCodeEquals(200))
-                        .onFailureOrException(Functions.constant(false)))
-                .build();
-    }
-
-    @Override
-    protected void disconnectSensors() {
-        super.disconnectSensors();
-        disconnectServiceUpIsRunning();
-    }
-
-    @Override
-    protected void disconnectServiceUpIsRunning() {
-        if (httpFeed != null) {
-            httpFeed.stop();
-        }
-    }
-    
-    static {
-        RendererHints.register(MANAGEMENT_URL, RendererHints.namedActionWithUrl());
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewaySshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewaySshDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewaySshDriver.java
deleted file mode 100644
index d21f9b5..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewaySshDriver.java
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchbase;
-
-import static brooklyn.util.ssh.BashCommands.INSTALL_CURL;
-import static brooklyn.util.ssh.BashCommands.alternatives;
-import static brooklyn.util.ssh.BashCommands.chainGroup;
-import static brooklyn.util.ssh.BashCommands.sudo;
-import static java.lang.String.format;
-
-import java.util.List;
-
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.AbstractSoftwareProcessSshDriver;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.basic.EntityLocal;
-import brooklyn.entity.basic.EntityPredicates;
-import brooklyn.entity.drivers.downloads.DownloadResolver;
-import brooklyn.event.basic.DependentConfiguration;
-import brooklyn.location.OsDetails;
-import brooklyn.location.basic.SshMachineLocation;
-import brooklyn.util.collections.MutableMap;
-import brooklyn.util.ssh.BashCommands;
-import brooklyn.util.time.Duration;
-import brooklyn.util.time.Time;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Predicates;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Iterables;
-
-public class CouchbaseSyncGatewaySshDriver extends AbstractSoftwareProcessSshDriver implements CouchbaseSyncGatewayDriver {
-    public CouchbaseSyncGatewaySshDriver(EntityLocal entity, SshMachineLocation machine) {
-        super(entity, machine);
-    }
-
-    @Override
-    public void stop() {
-
-    }
-
-    @Override
-    public void install() {
-        //reference http://docs.couchbase.com/sync-gateway/#getting-started-with-sync-gateway
-        DownloadResolver resolver = Entities.newDownloader(this);
-        List<String> urls = resolver.getTargets();
-        String saveAs = resolver.getFilename();
-
-        OsDetails osDetails = getMachine().getMachineDetails().getOsDetails();
-
-        log.info("Installing couchbase-sync-gateway version: {}", getVersion());
-        if (osDetails.isLinux()) {
-            List<String> commands = installLinux(urls, saveAs);
-            newScript(INSTALLING)
-                    .body.append(commands).execute();
-        }
-    }
-
-    @Override
-    public void customize() {
-
-    }
-
-    @Override
-    public void launch() {
-        Entity cbNode = entity.getConfig(CouchbaseSyncGateway.COUCHBASE_SERVER);
-        Entities.waitForServiceUp(cbNode, Duration.ONE_HOUR);
-        DependentConfiguration.waitInTaskForAttributeReady(cbNode, CouchbaseCluster.IS_CLUSTER_INITIALIZED, Predicates.equalTo(true));
-        // Even once the bucket has published its API URL, it can still take a couple of seconds for it to become available
-        Time.sleep(10 * 1000);
-        if (cbNode instanceof CouchbaseCluster) {
-            // in_cluster now applies even to a node in a cluster of size 1
-            Optional<Entity> cbClusterNode = Iterables.tryFind(cbNode.getAttribute(CouchbaseCluster.GROUP_MEMBERS),
-                Predicates.and(Predicates.instanceOf(CouchbaseNode.class), EntityPredicates.attributeEqualTo(CouchbaseNode.IS_IN_CLUSTER, Boolean.TRUE)));
-            
-            if (!cbClusterNode.isPresent()) {
-                throw new IllegalArgumentException(format("The cluster %s does not contain any suitable Couchbase nodes to connect to..", cbNode.getId()));
-            }
-            
-            cbNode = cbClusterNode.get();
-        }
-        String hostname = cbNode.getAttribute(CouchbaseNode.HOSTNAME);
-        String webPort = cbNode.getAttribute(CouchbaseNode.COUCHBASE_WEB_ADMIN_PORT).toString();
-
-
-        String username = cbNode.getConfig(CouchbaseNode.COUCHBASE_ADMIN_USERNAME);
-        String password = cbNode.getConfig(CouchbaseNode.COUCHBASE_ADMIN_PASSWORD);
-
-        String bucketName = entity.getConfig(CouchbaseSyncGateway.COUCHBASE_SERVER_BUCKET);
-        String pool = entity.getConfig(CouchbaseSyncGateway.COUCHBASE_SERVER_POOL);
-        String pretty = entity.getConfig(CouchbaseSyncGateway.PRETTY) ? "-pretty" : "";
-        String verbose = entity.getConfig(CouchbaseSyncGateway.VERBOSE) ? "-verbose" : "";
-
-        String adminRestApiPort = entity.getConfig(CouchbaseSyncGateway.ADMIN_REST_API_PORT).iterator().next().toString();
-        String syncRestApiPort = entity.getConfig(CouchbaseSyncGateway.SYNC_REST_API_PORT).iterator().next().toString();
-
-        String serverWebAdminUrl = format("http://%s:%s@%s:%s", username, password, hostname, webPort);
-        String options = format("-url %s -bucket %s -adminInterface 0.0.0.0:%s -interface 0.0.0.0:%s -pool %s %s %s",
-                serverWebAdminUrl, bucketName, adminRestApiPort, syncRestApiPort, pool, pretty, verbose);
-
-        newScript(ImmutableMap.of("usePidFile", true), LAUNCHING)
-                .body.append(format("/opt/couchbase-sync-gateway/bin/sync_gateway %s ", options) + "> out.log 2> err.log < /dev/null &")
-                .failOnNonZeroResultCode()
-                .execute();
-    }
-    
-    @Override
-    public boolean isRunning() {
-        return newScript(MutableMap.of("usePidFile", true), CHECK_RUNNING).execute() == 0;
-    }
-    
-    @Override
-    public void kill() {
-        newScript(MutableMap.of("usePidFile", true), KILLING).execute();
-    }
-
-    private List<String> installLinux(List<String> urls, String saveAs) {
-
-        String apt = chainGroup(
-                "which apt-get",
-                sudo("apt-get update"),
-                sudo(format("dpkg -i %s", saveAs)));
-
-        String yum = chainGroup(
-                "which yum",
-                sudo(format("rpm --install %s", saveAs)));
-
-        return ImmutableList.<String>builder()
-                .add(INSTALL_CURL)
-                .addAll(BashCommands.commandsToDownloadUrlsAs(urls, saveAs))
-                .add(alternatives(apt, yum))
-                .build();
-    }
-
-    @Override
-    public String getOsTag() {
-        OsDetails os = getLocation().getOsDetails();
-        if (os == null) {
-            // Default to generic linux
-            return "x86_64.rpm";
-        } else {
-            //FIXME should be a better way to check for OS name and version
-            String osName = os.getName().toLowerCase();
-            String fileExtension = osName.contains("deb") || osName.contains("ubuntu") ? ".deb" : ".rpm";
-            String arch = os.is64bit() ? "x86_64" : "x86";
-            return arch + fileExtension;
-        }
-    }
-
-}
\ No newline at end of file
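
For context, a gateway built with the driver above is wired to an existing
Couchbase entity via its config keys (COUCHBASE_SERVER and
COUCHBASE_SERVER_BUCKET, both read in launch()). A minimal sketch against the
pre-rename brooklyn.* API; the bucket name "default" is an illustrative
assumption:

    import brooklyn.entity.nosql.couchbase.CouchbaseCluster;
    import brooklyn.entity.nosql.couchbase.CouchbaseSyncGateway;
    import brooklyn.entity.proxying.EntitySpec;

    public class SyncGatewaySketch {
        // Builds a spec for a sync gateway pointed at an existing cluster entity;
        // the driver above also accepts a single CouchbaseNode here.
        public static EntitySpec<CouchbaseSyncGateway> specFor(CouchbaseCluster cluster) {
            return EntitySpec.create(CouchbaseSyncGateway.class)
                    .configure(CouchbaseSyncGateway.COUCHBASE_SERVER, cluster)
                    .configure(CouchbaseSyncGateway.COUCHBASE_SERVER_BUCKET, "default");
        }
    }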

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBCluster.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBCluster.java b/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBCluster.java
deleted file mode 100644
index 7e65a78..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBCluster.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchdb;
-
-import brooklyn.entity.group.DynamicCluster;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
-import brooklyn.event.basic.Sensors;
-import brooklyn.util.flags.SetFromFlag;
-
-/**
- * A cluster of {@link CouchDBNode}s based on {@link DynamicCluster} which can be resized by a policy if required.
- *
- * TODO add sensors with aggregated CouchDB statistics from cluster
- */
-@ImplementedBy(CouchDBClusterImpl.class)
-public interface CouchDBCluster extends DynamicCluster {
-
-    @SetFromFlag("clusterName")
-    BasicAttributeSensorAndConfigKey<String> CLUSTER_NAME = new BasicAttributeSensorAndConfigKey<String>(String.class, "couchdb.cluster.name", "Name of the CouchDB cluster", "BrooklynCluster");
-
-    AttributeSensor<String> HOSTNAME = Sensors.newStringSensor("couchdb.cluster.hostname", "Hostname to connect to cluster with");
-
-    AttributeSensor<Integer> HTTP_PORT = Sensors.newIntegerSensor("couchdb.cluster.http.port", "CouchDB HTTP port to connect to cluster with");
-
-    /**
-     * The name of the cluster.
-     */
-    String getClusterName();
-
-}
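
Since CouchDBCluster extends DynamicCluster, the generic cluster keys apply
alongside the CLUSTER_NAME key above. A minimal sketch (size and name are
illustrative):

    import brooklyn.entity.group.DynamicCluster;
    import brooklyn.entity.nosql.couchdb.CouchDBCluster;
    import brooklyn.entity.proxying.EntitySpec;

    public class CouchClusterSketch {
        // Three CouchDB nodes, resizable later via the resize effector or a policy.
        public static EntitySpec<CouchDBCluster> spec() {
            return EntitySpec.create(CouchDBCluster.class)
                    .configure(CouchDBCluster.CLUSTER_NAME, "BrooklynCluster")
                    .configure(DynamicCluster.INITIAL_SIZE, 3);
        }
    }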

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBClusterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBClusterImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBClusterImpl.java
deleted file mode 100644
index 4835f72..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBClusterImpl.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchdb;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.entity.group.DynamicClusterImpl;
-import brooklyn.entity.proxying.EntitySpec;
-
-/**
- * Implementation of {@link CouchDBCluster}.
- */
-public class CouchDBClusterImpl extends DynamicClusterImpl implements CouchDBCluster {
-
-    @SuppressWarnings("unused")
-    private static final Logger log = LoggerFactory.getLogger(CouchDBClusterImpl.class);
-
-    public CouchDBClusterImpl() {
-    }
-
-    /**
-     * Sets the default {@link #MEMBER_SPEC} to describe the CouchDB nodes.
-     */
-    @Override
-    protected EntitySpec<?> getMemberSpec() {
-        return getConfig(MEMBER_SPEC, EntitySpec.create(CouchDBNode.class));
-    }
-
-    @Override
-    public String getClusterName() {
-        return getAttribute(CLUSTER_NAME);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBNode.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBNode.java b/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBNode.java
deleted file mode 100644
index 88d0694..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBNode.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchdb;
-
-import org.apache.brooklyn.catalog.Catalog;
-import brooklyn.config.ConfigKey;
-import brooklyn.entity.basic.ConfigKeys;
-import brooklyn.entity.basic.SoftwareProcess;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.entity.webapp.WebAppService;
-import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
-import brooklyn.util.flags.SetFromFlag;
-
-/**
- * An {@link brooklyn.entity.Entity} that represents a CouchDB node in a {@link CouchDBCluster}.
- */
-@Catalog(name="CouchDB Node",
-        description="Apache CouchDB is a database that uses JSON for documents, JavaScript for MapReduce queries, " +
-                "and regular HTTP for an API",
-        iconUrl="classpath:///couchdb-logo.png")
-@ImplementedBy(CouchDBNodeImpl.class)
-public interface CouchDBNode extends SoftwareProcess, WebAppService {
-
-    @SetFromFlag("version")
-    ConfigKey<String> SUGGESTED_VERSION = ConfigKeys.newConfigKeyWithDefault(SoftwareProcess.SUGGESTED_VERSION, "1.2.1");
-
-    @SetFromFlag("erlangVersion")
-    ConfigKey<String> ERLANG_VERSION = ConfigKeys.newStringConfigKey("erlang.version", "Erlang runtime version", "R15B");
-
-    @SetFromFlag("clusterName")
-    BasicAttributeSensorAndConfigKey<String> CLUSTER_NAME = CouchDBCluster.CLUSTER_NAME;
-
-    @SetFromFlag("couchdbConfigTemplateUrl")
-    BasicAttributeSensorAndConfigKey<String> COUCHDB_CONFIG_TEMPLATE_URL = new BasicAttributeSensorAndConfigKey<String>(
-            String.class, "couchdb.config.templateUrl", "Template file (in freemarker format) for the couchdb config file", 
-            "classpath://brooklyn/entity/nosql/couchdb/couch.ini");
-
-    @SetFromFlag("couchdbUriTemplateUrl")
-    BasicAttributeSensorAndConfigKey<String> COUCHDB_URI_TEMPLATE_URL = new BasicAttributeSensorAndConfigKey<String>(
-            String.class, "couchdb.uri.templateUrl", "Template file (in freemarker format) for the couchdb URI file", 
-            "classpath://brooklyn/entity/nosql/couchdb/couch.uri");
-
-    @SetFromFlag("couchdbConfigFileName")
-    BasicAttributeSensorAndConfigKey<String> COUCHDB_CONFIG_FILE_NAME = new BasicAttributeSensorAndConfigKey<String>(
-            String.class, "couchdb.config.fileName", "Name for the copied config file", "local.ini");
-
-    Integer getHttpPort();
-
-    Integer getHttpsPort();
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBNodeDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBNodeDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBNodeDriver.java
deleted file mode 100644
index 6e98c33..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBNodeDriver.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchdb;
-
-import brooklyn.entity.basic.SoftwareProcessDriver;
-
-public interface CouchDBNodeDriver extends SoftwareProcessDriver {
-
-    Integer getHttpPort();
-
-    Integer getHttpsPort();
-
-    String getClusterName();
-
-    String getCouchDBConfigTemplateUrl();
-
-    String getCouchDBUriTemplateUrl();
-
-    String getCouchDBConfigFileName();
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBNodeImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBNodeImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBNodeImpl.java
deleted file mode 100644
index 89573ec..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBNodeImpl.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchdb;
-
-import java.util.concurrent.TimeUnit;
-
-import javax.annotation.Nullable;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.entity.basic.SoftwareProcessImpl;
-import brooklyn.entity.webapp.JavaWebAppSoftwareProcessImpl;
-import brooklyn.entity.webapp.WebAppServiceMethods;
-import brooklyn.event.feed.http.HttpFeed;
-import brooklyn.event.feed.http.HttpPollConfig;
-import brooklyn.event.feed.http.HttpValueFunctions;
-
-import com.google.common.base.Function;
-import com.google.common.base.Functions;
-
-/**
- * Implementation of {@link CouchDBNode}.
- */
-public class CouchDBNodeImpl extends SoftwareProcessImpl implements CouchDBNode {
-
-    private static final Logger log = LoggerFactory.getLogger(CouchDBNodeImpl.class);
-
-    public CouchDBNodeImpl() {
-    }
-
-    public Integer getHttpPort() { return getAttribute(CouchDBNode.HTTP_PORT); }
-    public Integer getHttpsPort() { return getAttribute(CouchDBNode.HTTPS_PORT); }
-    public String getClusterName() { return getAttribute(CouchDBNode.CLUSTER_NAME); }
-
-    @Override
-    public Class<CouchDBNodeDriver> getDriverInterface() {
-        return CouchDBNodeDriver.class;
-    }
-
-    private volatile HttpFeed httpFeed;
-
-    @Override 
-    protected void connectSensors() {
-        super.connectSensors();
-
-        connectServiceUpIsRunning();
-
-        httpFeed = HttpFeed.builder()
-                .entity(this)
-                .period(500, TimeUnit.MILLISECONDS)
-                .baseUri(String.format("http://%s:%d/_stats", getAttribute(HOSTNAME), getHttpPort()))
-                .poll(new HttpPollConfig<Integer>(REQUEST_COUNT)
-                        .onSuccess(HttpValueFunctions.jsonContents(new String[] { "httpd", "requests", "count" }, Integer.class))
-                        .onFailureOrException(Functions.constant(-1)))
-                .poll(new HttpPollConfig<Integer>(ERROR_COUNT)
-                        .onSuccess(HttpValueFunctions.jsonContents(new String[] { "httpd_status_codes", "404", "count" }, Integer.class))
-                        .onFailureOrException(Functions.constant(-1)))
-                .poll(new HttpPollConfig<Integer>(TOTAL_PROCESSING_TIME)
-                        .onSuccess(HttpValueFunctions.jsonContents(new String[] { "couchdb", "request_time", "count" }, Integer.class))
-                        .onFailureOrException(Functions.constant(-1)))
-                .poll(new HttpPollConfig<Integer>(MAX_PROCESSING_TIME)
-                        .onSuccess(HttpValueFunctions.chain(HttpValueFunctions.jsonContents(new String[] { "couchdb", "request_time", "max" }, Double.class), new Function<Double, Integer>() {
-                            @Override
-                            public Integer apply(@Nullable Double input) {
-                                return (input == null) ? null : Integer.valueOf(input.intValue());
-                            }
-                        }))
-                        .onFailureOrException(Functions.constant(-1)))
-                .build();
-
-        WebAppServiceMethods.connectWebAppServerPolicies(this);
-    }
-
-    @Override
-    public void disconnectSensors() {
-        super.disconnectSensors();
-        if (httpFeed != null) httpFeed.stop();
-        disconnectServiceUpIsRunning();
-    }
-
-    /** @see JavaWebAppSoftwareProcessImpl#postStop() */
-    @Override
-    protected void postStop() {
-        super.postStop();
-        // zero out the derived workrate sensors.
-        setAttribute(REQUESTS_PER_SECOND_LAST, 0D);
-        setAttribute(REQUESTS_PER_SECOND_IN_WINDOW, 0D);
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBNodeSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBNodeSshDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBNodeSshDriver.java
deleted file mode 100644
index 74a8092..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/couchdb/CouchDBNodeSshDriver.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchdb;
-
-import static brooklyn.util.ssh.BashCommands.*;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.entity.basic.AbstractSoftwareProcessSshDriver;
-import brooklyn.entity.basic.Attributes;
-import brooklyn.location.Location;
-import brooklyn.location.basic.SshMachineLocation;
-import brooklyn.util.collections.MutableMap;
-import brooklyn.util.net.Networking;
-import brooklyn.util.os.Os;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Sets;
-
-/**
- * Start a {@link CouchDBNode} in a {@link Location} accessible over ssh.
- */
-public class CouchDBNodeSshDriver extends AbstractSoftwareProcessSshDriver implements CouchDBNodeDriver {
-
-    private static final Logger log = LoggerFactory.getLogger(CouchDBNodeSshDriver.class);
-
-    public CouchDBNodeSshDriver(CouchDBNodeImpl entity, SshMachineLocation machine) {
-        super(entity, machine);
-
-        entity.setAttribute(Attributes.LOG_FILE_LOCATION, getLogFileLocation());
-    }
-
-    public String getLogFileLocation() { return Os.mergePathsUnix(getRunDir(), "couchdb.log"); }
-
-    @Override
-    public Integer getHttpPort() { return entity.getAttribute(CouchDBNode.HTTP_PORT); }
-
-    @Override
-    public Integer getHttpsPort() { return entity.getAttribute(CouchDBNode.HTTPS_PORT); }
-
-    @Override
-    public String getClusterName() { return entity.getAttribute(CouchDBNode.CLUSTER_NAME); }
-
-    @Override
-    public String getCouchDBConfigTemplateUrl() { return entity.getAttribute(CouchDBNode.COUCHDB_CONFIG_TEMPLATE_URL); }
-
-    @Override
-    public String getCouchDBUriTemplateUrl() { return entity.getAttribute(CouchDBNode.COUCHDB_URI_TEMPLATE_URL); }
-
-    @Override
-    public String getCouchDBConfigFileName() { return entity.getAttribute(CouchDBNode.COUCHDB_CONFIG_FILE_NAME); }
-
-    public String getErlangVersion() { return entity.getConfig(CouchDBNode.ERLANG_VERSION); }
-
-    @Override
-    public void install() {
-        log.info("Installing {}", entity);
-        List<String> commands = ImmutableList.<String>builder()
-                .add(ifExecutableElse0("zypper", chainGroup( // SLES 11 not supported, would require building from source
-                        ok(sudo("zypper --non-interactive addrepo http://download.opensuse.org/repositories/devel:/languages:/erlang/openSUSE_11.4 erlang_suse_11")),
-                        ok(sudo("zypper --non-interactive addrepo http://download.opensuse.org/repositories/devel:/languages:/erlang/openSUSE_12.3 erlang_suse_12")),
-                        ok(sudo("zypper --non-interactive addrepo http://download.opensuse.org/repositories/devel:/languages:/erlang/openSUSE_13.1 erlang_suse_13")),
-                        ok(sudo("zypper --non-interactive addrepo http://download.opensuse.org/repositories/server:/database/openSUSE_11.4 db_suse_11")),
-                        ok(sudo("zypper --non-interactive addrepo http://download.opensuse.org/repositories/server:/database/openSUSE_12.3 db_suse_12")),
-                        ok(sudo("zypper --non-interactive addrepo http://download.opensuse.org/repositories/server:/database/openSUSE_13.1 db_suse_13")))))
-                .add(installPackage( // NOTE only 'port' states the version of Erlang used, maybe remove this constraint?
-                        ImmutableMap.of(
-                                "apt", "erlang-nox erlang-dev",
-                                "port", "erlang@"+getErlangVersion()+"+ssl"),
-                        "erlang"))
-                .add(installPackage("couchdb"))
-                .add(ifExecutableElse0("service", sudo("service couchdb stop")))
-                .build();
-
-        newScript(INSTALLING)
-                .body.append(commands)
-                .execute();
-    }
-
-    @Override
-    public Set<Integer> getPortsUsed() {
-        Set<Integer> result = Sets.newLinkedHashSet(super.getPortsUsed());
-        result.addAll(getPortMap().values());
-        return result;
-    }
-
-    private Map<String, Integer> getPortMap() {
-        return ImmutableMap.<String, Integer>builder()
-                .put("httpPort", getHttpPort())
-                .build();
-    }
-
-    @Override
-    public void customize() {
-        log.info("Customizing {} (Cluster {})", entity, getClusterName());
-        Networking.checkPortsValid(getPortMap());
-
-        newScript(CUSTOMIZING).execute();
-
-        // Copy the configuration files across
-        String destinationConfigFile = Os.mergePathsUnix(getRunDir(), getCouchDBConfigFileName());
-        copyTemplate(getCouchDBConfigTemplateUrl(), destinationConfigFile);
-        String destinationUriFile = Os.mergePathsUnix(getRunDir(), "couch.uri");
-        copyTemplate(getCouchDBUriTemplateUrl(), destinationUriFile);
-    }
-
-    @Override
-    public void launch() {
-        log.info("Launching  {}", entity);
-        newScript(MutableMap.of(USE_PID_FILE, false), LAUNCHING)
-                .body.append(sudo(String.format("nohup couchdb -p %s -a %s -o couchdb-console.log -e couchdb-error.log -b &", getPidFile(), Os.mergePathsUnix(getRunDir(), getCouchDBConfigFileName()))))
-                .execute();
-    }
-
-    public String getPidFile() { return Os.mergePathsUnix(getRunDir(), "couchdb.pid"); }
-
-    @Override
-    public boolean isRunning() {
-        return newScript(MutableMap.of(USE_PID_FILE, false), CHECK_RUNNING)
-                .body.append(sudo(String.format("couchdb -p %s -s", getPidFile())))
-                .execute() == 0;
-    }
-
-    @Override
-    public void stop() {
-        newScript(MutableMap.of(USE_PID_FILE, false), STOPPING)
-                .body.append(sudo(String.format("couchdb -p %s -k", getPidFile())))
-                .failOnNonZeroResultCode()
-                .execute();
-    }
-}
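
The install() above leans on BashCommands.installPackage, which takes a map of
package-manager-specific package names plus a generic fallback name. For
illustration, the same pattern standalone (package names taken from the code
above):

    import brooklyn.util.ssh.BashCommands;
    import com.google.common.collect.ImmutableMap;

    public class InstallPackageSketch {
        public static void main(String[] args) {
            // Returns a bash snippet that uses the matching package manager's
            // entry, falling back to the generic package name "erlang".
            String cmd = BashCommands.installPackage(
                    ImmutableMap.of("apt", "erlang-nox erlang-dev", "port", "erlang@R15B+ssl"),
                    "erlang");
            System.out.println(cmd);
        }
    }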

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchCluster.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchCluster.java b/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchCluster.java
deleted file mode 100644
index ac5e1a8..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchCluster.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.elasticsearch;
-
-import org.apache.brooklyn.catalog.Catalog;
-import brooklyn.entity.group.DynamicCluster;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
-import brooklyn.util.flags.SetFromFlag;
-
-/**
- * A cluster of {@link ElasticSearchNode}s based on {@link DynamicCluster} which can be resized by a policy if required.
- */
-@Catalog(name="Elastic Search Cluster", description="Elasticsearch is an open-source search server based on Lucene. "
-        + "It provides a distributed, multitenant-capable full-text search engine with a RESTful web interface and "
-        + "schema-free JSON documents.")
-@ImplementedBy(ElasticSearchClusterImpl.class)
-public interface ElasticSearchCluster extends DynamicCluster {
-    @SetFromFlag("clusterName")
-    BasicAttributeSensorAndConfigKey<String> CLUSTER_NAME = new BasicAttributeSensorAndConfigKey<String>(String.class, 
-            "elasticsearch.cluster.name", "Name of the ElasticSearch cluster", "BrooklynCluster");
-    
-    String getClusterName();
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchClusterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchClusterImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchClusterImpl.java
deleted file mode 100644
index a773006..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchClusterImpl.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.elasticsearch;
-
-import java.util.concurrent.atomic.AtomicInteger;
-
-import brooklyn.entity.group.DynamicClusterImpl;
-import brooklyn.entity.proxying.EntitySpec;
-
-public class ElasticSearchClusterImpl extends DynamicClusterImpl implements ElasticSearchCluster {
-    
-    private AtomicInteger nextMemberId = new AtomicInteger(0);
-
-    @Override
-    protected EntitySpec<?> getMemberSpec() {
-        EntitySpec<?> spec = EntitySpec.create(getConfig(MEMBER_SPEC, EntitySpec.create(ElasticSearchNode.class)));
-        
-        spec.configure(ElasticSearchNode.CLUSTER_NAME, getConfig(ElasticSearchClusterImpl.CLUSTER_NAME))
-            .configure(ElasticSearchNode.NODE_NAME, "elasticsearch-" + nextMemberId.incrementAndGet());
-        
-        return spec;
-    }
-    
-    @Override
-    public String getClusterName() {
-        return getConfig(CLUSTER_NAME);
-    }
-    
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchNode.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchNode.java b/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchNode.java
deleted file mode 100644
index 814955e..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchNode.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.elasticsearch;
-
-import org.apache.brooklyn.catalog.Catalog;
-import brooklyn.config.ConfigKey;
-import brooklyn.entity.basic.ConfigKeys;
-import brooklyn.entity.basic.SoftwareProcess;
-import brooklyn.entity.database.DatastoreMixins;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.entity.webapp.WebAppServiceConstants;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
-import brooklyn.event.basic.BasicAttributeSensorAndConfigKey.StringAttributeSensorAndConfigKey;
-import brooklyn.event.basic.PortAttributeSensorAndConfigKey;
-import brooklyn.event.basic.Sensors;
-import brooklyn.location.basic.PortRanges;
-import brooklyn.util.flags.SetFromFlag;
-
-/**
- * An {@link brooklyn.entity.Entity} that represents an ElasticSearch node
- */
-@Catalog(name="Elastic Search Node", description="Elasticsearch is an open-source search server based on Lucene. "
-        + "It provides a distributed, multitenant-capable full-text search engine with a RESTful web interface and "
-        + "schema-free JSON documents.")
-@ImplementedBy(ElasticSearchNodeImpl.class)
-public interface ElasticSearchNode extends SoftwareProcess, DatastoreMixins.HasDatastoreUrl {
-    @SetFromFlag("version")
-    ConfigKey<String> SUGGESTED_VERSION = ConfigKeys.newConfigKeyWithDefault(SoftwareProcess.SUGGESTED_VERSION, "1.2.1");
-    
-    @SetFromFlag("downloadUrl")
-    BasicAttributeSensorAndConfigKey<String> DOWNLOAD_URL = new BasicAttributeSensorAndConfigKey<String>(
-            SoftwareProcess.DOWNLOAD_URL, "https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-${version}.tar.gz");
-    
-    @SetFromFlag("dataDir")
-    ConfigKey<String> DATA_DIR = ConfigKeys.newStringConfigKey("elasticsearch.node.data.dir", "Directory for writing data files", null);
-    
-    @SetFromFlag("logDir")
-    ConfigKey<String> LOG_DIR = ConfigKeys.newStringConfigKey("elasticsearch.node.log.dir", "Directory for writing log files", null);
-    
-    @SetFromFlag("configFileUrl")
-    ConfigKey<String> TEMPLATE_CONFIGURATION_URL = ConfigKeys.newStringConfigKey(
-            "elasticsearch.node.template.configuration.url", "URL where the elasticsearch configuration file (in freemarker format) can be found", null);
-    
-    @SetFromFlag("multicastEnabled")
-    ConfigKey<Boolean> MULTICAST_ENABLED = ConfigKeys.newBooleanConfigKey("elasticsearch.node.multicast.enabled", 
-            "Indicates whether zen discovery multicast should be enabled for a node", null);
-    
-    @SetFromFlag("multicastEnabled")
-    ConfigKey<Boolean> UNICAST_ENABLED = ConfigKeys.newBooleanConfigKey("elasticsearch.node.UNicast.enabled", 
-            "Indicates whether zen discovery unicast should be enabled for a node", null);
-    
-    @SetFromFlag("httpPort")
-    PortAttributeSensorAndConfigKey HTTP_PORT = new PortAttributeSensorAndConfigKey(WebAppServiceConstants.HTTP_PORT, PortRanges.fromString("9200+"));
-    
-    @SetFromFlag("nodeName")
-    StringAttributeSensorAndConfigKey NODE_NAME = new StringAttributeSensorAndConfigKey("elasticsearch.node.name", 
-            "Node name (or randomly selected if not set", null);
-    
-    @SetFromFlag("clusterName")
-    StringAttributeSensorAndConfigKey CLUSTER_NAME = new StringAttributeSensorAndConfigKey("elasticsearch.node.cluster.name", 
-            "Cluster name (or elasticsearch selected if not set", null);
-    
-    AttributeSensor<String> NODE_ID = Sensors.newStringSensor("elasticsearch.node.id");
-    AttributeSensor<Integer> DOCUMENT_COUNT = Sensors.newIntegerSensor("elasticsearch.node.docs.count");
-    AttributeSensor<Integer> STORE_BYTES = Sensors.newIntegerSensor("elasticsearch.node.store.bytes");
-    AttributeSensor<Integer> GET_TOTAL = Sensors.newIntegerSensor("elasticsearch.node.get.total");
-    AttributeSensor<Integer> GET_TIME_IN_MILLIS = Sensors.newIntegerSensor("elasticsearch.node.get.time.in.millis");
-    AttributeSensor<Integer> SEARCH_QUERY_TOTAL = Sensors.newIntegerSensor("elasticsearch.node.search.query.total");
-    AttributeSensor<Integer> SEARCH_QUERY_TIME_IN_MILLIS = Sensors.newIntegerSensor("elasticsearch.node.search.query.time.in.millis");
-    
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeDriver.java
deleted file mode 100644
index 976b05c..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeDriver.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.elasticsearch;
-
-import brooklyn.entity.basic.SoftwareProcessDriver;
-
-public interface ElasticSearchNodeDriver extends SoftwareProcessDriver {
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeImpl.java
deleted file mode 100644
index b6b244d..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeImpl.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.elasticsearch;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-import brooklyn.entity.basic.SoftwareProcessImpl;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.feed.http.HttpFeed;
-import brooklyn.event.feed.http.HttpPollConfig;
-import brooklyn.event.feed.http.HttpValueFunctions;
-import brooklyn.event.feed.http.JsonFunctions;
-import brooklyn.location.access.BrooklynAccessUtils;
-import brooklyn.util.guava.Functionals;
-import brooklyn.util.guava.Maybe;
-import brooklyn.util.guava.MaybeFunctions;
-import brooklyn.util.guava.TypeTokens;
-import brooklyn.util.http.HttpToolResponse;
-
-import com.google.common.base.Function;
-import com.google.common.base.Functions;
-import com.google.common.net.HostAndPort;
-import com.google.gson.JsonElement;
-
-public class ElasticSearchNodeImpl extends SoftwareProcessImpl implements ElasticSearchNode {
-    
-    protected static final Function<Maybe<JsonElement>, Maybe<JsonElement>> GET_FIRST_NODE_FROM_NODES = new Function<Maybe<JsonElement>, Maybe<JsonElement>>() {
-        @Override public Maybe<JsonElement> apply(Maybe<JsonElement> input) {
-            if (input.isAbsent()) {
-                return input;
-            }
-            return Maybe.fromNullable(input.get().getAsJsonObject().entrySet().iterator().next().getValue());
-        }
-    };
-    
-    protected static final Function<HttpToolResponse, Maybe<JsonElement>> GET_FIRST_NODE = Functionals.chain(HttpValueFunctions.jsonContents(), 
-            MaybeFunctions.<JsonElement>wrap(), JsonFunctions.walkM("nodes"), GET_FIRST_NODE_FROM_NODES);
-    
-    
-    HttpFeed httpFeed;
-
-    @Override
-    public Class<ElasticSearchNodeDriver> getDriverInterface() {
-        return ElasticSearchNodeDriver.class;
-    }
-    
-    protected static final <T> HttpPollConfig<T> getSensorFromNodeStat(AttributeSensor<T> sensor, String... jsonPath) {
-        return new HttpPollConfig<T>(sensor)
-            .onSuccess(Functionals.chain(GET_FIRST_NODE, JsonFunctions.walkM(jsonPath), JsonFunctions.castM(TypeTokens.getRawRawType(sensor.getTypeToken()), null)))
-            .onFailureOrException(Functions.<T>constant(null));
-    }
-    
-    @Override
-    protected void connectSensors() {
-        super.connectSensors();
-        Integer rawPort = getAttribute(HTTP_PORT);
-        checkNotNull(rawPort, "HTTP_PORT sensors not set for %s; is an acceptable port available?", this);
-        HostAndPort hp = BrooklynAccessUtils.getBrooklynAccessibleAddress(this, rawPort);
-        Function<Maybe<JsonElement>, String> getNodeId = new Function<Maybe<JsonElement>, String>() {
-            @Override public String apply(Maybe<JsonElement> input) {
-                if (input.isAbsent()) {
-                    return null;
-                }
-                return input.get().getAsJsonObject().entrySet().iterator().next().getKey();
-            }
-        };
-        httpFeed = HttpFeed.builder()
-            .entity(this)
-            .period(1000)
-            .baseUri(String.format("http://%s:%s/_nodes/_local/stats", hp.getHostText(), hp.getPort()))
-            .poll(new HttpPollConfig<Boolean>(SERVICE_UP)
-                .onSuccess(HttpValueFunctions.responseCodeEquals(200))
-                .onFailureOrException(Functions.constant(false)))
-            .poll(new HttpPollConfig<String>(NODE_ID)
-                .onSuccess(Functionals.chain(HttpValueFunctions.jsonContents(), MaybeFunctions.<JsonElement>wrap(), JsonFunctions.walkM("nodes"), getNodeId))
-                .onFailureOrException(Functions.constant("")))
-            .poll(getSensorFromNodeStat(NODE_NAME, "name"))
-            .poll(getSensorFromNodeStat(DOCUMENT_COUNT, "indices", "docs", "count"))
-            .poll(getSensorFromNodeStat(STORE_BYTES, "indices", "store", "size_in_bytes"))
-            .poll(getSensorFromNodeStat(GET_TOTAL, "indices", "get", "total"))
-            .poll(getSensorFromNodeStat(GET_TIME_IN_MILLIS, "indices", "get", "time_in_millis"))
-            .poll(getSensorFromNodeStat(SEARCH_QUERY_TOTAL, "indices", "search", "query_total"))
-            .poll(getSensorFromNodeStat(SEARCH_QUERY_TIME_IN_MILLIS, "indices", "search", "query_time_in_millis"))
-            .poll(new HttpPollConfig<String>(CLUSTER_NAME)
-                .onSuccess(HttpValueFunctions.jsonContents("cluster_name", String.class)))
-            .build();
-    }
-    
-    @Override
-    protected void disconnectSensors() {
-        super.disconnectSensors();
-        if (httpFeed != null) {
-            httpFeed.stop();
-        }
-    }
-}
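
The GET_FIRST_NODE chain above walks the "nodes" object of the
/_nodes/_local/stats response and keeps the first (and, for _local, only)
entry; the NODE_ID poll keeps that entry's key instead. A standalone
illustration with plain Gson, using an assumed, trimmed response shape:

    import com.google.gson.JsonElement;
    import com.google.gson.JsonParser;

    public class NodeStatsWalkSketch {
        public static void main(String[] args) {
            String sample = "{\"cluster_name\":\"es\",\"nodes\":{\"abc123\":"
                    + "{\"name\":\"elasticsearch-1\",\"indices\":{\"docs\":{\"count\":42}}}}}";
            JsonElement nodes = new JsonParser().parse(sample)
                    .getAsJsonObject().get("nodes");
            // Key of the first entry = node id; value = that node's stats.
            String nodeId = nodes.getAsJsonObject().entrySet().iterator().next().getKey();
            JsonElement firstNode = nodes.getAsJsonObject().entrySet().iterator().next().getValue();
            int docCount = firstNode.getAsJsonObject().get("indices").getAsJsonObject()
                    .get("docs").getAsJsonObject().get("count").getAsInt();
            System.out.println(nodeId + " has " + docCount + " docs");  // abc123 has 42 docs
        }
    }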

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeSshDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeSshDriver.java
deleted file mode 100644
index 1fdb672..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeSshDriver.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.elasticsearch;
-
-import static java.lang.String.format;
-
-import java.io.Reader;
-import java.io.StringReader;
-import java.util.List;
-
-import brooklyn.config.ConfigKey;
-import brooklyn.entity.basic.AbstractSoftwareProcessSshDriver;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.basic.EntityLocal;
-import brooklyn.location.basic.SshMachineLocation;
-import brooklyn.util.collections.MutableMap;
-import brooklyn.util.net.Urls;
-import brooklyn.util.os.Os;
-import brooklyn.util.ssh.BashCommands;
-
-import com.google.common.collect.ImmutableList;
-
-public class ElasticSearchNodeSshDriver extends AbstractSoftwareProcessSshDriver implements ElasticSearchNodeDriver {
-
-    public ElasticSearchNodeSshDriver(EntityLocal entity, SshMachineLocation machine) {
-        super(entity, machine);
-    }
-
-    @Override
-    public void preInstall() {
-        resolver = Entities.newDownloader(this);
-        setExpandedInstallDir(Os.mergePaths(getInstallDir(), resolver.getUnpackedDirectoryName(format("elasticsearch-%s", getVersion()))));
-    }
-
-    @Override
-    public void install() {
-        List<String> urls = resolver.getTargets();
-        String saveAs = resolver.getFilename();
-        
-        List<String> commands = ImmutableList.<String>builder()
-            .add(BashCommands.installJavaLatestOrWarn())
-            .addAll(BashCommands.commandsToDownloadUrlsAs(urls, saveAs))
-            .add(String.format("tar zxvf %s", saveAs))
-            .build();
-        
-        newScript(INSTALLING).body.append(commands).execute();
-    }
-
-    @Override
-    public void customize() {
-        newScript(CUSTOMIZING).execute();  //create the directory
-        
-        String configFileUrl = entity.getConfig(ElasticSearchNode.TEMPLATE_CONFIGURATION_URL);
-        
-        if (configFileUrl == null) {
-            return;
-        }
-
-        String configScriptContents = processTemplate(configFileUrl);
-        Reader configContents = new StringReader(configScriptContents);
-
-        getMachine().copyTo(configContents, Urls.mergePaths(getRunDir(), getConfigFile()));
-    }
-
-    @Override
-    public void launch() {
-        String pidFile = getRunDir() + "/" + AbstractSoftwareProcessSshDriver.PID_FILENAME;
-        entity.setAttribute(ElasticSearchNode.PID_FILE, pidFile);
-        StringBuilder commandBuilder = new StringBuilder()
-            .append(String.format("%s/bin/elasticsearch -d -p %s", getExpandedInstallDir(), pidFile));
-        if (entity.getConfig(ElasticSearchNode.TEMPLATE_CONFIGURATION_URL) != null) {
-            commandBuilder.append(" -Des.config=" + Os.mergePaths(getRunDir(), getConfigFile()));
-        }
-        appendConfigIfPresent(commandBuilder, "es.path.data", ElasticSearchNode.DATA_DIR, Os.mergePaths(getRunDir(), "data"));
-        appendConfigIfPresent(commandBuilder, "es.path.logs", ElasticSearchNode.LOG_DIR, Os.mergePaths(getRunDir(), "logs"));
-        appendConfigIfPresent(commandBuilder, "es.node.name", ElasticSearchNode.NODE_NAME.getConfigKey());
-        appendConfigIfPresent(commandBuilder, "es.cluster.name", ElasticSearchNode.CLUSTER_NAME.getConfigKey());
-        appendConfigIfPresent(commandBuilder, "es.discovery.zen.ping.multicast.enabled", ElasticSearchNode.MULTICAST_ENABLED);
-        appendConfigIfPresent(commandBuilder, "es.discovery.zen.ping.unicast.enabled", ElasticSearchNode.UNICAST_ENABLED);
-        commandBuilder.append(" > out.log 2> err.log < /dev/null");
-        newScript(MutableMap.of("usePidFile", false), LAUNCHING)
-            .updateTaskAndFailOnNonZeroResultCode()
-            .body.append(commandBuilder.toString())
-            .execute();
-    }
-    
-    private void appendConfigIfPresent(StringBuilder builder, String parameter, ConfigKey<?> configKey) {
-        appendConfigIfPresent(builder, parameter, configKey, null);
-    }
-    
-    private void appendConfigIfPresent(StringBuilder builder, String parameter, ConfigKey<?> configKey, String defaultValue) {
-        String config = null;
-        if (entity.getConfig(configKey) != null) {
-            config = String.valueOf(entity.getConfig(configKey));
-        }
-        if (config == null && defaultValue != null) {
-            config = defaultValue;
-        }
-        if (config != null) {
-            builder.append(String.format(" -D%s=%s", parameter, config));
-        }
-    }
-    
-    public String getConfigFile() {
-        return "elasticsearch.yaml";
-    }
-    
-    @Override
-    public boolean isRunning() {
-        return newScript(MutableMap.of("usePidFile", true), CHECK_RUNNING).execute() == 0;
-    }
-    
-    @Override
-    public void stop() {
-        newScript(MutableMap.of("usePidFile", true), STOPPING).execute();
-    }
-    
-    @Override
-    public void kill() {
-        newScript(MutableMap.of("usePidFile", true), KILLING).execute();
-    }
-
-}
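
With no configuration template and all keys left at their defaults, the
launch() above assembles roughly the following (placeholders stand for the
driver's resolved paths):

    <expandedInstallDir>/bin/elasticsearch -d -p <runDir>/<PID_FILENAME> -Des.path.data=<runDir>/data -Des.path.logs=<runDir>/logs > out.log 2> err.log < /dev/null

with the node name, cluster name, and discovery flags appended only when the
corresponding config keys are set.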

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/AbstractMongoDBServer.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/AbstractMongoDBServer.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/AbstractMongoDBServer.java
deleted file mode 100644
index 152bb20..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/AbstractMongoDBServer.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import brooklyn.config.ConfigKey;
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.ConfigKeys;
-import brooklyn.entity.basic.SoftwareProcess;
-import brooklyn.event.basic.AttributeSensorAndConfigKey;
-import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
-import brooklyn.event.basic.PortAttributeSensorAndConfigKey;
-import brooklyn.util.flags.SetFromFlag;
-
-public interface AbstractMongoDBServer extends SoftwareProcess, Entity {
-
-    // TODO Need to properly test v2.4.x and v2.5.x support.
-    // I think the v2.5.x were dev releases.
-    // Should update mongo.config to yaml format, but no rush for that.
-    
-    @SetFromFlag("dataDirectory")
-    ConfigKey<String> DATA_DIRECTORY = ConfigKeys.newStringConfigKey(
-            "mongodb.data.directory", "Data directory to store MongoDB journals");
-    
-    @SetFromFlag("mongodbConfTemplateUrl")
-    ConfigKey<String> MONGODB_CONF_TEMPLATE_URL = ConfigKeys.newStringConfigKey(
-            "mongodb.config.url", "Template file (in freemarker format) for a MongoDB configuration file",
-            "classpath://brooklyn/entity/nosql/mongodb/default.conf");
-    
-    @SetFromFlag("version")
-    ConfigKey<String> SUGGESTED_VERSION =
-            ConfigKeys.newConfigKeyWithDefault(SoftwareProcess.SUGGESTED_VERSION, "2.6.5");
-
-    // TODO: Windows support
-    // e.g. http://fastdl.mongodb.org/linux/mongodb-linux-x86_64-2.2.2.tgz,
-    // http://fastdl.mongodb.org/osx/mongodb-osx-x86_64-2.2.2.tgz
-    // http://downloads.mongodb.org/win32/mongodb-win32-x86_64-1.8.5.zip
-    // Note Windows download is a zip.
-    @SetFromFlag("downloadUrl")
-    AttributeSensorAndConfigKey<String, String> DOWNLOAD_URL = new BasicAttributeSensorAndConfigKey<String>(
-            SoftwareProcess.DOWNLOAD_URL, "http://fastdl.mongodb.org/${driver.osDir}/${driver.osTag}-${version}.tgz");
-
-    @SetFromFlag("port")
-    PortAttributeSensorAndConfigKey PORT =
-            new PortAttributeSensorAndConfigKey("mongodb.server.port", "Server port", "27017+");
-}
\ No newline at end of file
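
For reference, the ${driver.osDir} and ${driver.osTag} placeholders in the
DOWNLOAD_URL above are resolved against the entity's driver (see
AbstractMongoDBSshDriver below); on 64-bit Linux with the default version this
works out to http://fastdl.mongodb.org/linux/mongodb-linux-x86_64-2.6.5.tgz.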

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/AbstractMongoDBSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/AbstractMongoDBSshDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/AbstractMongoDBSshDriver.java
deleted file mode 100644
index 8210c77..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/AbstractMongoDBSshDriver.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.entity.basic.AbstractSoftwareProcessSshDriver;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.basic.EntityLocal;
-import brooklyn.entity.basic.lifecycle.ScriptHelper;
-import brooklyn.location.OsDetails;
-import brooklyn.location.basic.SshMachineLocation;
-import brooklyn.util.exceptions.Exceptions;
-import brooklyn.util.net.Networking;
-import brooklyn.util.os.Os;
-import brooklyn.util.ssh.BashCommands;
-
-import com.google.common.base.Joiner;
-import com.google.common.base.Strings;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-
-public abstract class AbstractMongoDBSshDriver extends AbstractSoftwareProcessSshDriver {
-
-    private static final Logger LOG = LoggerFactory.getLogger(AbstractMongoDBSshDriver.class);
-    
-    public AbstractMongoDBSshDriver(EntityLocal entity, SshMachineLocation machine) {
-        super(entity, machine);
-    }
-
-    @Override
-    public void preInstall() {
-        resolver = Entities.newDownloader(this);
-        setExpandedInstallDir(Os.mergePaths(getInstallDir(), resolver.getUnpackedDirectoryName(getBaseName())));
-    }
-
-    @Override
-    public void install() {
-        List<String> urls = resolver.getTargets();
-        String saveAs = resolver.getFilename();
-    
-        List<String> commands = new LinkedList<String>();
-        commands.addAll(BashCommands.commandsToDownloadUrlsAs(urls, saveAs));
-        commands.add(BashCommands.INSTALL_TAR);
-        commands.add("tar xzfv " + saveAs);
-    
-        newScript(INSTALLING)
-                .failOnNonZeroResultCode()
-                .body.append(commands).execute();
-    }
-    
-    @Override
-    public void customize() {
-        Map<?,?> ports = ImmutableMap.of("port", getServerPort());
-        Networking.checkPortsValid(ports);
-        String command = String.format("mkdir -p %s", getDataDirectory());
-        newScript(CUSTOMIZING)
-                .updateTaskAndFailOnNonZeroResultCode()
-                .body.append(command).execute();
-        String templateUrl = entity.getConfig(MongoDBServer.MONGODB_CONF_TEMPLATE_URL);
-        if (!Strings.isNullOrEmpty(templateUrl)) copyTemplate(templateUrl, getConfFile());
-    }
-    
-    @Override
-    public boolean isRunning() {
-        try {
-            return MongoDBClientSupport.forServer((AbstractMongoDBServer) entity).ping();
-        } catch (Exception e) {
-            Exceptions.propagateIfFatal(e);
-            return false;
-        }
-    }
-    
-    /**
-     * Kills the server with SIGINT. Sending SIGKILL is likely to result in data corruption.
-     * @see <a href="http://docs.mongodb.org/manual/tutorial/manage-mongodb-processes/#sending-a-unix-int-or-term-signal">http://docs.mongodb.org/manual/tutorial/manage-mongodb-processes/#sending-a-unix-int-or-term-signal</a>
-     */
-    @Override
-    public void stop() {
-        // TODO: Wait for the process to terminate. Currently, this will send the signal and then immediately continue with the next steps,
-        // which could involve stopping the VM etc.
-        
-        // We could also use SIGTERM (15)
-        new ScriptHelper(this, "Send SIGINT to MongoDB server")
-                .body.append("kill -2 $(cat " + getPidFile() + ")")
-                .execute();
-    }
-
-    protected String getBaseName() {
-        return getOsTag() + "-" + entity.getConfig(AbstractMongoDBServer.SUGGESTED_VERSION);
-    }
-
-    // IDE note: This is used by MongoDBServer.DOWNLOAD_URL
-    public String getOsDir() {
-        return (getLocation().getOsDetails().isMac()) ? "osx" : "linux";
-    }
-
-    public String getOsTag() {
-        OsDetails os = getLocation().getOsDetails();
-        if (os == null) {
-            // Default to generic linux
-            return "mongodb-linux-x86_64";
-        } else if (os.isMac()) {
-            // Mac is 64bit only
-            return "mongodb-osx-x86_64";
-        } else {
-            String arch = os.is64bit() ? "x86_64" : "i686";
-            return "mongodb-linux-" + arch;
-        }
-    }
-
-    public String getDataDirectory() {
-        String result = entity.getConfig(MongoDBServer.DATA_DIRECTORY);
-        if (result!=null) return result;
-        return getRunDir() + "/data";
-    }
-
-    protected String getLogFile() {
-        return getRunDir() + "/log.txt";
-    }
-
-    protected String getPidFile() {
-        return getRunDir() + "/pid";
-    }
-
-    protected Integer getServerPort() {
-        return entity.getAttribute(MongoDBServer.PORT);
-    }
-
-    protected String getConfFile() {
-        return getRunDir() + "/mongo.conf";
-    }
-
-    protected ImmutableList.Builder<String> getArgsBuilderWithDefaults(AbstractMongoDBServer server) {
-        Integer port = server.getAttribute(MongoDBServer.PORT);
-
-        return ImmutableList.<String>builder()
-                .add("--config", getConfFile())
-                .add("--pidfilepath", getPidFile())
-                .add("--logpath", getLogFile())
-                .add("--port", port.toString())
-                .add("--fork");
-    }
-    
-    protected void launch(ImmutableList.Builder<String> argsBuilder) {
-        String args = Joiner.on(" ").join(argsBuilder.build());
-        String command = String.format("%s/bin/mongod %s > out.log 2> err.log < /dev/null", getExpandedInstallDir(), args);
-        LOG.info(command);
-        newScript(LAUNCHING)
-                .updateTaskAndFailOnNonZeroResultCode()
-                .body.append(command).execute();
-    }
- 
-}
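
As a usage note on the launch path above: getArgsBuilderWithDefaults plus launch() reduce to a single forked mongod invocation. A minimal standalone sketch, assuming hypothetical run and install directories:

    import java.util.Arrays;
    import java.util.List;

    // Mirrors getArgsBuilderWithDefaults() and launch(); paths and port are
    // hypothetical stand-ins for getRunDir()/getExpandedInstallDir()/PORT.
    public class MongoLaunchSketch {
        public static void main(String[] args) {
            String runDir = "/home/user/brooklyn-managed/mongo";
            List<String> mongodArgs = Arrays.asList(
                    "--config", runDir + "/mongo.conf",
                    "--pidfilepath", runDir + "/pid",
                    "--logpath", runDir + "/log.txt",
                    "--port", Integer.toString(27017),
                    "--fork");                         // detach, so the launching script can return
            String command = String.format("%s/bin/mongod %s > out.log 2> err.log < /dev/null",
                    "/home/user/brooklyn-managed/install/mongodb", String.join(" ", mongodArgs));
            System.out.println(command);
        }
    }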

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBClient.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBClient.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBClient.java
deleted file mode 100644
index 6485101..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBClient.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import java.util.List;
-import java.util.Map;
-
-import brooklyn.config.ConfigKey;
-import brooklyn.entity.annotation.Effector;
-import brooklyn.entity.annotation.EffectorParam;
-import brooklyn.entity.basic.ConfigKeys;
-import brooklyn.entity.basic.MethodEffector;
-import brooklyn.entity.nosql.mongodb.sharding.MongoDBShardedDeployment;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.util.flags.SetFromFlag;
-
-import com.google.common.reflect.TypeToken;
-
-@ImplementedBy(MongoDBClientImpl.class)
-public interface MongoDBClient extends AbstractMongoDBServer {
-    
-    MethodEffector<Void> RUN_SCRIPT = new MethodEffector<Void>(MongoDBClient.class, "runScript");
-    
-    @SuppressWarnings("serial")
-    @SetFromFlag("startupJsScripts")
-    ConfigKey<List<String>> STARTUP_JS_SCRIPTS = ConfigKeys.newConfigKey(
-            new TypeToken<List<String>>(){}, "mongodb.client.startupJsScripts", 
-                "List of scripts defined in mongodb.client.scripts to be run on startup");
-    
-    @SuppressWarnings("serial")
-    @SetFromFlag("scripts")
-    ConfigKey<Map<String, String>> JS_SCRIPTS = ConfigKeys.newConfigKey(
-            new TypeToken<Map<String, String>>(){}, "mongodb.client.scripts", "Map of named JavaScript scripts to be copied "
-                    + "to the server. These scripts can be run using the runScript effector");
-    
-    @SetFromFlag("shardedDeployment")
-    ConfigKey<MongoDBShardedDeployment> SHARDED_DEPLOYMENT = ConfigKeys.newConfigKey(MongoDBShardedDeployment.class, 
-            "mongodb.client.shardeddeployment", "Sharded deployment that the client will use to run scripts. "
-                    + "If both SERVER and SHARDED_DEPLOYMENT are specified, SERVER will be used");
-    
-    @SetFromFlag("server")
-    ConfigKey<AbstractMongoDBServer> SERVER = ConfigKeys.newConfigKey(AbstractMongoDBServer.class, 
-            "mongodb.client.server", "MongoDBServer that the client will use to run scripts. "
-                    + "If both SERVER and SHARDED_DEPLOYMENT are specified, SERVER will be used");
-    
-    @Effector(description="Runs one of the scripts defined in mongodb.client.scripts")
-    void runScript(@EffectorParam(name="preStart", description="use this to create parameters that can be used by the script, e.g.:<p><code>var loopCount = 10</code>") String preStart,
-            @EffectorParam(name="scriptName", description="Name of the script as defined in mongodb.client.scripts") String scriptName);
-}
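
A hedged usage sketch for the config keys above (not part of this commit; the EntitySpec wiring and the script names/URLs are assumptions for illustration):

    import brooklyn.entity.nosql.mongodb.MongoDBClient;
    import brooklyn.entity.proxying.EntitySpec;

    import com.google.common.collect.ImmutableList;
    import com.google.common.collect.ImmutableMap;

    class MongoDBClientSpecSketch {
        // Hypothetical wiring: two named scripts, one of which runs at startup.
        static EntitySpec<MongoDBClient> clientSpec() {
            return EntitySpec.create(MongoDBClient.class)
                    .configure(MongoDBClient.JS_SCRIPTS, ImmutableMap.of(
                            "initDb", "classpath://scripts/init-db.js",      // hypothetical resources
                            "loadData", "classpath://scripts/load-data.js"))
                    .configure(MongoDBClient.STARTUP_JS_SCRIPTS, ImmutableList.of("initDb"));
        }
    }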

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBClientDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBClientDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBClientDriver.java
deleted file mode 100644
index cb78240..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBClientDriver.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import brooklyn.entity.basic.SoftwareProcessDriver;
-
-public interface MongoDBClientDriver extends SoftwareProcessDriver {
-    void runScript(String preStart, String scriptName);
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBClientImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBClientImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBClientImpl.java
deleted file mode 100644
index 034a928..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBClientImpl.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import brooklyn.entity.basic.SoftwareProcessImpl;
-import brooklyn.entity.trait.Startable;
-
-public class MongoDBClientImpl extends SoftwareProcessImpl implements MongoDBClient {
-    
-    @Override
-    protected void connectSensors() {
-        super.connectSensors();
-        setAttribute(Startable.SERVICE_UP, true);
-    }
-
-    @SuppressWarnings("rawtypes")
-    @Override
-    public Class getDriverInterface() {
-        return MongoDBClientDriver.class;
-    }
-
-    @Override
-    public void runScript(String preStart, String scriptName) {
-        ((MongoDBClientDriver)getDriver()).runScript(preStart, scriptName);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBClientSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBClientSshDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBClientSshDriver.java
deleted file mode 100644
index 3579ff2..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/mongodb/MongoDBClientSshDriver.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import java.util.List;
-import java.util.Map;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.entity.basic.EntityLocal;
-import brooklyn.entity.nosql.mongodb.sharding.MongoDBRouter;
-import brooklyn.entity.nosql.mongodb.sharding.MongoDBRouterCluster;
-import brooklyn.entity.nosql.mongodb.sharding.MongoDBShardedDeployment;
-import brooklyn.entity.trait.Startable;
-import brooklyn.event.basic.DependentConfiguration;
-import brooklyn.location.basic.SshMachineLocation;
-import brooklyn.util.exceptions.Exceptions;
-import brooklyn.util.math.MathPredicates;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicates;
-
-public class MongoDBClientSshDriver extends AbstractMongoDBSshDriver implements MongoDBClientDriver {
-    
-    private static final Logger LOG = LoggerFactory.getLogger(MongoDBClientSshDriver.class);
-
-    private boolean isRunning = false;
-
-    public MongoDBClientSshDriver(EntityLocal entity, SshMachineLocation machine) {
-        super(entity, machine);
-    }
-    
-    @Override
-    public void customize() {
-        String command = String.format("mkdir -p %s", getUserScriptDir());
-        newScript(CUSTOMIZING)
-            .updateTaskAndFailOnNonZeroResultCode()
-            .body.append(command).execute();
-        Map<String, String> scripts = entity.getConfig(MongoDBClient.JS_SCRIPTS);
-        for (String scriptName : scripts.keySet()) {
-            copyResource(scripts.get(scriptName), getUserScriptDir() + scriptName + ".js");
-        }
-    }
-
-    @Override
-    public void launch() {
-        AbstractMongoDBServer server = getServer();
-        // The scripts are going to be run on the machine via SSH so it shouldn't matter
-        // that the accessible host and port might be different.
-        String host = server.getAttribute(AbstractMongoDBServer.HOSTNAME);
-        Integer port = server.getAttribute(AbstractMongoDBServer.PORT);
-
-        List<String> scripts = entity.getConfig(MongoDBClient.STARTUP_JS_SCRIPTS);
-        if (scripts!=null) {
-            for (String scriptName : scripts) {
-                try {
-                    LOG.debug("Running MongoDB script "+scriptName+" at "+getEntity());
-                    runScript("", scriptName, host, port);
-                } catch (Exception e) {
-                    LOG.warn("Error running MongoDB script "+scriptName+" at "+getEntity()+", throwing: "+e);
-                    isRunning = false;
-                    Exceptions.propagateIfFatal(e);
-                    throw new IllegalStateException("Error running MongoDB script "+scriptName+" at "+entity+": "+e, e);
-                }
-            }
-        }
-        isRunning = true;
-    }
-    
-    @Override
-    public boolean isRunning() {
-        // TODO better would be to get some confirmation
-        return isRunning;
-    }
-    
-    @Override
-    public void stop() {
-        try {
-            super.stop();
-        } finally {
-            isRunning = false;
-        }
-    }
-    
-    private String getUserScriptDir() {
-        return getRunDir() + "/userScripts/";
-    }
-    
-    public void runScript(String preStart, String scriptName) {
-        AbstractMongoDBServer server = getServer();
-        String host = server.getAttribute(AbstractMongoDBServer.HOSTNAME);
-        Integer port = server.getAttribute(AbstractMongoDBServer.PORT);
-        runScript(preStart, scriptName, host, port);
-    }
-    
-    private void runScript(String preStart, String scriptName, String host, Integer port) {
-        // TODO: escape preStart to prevent injection attack
-        String command = String.format("%s/bin/mongo %s:%s --eval \"%s\" %s/%s > out.log 2> err.log < /dev/null", getExpandedInstallDir(), 
-                host, port, preStart, getUserScriptDir(), scriptName + ".js");
-        newScript(LAUNCHING)
-            .updateTaskAndFailOnNonZeroResultCode()
-            .body.append(command).execute();
-    }
-    
-    private AbstractMongoDBServer getServer() {
-        AbstractMongoDBServer server = entity.getConfig(MongoDBClient.SERVER);
-        MongoDBShardedDeployment deployment = entity.getConfig(MongoDBClient.SHARDED_DEPLOYMENT);
-        if (server == null) {
-            Preconditions.checkNotNull(deployment, "Either server or shardedDeployment must be specified for %s", this);
-            server = DependentConfiguration.builder()
-                    .attributeWhenReady(deployment.getRouterCluster(), MongoDBRouterCluster.ANY_ROUTER)
-                    .blockingDetails("any available router")
-                    .runNow();
-            DependentConfiguration.builder()
-                    .attributeWhenReady(server, MongoDBRouter.SHARD_COUNT)
-                    .readiness(MathPredicates.<Integer>greaterThan(0))
-                    .runNow();
-        } else {
-            if (deployment != null) {
-                log.warn("Server and ShardedDeployment defined for {}; using server {} instead of {}", 
-                        new Object[] {this, server, deployment});
-            }
-            DependentConfiguration.builder()
-                    .attributeWhenReady(server, Startable.SERVICE_UP)
-                    .readiness(Predicates.equalTo(true))
-                    .runNow();
-        }
-        return server;
-    }
-}
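
For reference, the command string that runScript assembles above, with hypothetical host, port, and script values (note the driver's own TODO: preStart is interpolated unescaped into the quoted --eval argument):

    // Mirrors the private runScript(...) above; all concrete values are hypothetical.
    public class MongoRunScriptSketch {
        public static void main(String[] args) {
            String installDir = "/home/user/brooklyn-managed/install/mongodb";
            String userScriptDir = "/home/user/brooklyn-managed/mongo/userScripts/";
            String command = String.format(
                    "%s/bin/mongo %s:%s --eval \"%s\" %s/%s > out.log 2> err.log < /dev/null",
                    installDir, "mongo-host.example.com", 27017,
                    "var loopCount = 10", userScriptDir, "initDb.js");
            System.out.println(command);
        }
    }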


[23/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraNodeImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraNodeImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraNodeImpl.java
deleted file mode 100644
index 5c7b8fd..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraNodeImpl.java
+++ /dev/null
@@ -1,594 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import java.io.IOException;
-import java.lang.reflect.Method;
-import java.math.BigInteger;
-import java.net.Socket;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-
-import javax.annotation.Nullable;
-import javax.management.ObjectName;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.enricher.RollingTimeWindowMeanEnricher;
-import brooklyn.enricher.TimeWeightedDeltaEnricher;
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.Attributes;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.basic.SoftwareProcessImpl;
-import brooklyn.entity.effector.EffectorBody;
-import brooklyn.entity.java.JavaAppUtils;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.DependentConfiguration;
-import brooklyn.event.basic.Sensors;
-import brooklyn.event.feed.function.FunctionFeed;
-import brooklyn.event.feed.function.FunctionPollConfig;
-import brooklyn.event.feed.jmx.JmxAttributePollConfig;
-import brooklyn.event.feed.jmx.JmxFeed;
-import brooklyn.event.feed.jmx.JmxHelper;
-import brooklyn.event.feed.jmx.JmxOperationPollConfig;
-import brooklyn.location.MachineLocation;
-import brooklyn.location.MachineProvisioningLocation;
-import brooklyn.location.basic.Machines;
-import brooklyn.location.cloud.CloudLocationConfig;
-import brooklyn.util.collections.MutableSet;
-import brooklyn.util.config.ConfigBag;
-import brooklyn.util.exceptions.Exceptions;
-import brooklyn.util.guava.Maybe;
-import brooklyn.util.text.Strings;
-import brooklyn.util.text.TemplateProcessor;
-import brooklyn.util.time.Duration;
-
-import com.google.common.base.Function;
-import com.google.common.base.Functions;
-import com.google.common.base.Joiner;
-import com.google.common.base.Predicate;
-import com.google.common.base.Predicates;
-import com.google.common.base.Splitter;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-
-/**
- * Implementation of {@link CassandraNode}.
- */
-public class CassandraNodeImpl extends SoftwareProcessImpl implements CassandraNode {
-
-    private static final Logger log = LoggerFactory.getLogger(CassandraNodeImpl.class);
-
-    private final AtomicReference<Boolean> detectedCloudSensors = new AtomicReference<Boolean>(false);
-    
-    public CassandraNodeImpl() {
-    }
-    
-    @Override
-    public void init() {
-        super.init();
-        
-        getMutableEntityType().addEffector(EXECUTE_SCRIPT, new EffectorBody<String>() {
-            @Override
-            public String call(ConfigBag parameters) {
-                return executeScript((String)parameters.getStringKey("commands"));
-            }
-        });
-        
-        Entities.checkRequiredUrl(this, getCassandraConfigTemplateUrl());
-        Entities.getRequiredUrlConfig(this, CASSANDRA_RACKDC_CONFIG_TEMPLATE_URL);
-        
-        connectEnrichers();
-    }
-    
-    /**
-     * Some clouds (e.g. Rackspace) give us VMs that have two NICs: one for the private IP and one for the public IP.
-     * If the private IP is used then it doesn't work, even for a cluster purely internal to Rackspace!
-     * 
-     * TODO Ugly. Need to understand more and find a better fix. Perhaps in Cassandra itself if necessary.
-     * Also need to investigate further:
-     *  - does it still fail if BroadcastAddress is set to private IP?
-     *  - is `openIptables` opening it up for both interfaces?
-     *  - for aws->rackspace comms between nodes (thus using the public IP), will it be listening on an accessible port?
-     *  - ideally do a check, open a server on one port on the machine, see if it is contactable on the public address;
-     *    and set that as a flag on the cloud
-     */
-    protected void setCloudPreferredSensorNames() {
-        if (detectedCloudSensors.get()) return;
-        synchronized (detectedCloudSensors) {
-            if (detectedCloudSensors.get()) return;
-
-            MachineProvisioningLocation<?> loc = getProvisioningLocation();
-            if (loc != null) {
-                try {
-                    Method method = loc.getClass().getMethod("getProvider");
-                    method.setAccessible(true);
-                    String provider = (String) method.invoke(loc);
-                    String result = "(nothing special)";
-                    if (provider!=null) {
-                        if (provider.contains("rackspace") || provider.contains("cloudservers") || provider.contains("softlayer")) {
-                            /* These clouds have 2 NICs and it has to be consistent, so use public IP here to allow external access;
-                             * (TODO internal access could be configured to improve performance / lower cost, 
-                             * if we know all nodes are visible to each other) */
-                            if (getConfig(LISTEN_ADDRESS_SENSOR)==null)
-                                setConfig(LISTEN_ADDRESS_SENSOR, CassandraNode.ADDRESS.getName());
-                            if (getConfig(BROADCAST_ADDRESS_SENSOR)==null)
-                                setConfig(BROADCAST_ADDRESS_SENSOR, CassandraNode.ADDRESS.getName());
-                            result = "public IP for both listen and broadcast";
-                        } else if (provider.contains("google-compute")) {
-                            /* Google nodes cannot reach themselves/each-other on the public IP,
-                             * and there is no hostname, so use private IP here */
-                            if (getConfig(LISTEN_ADDRESS_SENSOR)==null)
-                                setConfig(LISTEN_ADDRESS_SENSOR, CassandraNode.SUBNET_HOSTNAME.getName());
-                            if (getConfig(BROADCAST_ADDRESS_SENSOR)==null)
-                                setConfig(BROADCAST_ADDRESS_SENSOR, CassandraNode.SUBNET_HOSTNAME.getName());
-                            result = "private IP for both listen and broadcast";
-                        }
-                    }
-                    log.debug("Cassandra NICs inferred {} for {}; using location {}, based on provider {}", new Object[] {result, this, loc, provider});
-                } catch (Exception e) {
-                    log.debug("Cassandra NICs auto-detection failed for {} in location {}: {}", new Object[] {this, loc, e});
-                }
-            }
-            detectedCloudSensors.set(true);
-        }
-    }
-    
-    @Override
-    protected void preStart() {
-        super.preStart();
-        setCloudPreferredSensorNames();
-    }
-    
-    // Used for freemarker
-    public String getMajorMinorVersion() {
-        String version = getConfig(CassandraNode.SUGGESTED_VERSION);
-        if (Strings.isBlank(version)) return "";
-        List<String> versionParts = ImmutableList.copyOf(Splitter.on(".").split(version));
-        return versionParts.get(0) + (versionParts.size() > 1 ? "."+versionParts.get(1) : "");
-    }
-    
-    public String getCassandraConfigTemplateUrl() {
-        String templatedUrl = getConfig(CassandraNode.CASSANDRA_CONFIG_TEMPLATE_URL);
-        return TemplateProcessor.processTemplateContents(templatedUrl, this, ImmutableMap.<String, Object>of());
-    }
-
-    @Override public Integer getGossipPort() { return getAttribute(CassandraNode.GOSSIP_PORT); }
-    @Override public Integer getSslGossipPort() { return getAttribute(CassandraNode.SSL_GOSSIP_PORT); }
-    @Override public Integer getThriftPort() { return getAttribute(CassandraNode.THRIFT_PORT); }
-    @Override public Integer getNativeTransportPort() { return getAttribute(CassandraNode.NATIVE_TRANSPORT_PORT); }
-    @Override public String getClusterName() { return getAttribute(CassandraNode.CLUSTER_NAME); }
-    
-    @Override public int getNumTokensPerNode() {
-        return getConfig(CassandraNode.NUM_TOKENS_PER_NODE);
-    }
-
-    @Deprecated
-    @Override public BigInteger getToken() {
-        BigInteger token = getAttribute(CassandraNode.TOKEN);
-        if (token == null) {
-            token = getConfig(CassandraNode.TOKEN);
-        }
-        return token;
-    }
-    
-    @Override public Set<BigInteger> getTokens() {
-        // Prefer an already-set attribute over the config.
-        // Prefer TOKENS over TOKEN.
-        Set<BigInteger> tokens = getAttribute(CassandraNode.TOKENS);
-        if (tokens == null) {
-            BigInteger token = getAttribute(CassandraNode.TOKEN);
-            if (token != null) {
-                tokens = ImmutableSet.of(token);
-            }
-        }
-        if (tokens == null) {
-            tokens = getConfig(CassandraNode.TOKENS);
-        }
-        if (tokens == null) {
-            BigInteger token = getConfig(CassandraNode.TOKEN);
-            if (token != null) {
-                tokens = ImmutableSet.of(token);
-            }
-        }
-        return tokens;
-    }
-    
-    @Deprecated
-    @Override public String getTokenAsString() {
-        BigInteger token = getToken();
-        if (token==null) return "";
-        return ""+token;
-    }
-
-    @Override public String getTokensAsString() {
-        // TODO check what is required when replacing failed node.
-        // with vnodes in Cassandra 2.x, don't bother supplying token
-        Set<BigInteger> tokens = getTokens();
-        if (tokens == null) return "";
-        return Joiner.on(",").join(tokens);
-    }
-    
-    @Override public String getListenAddress() {
-        String sensorName = getConfig(LISTEN_ADDRESS_SENSOR);
-        if (Strings.isNonBlank(sensorName))
-            return Entities.submit(this, DependentConfiguration.attributeWhenReady(this, Sensors.newStringSensor(sensorName))).getUnchecked();
-        
-        String subnetAddress = getAttribute(CassandraNode.SUBNET_ADDRESS);
-        return Strings.isNonBlank(subnetAddress) ? subnetAddress : getAttribute(CassandraNode.ADDRESS);
-    }
-    @Override public String getBroadcastAddress() {
-        String sensorName = getConfig(BROADCAST_ADDRESS_SENSOR);
-        if (Strings.isNonBlank(sensorName))
-            return Entities.submit(this, DependentConfiguration.attributeWhenReady(this, Sensors.newStringSensor(sensorName))).getUnchecked();
-        
-        String snitchName = getConfig(CassandraNode.ENDPOINT_SNITCH_NAME);
-        if (snitchName.equals("Ec2MultiRegionSnitch") || snitchName.contains("MultiCloudSnitch")) {
-            // http://www.datastax.com/documentation/cassandra/2.0/mobile/cassandra/architecture/architectureSnitchEC2MultiRegion_c.html
-            // describes that the listen_address is set to the private IP, and the broadcast_address is set to the public IP.
-            return getAttribute(CassandraNode.ADDRESS);
-        } else if (!getDriver().isClustered()) {
-            return getListenAddress();
-        } else {
-            // In other situations, prefer the hostname, so other regions can see it
-            // *Unless* hostname resolves at the target to a local-only interface which is different to ADDRESS
-            // (workaround for issue deploying to localhost)
-            String hostname = getAttribute(CassandraNode.HOSTNAME);
-            try {
-                String resolvedAddress = getDriver().getResolvedAddress(hostname);
-                if (resolvedAddress==null) {
-                    log.debug("Cassandra using broadcast address "+getListenAddress()+" for "+this+" because hostname "+hostname+" could not be resolved at remote machine");
-                    return getListenAddress();
-                }
-                if (resolvedAddress.equals("127.0.0.1")) {
-                    log.debug("Cassandra using broadcast address "+getListenAddress()+" for "+this+" because hostname "+hostname+" resolves to 127.0.0.1");
-                    return getListenAddress();                    
-                }
-                return hostname;
-            } catch (Exception e) {
-                Exceptions.propagateIfFatal(e);
-                log.warn("Error resolving hostname "+hostname+" for "+this+": "+e, e);
-                return hostname;
-            }
-        }
-    }
-    /** Not always the private IP: the public IP may have been insisted on for broadcast, e.g. when setting up a rack topology. */
-    // have not confirmed this does the right thing in all clouds ... only used for rack topology however
-    public String getPrivateIp() {
-        String sensorName = getConfig(BROADCAST_ADDRESS_SENSOR);
-        if (Strings.isNonBlank(sensorName)) {
-            return getAttribute(Sensors.newStringSensor(sensorName));
-        } else {
-            String subnetAddress = getAttribute(CassandraNode.SUBNET_ADDRESS);
-            return Strings.isNonBlank(subnetAddress) ? subnetAddress : getAttribute(CassandraNode.ADDRESS);
-        }
-    }
-    public String getPublicIp() {
-        // may need to be something else in google
-        return getAttribute(CassandraNode.ADDRESS);
-    }
-
-    @Override public String getRpcAddress() {
-        String sensorName = getConfig(RPC_ADDRESS_SENSOR);
-        if (Strings.isNonBlank(sensorName))
-            return Entities.submit(this, DependentConfiguration.attributeWhenReady(this, Sensors.newStringSensor(sensorName))).getUnchecked();
-        return "0.0.0.0";
-    }
-    
-    @Override public String getSeeds() { 
-        Set<Entity> seeds = getConfig(CassandraNode.INITIAL_SEEDS);
-        if (seeds==null) {
-            log.warn("No seeds available when requested for "+this, new Throwable("source of no Cassandra seeds when requested"));
-            return null;
-        }
-        String snitchName = getConfig(CassandraNode.ENDPOINT_SNITCH_NAME);
-        MutableSet<String> seedsHostnames = MutableSet.of();
-        for (Entity entity : seeds) {
-            // tried removing ourselves if there are other nodes, but that is a BAD idea!
-            // blows up with a "java.lang.RuntimeException: No other nodes seen!"
-            
-            if (snitchName.equals("Ec2MultiRegionSnitch") || snitchName.contains("MultiCloudSnitch")) {
-                // http://www.datastax.com/documentation/cassandra/2.0/mobile/cassandra/architecture/architectureSnitchEC2MultiRegion_c.html
-                // says the seeds should be public IPs.
-                seedsHostnames.add(entity.getAttribute(CassandraNode.ADDRESS));
-            } else {
-                String sensorName = getConfig(BROADCAST_ADDRESS_SENSOR);
-                if (Strings.isNonBlank(sensorName)) {
-                    seedsHostnames.add(entity.getAttribute(Sensors.newStringSensor(sensorName)));
-                } else {
-                    Maybe<String> optionalSeedHostname = Machines.findSubnetOrPublicHostname(entity);
-                    if (optionalSeedHostname.isPresent()) {
-                        String seedHostname = optionalSeedHostname.get();
-                        seedsHostnames.add(seedHostname);
-                    } else {
-                        log.warn("In node {}, seed hostname missing for {}; not including in seeds list", this, entity);
-                    }
-                }
-            }
-        }
-        
-        String result = Strings.join(seedsHostnames, ",");
-        log.info("Seeds for {}: {}", this, result);
-        return result;
-    }
-
-    // referenced by cassandra-rackdc.properties, read by some of the cassandra snitches
-    public String getDatacenterName() {
-        String name = getAttribute(CassandraNode.DATACENTER_NAME);
-        if (name == null) {
-            MachineLocation machine = getMachineOrNull();
-            MachineProvisioningLocation<?> provisioningLocation = getProvisioningLocation();
-            if (machine != null) {
-                name = machine.getConfig(CloudLocationConfig.CLOUD_REGION_ID);
-            }
-            if (name == null && provisioningLocation != null) {
-                name = provisioningLocation.getConfig(CloudLocationConfig.CLOUD_REGION_ID);
-            }
-            if (name == null) {
-                name = "UNKNOWN_DATACENTER";
-            }
-            setAttribute((AttributeSensor<String>)DATACENTER_NAME, name);
-        }
-        return name;
-    }
-
-    public String getRackName() {
-        String name = getAttribute(CassandraNode.RACK_NAME);
-        if (name == null) {
-            MachineLocation machine = getMachineOrNull();
-            MachineProvisioningLocation<?> provisioningLocation = getProvisioningLocation();
-            if (machine != null) {
-                name = machine.getConfig(CloudLocationConfig.CLOUD_AVAILABILITY_ZONE_ID);
-            }
-            if (name == null && provisioningLocation != null) {
-                name = provisioningLocation.getConfig(CloudLocationConfig.CLOUD_AVAILABILITY_ZONE_ID);
-            }
-            if (name == null) {
-                name = "UNKNOWN_RACK";
-            }
-            setAttribute((AttributeSensor<String>)RACK_NAME, name);
-        }
-        return name;
-    }
-
-    @Override
-    public Class<? extends CassandraNodeDriver> getDriverInterface() {
-        return CassandraNodeDriver.class;
-    }
-    
-    @Override
-    public CassandraNodeDriver getDriver() {
-        return (CassandraNodeDriver) super.getDriver();
-    }
-
-    private volatile JmxFeed jmxFeed;
-    private volatile FunctionFeed functionFeed;
-    private JmxFeed jmxMxBeanFeed;
-    private JmxHelper jmxHelper;
-    private ObjectName storageServiceMBean = JmxHelper.createObjectName("org.apache.cassandra.db:type=StorageService");
-    private ObjectName readStageMBean = JmxHelper.createObjectName("org.apache.cassandra.request:type=ReadStage");
-    private ObjectName mutationStageMBean = JmxHelper.createObjectName("org.apache.cassandra.request:type=MutationStage");
-    private ObjectName snitchMBean = JmxHelper.createObjectName("org.apache.cassandra.db:type=EndpointSnitchInfo");
-
-    
-    @SuppressWarnings({ "unchecked", "rawtypes" })
-    @Override
-    protected void connectSensors() {
-        // "cassandra" isn't really a protocol, but okay for now
-        setAttribute(DATASTORE_URL, "cassandra://"+getAttribute(HOSTNAME)+":"+getAttribute(THRIFT_PORT));
-        
-        super.connectSensors();
-
-        jmxHelper = new JmxHelper(this);
-        jmxFeed = JmxFeed.builder()
-                .entity(this)
-                .period(3000, TimeUnit.MILLISECONDS)
-                .helper(jmxHelper)
-                .pollAttribute(new JmxAttributePollConfig<Boolean>(SERVICE_UP_JMX)
-                        .objectName(storageServiceMBean)
-                        .attributeName("Initialized")
-                        .onSuccess(Functions.forPredicate(Predicates.notNull()))
-                        .onException(Functions.constant(false)))
-                .pollAttribute(new JmxAttributePollConfig<Set<BigInteger>>(TOKENS)
-                        .objectName(storageServiceMBean)
-                        .attributeName("TokenToEndpointMap")
-                        .onSuccess(new Function<Object, Set<BigInteger>>() {
-                            @Override
-                            public Set<BigInteger> apply(@Nullable Object arg) {
-                                Map input = (Map)arg;
-                                if (input == null || input.isEmpty()) return null;
-                                // FIXME does not work on aws-ec2, uses RFC1918 address
-                                Predicate<String> self = Predicates.in(ImmutableList.of(getAttribute(HOSTNAME), getAttribute(ADDRESS), getAttribute(SUBNET_ADDRESS), getAttribute(SUBNET_HOSTNAME)));
-                                Set<String> tokens = Maps.filterValues(input, self).keySet();
-                                Set<BigInteger> result = Sets.newLinkedHashSet();
-                                for (String token : tokens) {
-                                    result.add(new BigInteger(token));
-                                }
-                                return result;
-                            }})
-                        .onException(Functions.<Set<BigInteger>>constant(null)))
-                .pollAttribute(new JmxAttributePollConfig<BigInteger>(TOKEN)
-                        .objectName(storageServiceMBean)
-                        .attributeName("TokenToEndpointMap")
-                        .onSuccess(new Function<Object, BigInteger>() {
-                            @Override
-                            public BigInteger apply(@Nullable Object arg) {
-                                Map input = (Map)arg;
-                                // TODO remove duplication from setting TOKENS
-                                if (input == null || input.isEmpty()) return null;
-                                // FIXME does not work on aws-ec2, uses RFC1918 address
-                                Predicate<String> self = Predicates.in(ImmutableList.of(getAttribute(HOSTNAME), getAttribute(ADDRESS), getAttribute(SUBNET_ADDRESS), getAttribute(SUBNET_HOSTNAME)));
-                                Set<String> tokens = Maps.filterValues(input, self).keySet();
-                                String token = Iterables.getFirst(tokens, null);
-                                return (token != null) ? new BigInteger(token) : null;
-                            }})
-                        .onException(Functions.<BigInteger>constant(null)))
-                .pollOperation(new JmxOperationPollConfig<String>(DATACENTER_NAME)
-                        .period(60, TimeUnit.SECONDS)
-                        .objectName(snitchMBean)
-                        .operationName("getDatacenter")
-                        .operationParams(ImmutableList.of(getBroadcastAddress()))
-                        .onException(Functions.<String>constant(null)))
-                .pollOperation(new JmxOperationPollConfig<String>(RACK_NAME)
-                        .period(60, TimeUnit.SECONDS)
-                        .objectName(snitchMBean)
-                        .operationName("getRack")
-                        .operationParams(ImmutableList.of(getBroadcastAddress()))
-                        .onException(Functions.<String>constant(null)))
-                .pollAttribute(new JmxAttributePollConfig<Integer>(PEERS)
-                        .objectName(storageServiceMBean)
-                        .attributeName("TokenToEndpointMap")
-                        .onSuccess(new Function<Object, Integer>() {
-                            @Override
-                            public Integer apply(@Nullable Object arg) {
-                                Map input = (Map)arg;
-                                if (input == null || input.isEmpty()) return 0;
-                                return input.size();
-                            }
-                        })
-                        .onException(Functions.constant(-1)))
-                .pollAttribute(new JmxAttributePollConfig<Integer>(LIVE_NODE_COUNT)
-                        .objectName(storageServiceMBean)
-                        .attributeName("LiveNodes")
-                        .onSuccess(new Function<Object, Integer>() {
-                            @Override
-                            public Integer apply(@Nullable Object arg) {
-                                List input = (List)arg;
-                                if (input == null || input.isEmpty()) return 0;
-                                return input.size();
-                            }
-                        })
-                        .onException(Functions.constant(-1)))
-                .pollAttribute(new JmxAttributePollConfig<Integer>(READ_ACTIVE)
-                        .objectName(readStageMBean)
-                        .attributeName("ActiveCount")
-                        .onException(Functions.constant((Integer)null)))
-                .pollAttribute(new JmxAttributePollConfig<Long>(READ_PENDING)
-                        .objectName(readStageMBean)
-                        .attributeName("PendingTasks")
-                        .onException(Functions.constant((Long)null)))
-                .pollAttribute(new JmxAttributePollConfig<Long>(READ_COMPLETED)
-                        .objectName(readStageMBean)
-                        .attributeName("CompletedTasks")
-                        .onException(Functions.constant((Long)null)))
-                .pollAttribute(new JmxAttributePollConfig<Integer>(WRITE_ACTIVE)
-                        .objectName(mutationStageMBean)
-                        .attributeName("ActiveCount")
-                        .onException(Functions.constant((Integer)null)))
-                .pollAttribute(new JmxAttributePollConfig<Long>(WRITE_PENDING)
-                        .objectName(mutationStageMBean)
-                        .attributeName("PendingTasks")
-                        .onException(Functions.constant((Long)null)))
-                .pollAttribute(new JmxAttributePollConfig<Long>(WRITE_COMPLETED)
-                        .objectName(mutationStageMBean)
-                        .attributeName("CompletedTasks")
-                        .onException(Functions.constant((Long)null)))
-                .build();
-        
-        functionFeed = FunctionFeed.builder()
-                .entity(this)
-                .period(3000, TimeUnit.MILLISECONDS)
-                .poll(new FunctionPollConfig<Long, Long>(THRIFT_PORT_LATENCY)
-                        .onException(Functions.constant((Long)null))
-                        .callable(new Callable<Long>() {
-                            public Long call() {
-                                try {
-                                    long start = System.currentTimeMillis();
-                                    Socket s = new Socket(getAttribute(Attributes.HOSTNAME), getThriftPort());
-                                    s.close();
-                                    long latency = System.currentTimeMillis() - start;
-                                    computeServiceUp();
-                                    return latency;
-                                } catch (Exception e) {
-                                    if (log.isDebugEnabled())
-                                        log.debug("Cassandra thrift port poll failure: "+e);
-                                    setAttribute(SERVICE_UP, false);
-                                    return null;
-                                }
-                            }
-                            public void computeServiceUp() {
-                                // this will wait an additional poll period after thrift port is up,
-                                // as the caller will not have set it yet, but that will help ensure it is really healthy!
-                                setAttribute(SERVICE_UP,
-                                        getAttribute(THRIFT_PORT_LATENCY)!=null && getAttribute(THRIFT_PORT_LATENCY)>=0 && 
-                                        Boolean.TRUE.equals(getAttribute(SERVICE_UP_JMX)));
-                            }
-                        }))
-                .build();
-        
-        jmxMxBeanFeed = JavaAppUtils.connectMXBeanSensors(this);
-    }
-    
-    protected void connectEnrichers() {
-        connectEnrichers(Duration.TEN_SECONDS);
-    }
-    
-    protected void connectEnrichers(Duration windowPeriod) {
-        JavaAppUtils.connectJavaAppServerPolicies(this);
-
-        addEnricher(TimeWeightedDeltaEnricher.<Long>getPerSecondDeltaEnricher(this, READ_COMPLETED, READS_PER_SECOND_LAST));
-        addEnricher(TimeWeightedDeltaEnricher.<Long>getPerSecondDeltaEnricher(this, WRITE_COMPLETED, WRITES_PER_SECOND_LAST));
-        
-        if (windowPeriod!=null) {
-            addEnricher(new RollingTimeWindowMeanEnricher<Long>(this, THRIFT_PORT_LATENCY, 
-                    THRIFT_PORT_LATENCY_IN_WINDOW, windowPeriod));
-            addEnricher(new RollingTimeWindowMeanEnricher<Double>(this, READS_PER_SECOND_LAST, 
-                    READS_PER_SECOND_IN_WINDOW, windowPeriod));
-            addEnricher(new RollingTimeWindowMeanEnricher<Double>(this, WRITES_PER_SECOND_LAST, 
-                    WRITES_PER_SECOND_IN_WINDOW, windowPeriod));
-        }
-    }
-    
-    @Override
-    public void disconnectSensors() {
-        super.disconnectSensors();
-
-        if (jmxFeed != null) jmxFeed.stop();
-        if (jmxMxBeanFeed != null) jmxMxBeanFeed.stop();
-        if (jmxHelper != null) jmxHelper.terminate();
-        if (functionFeed != null) functionFeed.stop();
-    }
-
-    @Override
-    public void setToken(String token) {
-        try {
-            if (!jmxHelper.isConnected()) jmxHelper.connect();
-            jmxHelper.operation(storageServiceMBean, "move", token);
-            log.info("Moved server {} to token {}", getId(), token);
-        } catch (IOException ioe) {
-            Throwables.propagate(ioe);
-        }
-    }
-    
-    @Override
-    public String executeScript(String commands) {
-        return getDriver().executeScriptAsync(commands).block().getStdout();
-    }
-    
-}
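
The TOKENS and TOKEN polls above both reduce to filtering Cassandra's TokenToEndpointMap by this node's own addresses. A dependency-free sketch of that filtering, with invented tokens and addresses:

    import java.math.BigInteger;
    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.LinkedHashMap;
    import java.util.LinkedHashSet;
    import java.util.Map;
    import java.util.Set;

    // Mirrors the onSuccess function of the TOKENS poll; all data is invented.
    public class TokenMapSketch {
        public static void main(String[] args) {
            Map<String, String> tokenToEndpoint = new LinkedHashMap<String, String>();
            tokenToEndpoint.put("0", "10.0.0.1");
            tokenToEndpoint.put("85070591730234615865843651857942052864", "10.0.0.2");
            Set<String> selfAddresses = new HashSet<String>(
                    Arrays.asList("10.0.0.1", "cass1.example.com"));  // hostname/address sensor values
            Set<BigInteger> ownTokens = new LinkedHashSet<BigInteger>();
            for (Map.Entry<String, String> entry : tokenToEndpoint.entrySet()) {
                if (selfAddresses.contains(entry.getValue())) {
                    ownTokens.add(new BigInteger(entry.getKey()));
                }
            }
            System.out.println(ownTokens);  // [0]
        }
    }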

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraNodeSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraNodeSshDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraNodeSshDriver.java
deleted file mode 100644
index 44651ba..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraNodeSshDriver.java
+++ /dev/null
@@ -1,420 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkNotNull;
-
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.Attributes;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.basic.EntityLocal;
-import brooklyn.entity.database.DatastoreMixins;
-import brooklyn.entity.java.JavaSoftwareProcessSshDriver;
-import brooklyn.entity.java.UsesJmx;
-import brooklyn.entity.software.SshEffectorTasks;
-import brooklyn.event.basic.DependentConfiguration;
-import brooklyn.location.Location;
-import brooklyn.location.access.BrooklynAccessUtils;
-import brooklyn.location.basic.Machines;
-import brooklyn.location.basic.SshMachineLocation;
-import brooklyn.management.TaskWrapper;
-import brooklyn.util.collections.MutableMap;
-import brooklyn.util.collections.MutableSet;
-import brooklyn.util.exceptions.Exceptions;
-import brooklyn.util.guava.Maybe;
-import brooklyn.util.net.Networking;
-import brooklyn.util.os.Os;
-import brooklyn.util.ssh.BashCommands;
-import brooklyn.util.stream.Streams;
-import brooklyn.util.task.DynamicTasks;
-import brooklyn.util.task.Tasks;
-import brooklyn.util.task.system.ProcessTaskWrapper;
-import brooklyn.util.text.Identifiers;
-import brooklyn.util.text.Strings;
-import brooklyn.util.text.TemplateProcessor;
-import brooklyn.util.time.Duration;
-import brooklyn.util.time.Time;
-
-/**
- * Start a {@link CassandraNode} in a {@link Location} accessible over ssh.
- */
-public class CassandraNodeSshDriver extends JavaSoftwareProcessSshDriver implements CassandraNodeDriver {
-
-    private static final Logger log = LoggerFactory.getLogger(CassandraNodeSshDriver.class);
-
-    protected Maybe<String> resolvedAddressCache = Maybe.absent();
-
-    public CassandraNodeSshDriver(CassandraNodeImpl entity, SshMachineLocation machine) {
-        super(entity, machine);
-    }
-
-    @Override
-    protected String getLogFileLocation() { return Os.mergePathsUnix(getRunDir(),"cassandra.log"); }
-
-    @Override
-    public Integer getGossipPort() { return entity.getAttribute(CassandraNode.GOSSIP_PORT); }
-
-    @Override
-    public Integer getSslGossipPort() { return entity.getAttribute(CassandraNode.SSL_GOSSIP_PORT); }
-
-    @Override
-    public Integer getThriftPort() { return entity.getAttribute(CassandraNode.THRIFT_PORT); }
-
-    @Override
-    public Integer getNativeTransportPort() { return entity.getAttribute(CassandraNode.NATIVE_TRANSPORT_PORT); }
-
-    @Override
-    public String getClusterName() { return entity.getAttribute(CassandraNode.CLUSTER_NAME); }
-
-    @Override
-    public String getCassandraConfigTemplateUrl() {
-        String templatedUrl = entity.getConfig(CassandraNode.CASSANDRA_CONFIG_TEMPLATE_URL);
-        return TemplateProcessor.processTemplateContents(templatedUrl, this, ImmutableMap.<String, Object>of());
-    }
-
-    @Override
-    public String getCassandraConfigFileName() { return entity.getConfig(CassandraNode.CASSANDRA_CONFIG_FILE_NAME); }
-
-    public String getEndpointSnitchName() { return entity.getConfig(CassandraNode.ENDPOINT_SNITCH_NAME); }
-
-    public String getCassandraRackdcConfigTemplateUrl() { return entity.getConfig(CassandraNode.CASSANDRA_RACKDC_CONFIG_TEMPLATE_URL); }
-
-    public String getCassandraRackdcConfigFileName() { return entity.getConfig(CassandraNode.CASSANDRA_RACKDC_CONFIG_FILE_NAME); }
-
-    public String getMirrorUrl() { return entity.getConfig(CassandraNode.MIRROR_URL); }
-
-    protected String getDefaultUnpackedDirectoryName() {
-        return "apache-cassandra-"+getVersion();
-    }
-
-    protected boolean isV2() {
-        String version = getVersion();
-        return version.startsWith("2.");
-    }
-
-    @Override
-    public boolean installJava() {
-        if (isV2()) {
-            return checkForAndInstallJava("1.8");
-        } else {
-            return super.installJava();
-        }
-    }
-
-    @Override
-    public void preInstall() {
-        resolver = Entities.newDownloader(this);
-        setExpandedInstallDir(Os.mergePaths(getInstallDir(), resolver.getUnpackedDirectoryName(getDefaultUnpackedDirectoryName())));
-    }
-
-    @Override
-    public void install() {
-        List<String> urls = resolver.getTargets();
-        String saveAs = resolver.getFilename();
-
-        List<String> commands = ImmutableList.<String>builder()
-                .addAll(BashCommands.commandsToDownloadUrlsAs(urls, saveAs))
-                .add(BashCommands.INSTALL_TAR)
-                .add("tar xzfv " + saveAs)
-                .build();
-
-        newScript(INSTALLING)
-                .body.append(commands)
-                .execute();
-    }
-
-    @Override
-    public Set<Integer> getPortsUsed() {
-        return ImmutableSet.<Integer>builder()
-                .addAll(super.getPortsUsed())
-                .addAll(getPortMap().values())
-                .build();
-    }
-
-    protected Map<String, Integer> getPortMap() {
-        return ImmutableMap.<String, Integer>builder()
-                .put("jmxPort", entity.getAttribute(UsesJmx.JMX_PORT))
-                .put("rmiPort", entity.getAttribute(UsesJmx.RMI_REGISTRY_PORT))
-                .put("gossipPort", getGossipPort())
-                .put("sslGossipPort", getSslGossipPort())
-                .put("thriftPort", getThriftPort())
-                .build();
-    }
-
-    @Override
-    public void customize() {
-        log.debug("Customizing {} (Cluster {})", entity, getClusterName());
-        Networking.checkPortsValid(getPortMap());
-
-        customizeInitialSeeds();
-
-        String logFileEscaped = getLogFileLocation().replace("/", "\\/"); // escape slashes
-
-        ImmutableList.Builder<String> commands = new ImmutableList.Builder<String>()
-                .add(String.format("cp -R %s/{bin,conf,lib,interface,pylib,tools} .", getExpandedInstallDir()))
-                .add("mkdir -p data")
-                .add("mkdir -p brooklyn_commands")
-                .add(String.format("sed -i.bk 's/log4j.appender.R.File=.*/log4j.appender.R.File=%s/g' %s/conf/log4j-server.properties", logFileEscaped, getRunDir()))
-                .add(String.format("sed -i.bk '/JMX_PORT/d' %s/conf/cassandra-env.sh", getRunDir()))
-                // Script sets 180k on Linux which gives Java error:  The stack size specified is too small, Specify at least 228k
-                .add(String.format("sed -i.bk 's/-Xss180k/-Xss280k/g' %s/conf/cassandra-env.sh", getRunDir()));
-
-        newScript(CUSTOMIZING)
-                .body.append(commands.build())
-                .failOnNonZeroResultCode()
-                .execute();
-
-        // Copy the cassandra.yaml configuration file across
-        String destinationConfigFile = Os.mergePathsUnix(getRunDir(), "conf", getCassandraConfigFileName());
-        copyTemplate(getCassandraConfigTemplateUrl(), destinationConfigFile);
-
-        // Copy the cassandra-rackdc.properties configuration file across
-        String rackdcDestinationFile = Os.mergePathsUnix(getRunDir(), "conf", getCassandraRackdcConfigFileName());
-        copyTemplate(getCassandraRackdcConfigTemplateUrl(), rackdcDestinationFile);
-
-        customizeCopySnitch();
-    }
-
-    protected void customizeCopySnitch() {
-        // Copy the custom snitch jar file across
-        String customSnitchJarUrl = entity.getConfig(CassandraNode.CUSTOM_SNITCH_JAR_URL);
-        if (Strings.isNonBlank(customSnitchJarUrl)) {
-            int lastSlashIndex = customSnitchJarUrl.lastIndexOf("/");
-            String customSnitchJarName = (lastSlashIndex > 0) ? customSnitchJarUrl.substring(lastSlashIndex+1) : "customBrooklynSnitch.jar";
-            String jarDestinationFile = Os.mergePathsUnix(getRunDir(), "lib", customSnitchJarName);
-            InputStream customSnitchJarStream = checkNotNull(resource.getResourceFromUrl(customSnitchJarUrl), "%s could not be loaded", customSnitchJarUrl);
-            try {
-                getMachine().copyTo(customSnitchJarStream, jarDestinationFile);
-            } finally {
-                Streams.closeQuietly(customSnitchJarStream);
-            }
-        }
-    }
-
-    protected void customizeInitialSeeds() {
-        if (entity.getConfig(CassandraNode.INITIAL_SEEDS)==null) {
-            if (isClustered()) {
-                entity.setConfig(CassandraNode.INITIAL_SEEDS,
-                    DependentConfiguration.attributeWhenReady(entity.getParent(), CassandraDatacenter.CURRENT_SEEDS));
-            } else {
-                entity.setConfig(CassandraNode.INITIAL_SEEDS, MutableSet.<Entity>of(entity));
-            }
-        }
-    }
-
-    @Override
-    public boolean isClustered() {
-        return entity.getParent() instanceof CassandraDatacenter;
-    }
-
-    @Override
-    public void launch() {
-        String subnetHostname = Machines.findSubnetOrPublicHostname(entity).get();
-        Set<Entity> seeds = getEntity().getConfig(CassandraNode.INITIAL_SEEDS);
-        List<Entity> ancestors = getCassandraAncestors();
-        log.info("Launching " + entity + ": " +
-                "cluster "+getClusterName()+", " +
-                "hostname (public) " + getEntity().getAttribute(Attributes.HOSTNAME) + ", " +
-                "hostname (subnet) " + subnetHostname + ", " +
-                "seeds "+((CassandraNode)entity).getSeeds()+" (from "+seeds+")");
-
-        boolean isFirst = seeds.iterator().next().equals(entity);
-        if (isClustered() && !isFirst && CassandraDatacenter.WAIT_FOR_FIRST) {
-            // wait for the first node
-            long firstStartTime = Entities.submit(entity, DependentConfiguration.attributeWhenReady(
-                ancestors.get(ancestors.size()-1), CassandraDatacenter.FIRST_NODE_STARTED_TIME_UTC)).getUnchecked();
-            // optionally force a delay before starting subsequent nodes; see comment at CassandraDatacenter.DELAY_AFTER_FIRST
-            Duration toWait = Duration.millis(firstStartTime + CassandraDatacenter.DELAY_AFTER_FIRST.toMilliseconds() - System.currentTimeMillis());
-            if (toWait.toMilliseconds()>0) {
-                log.info("Launching " + entity + ": delaying launch of non-first node by "+toWait+" to prevent schema disagreements");
-                Tasks.setBlockingDetails("Pausing to ensure first node has time to start");
-                Time.sleep(toWait);
-                Tasks.resetBlockingDetails();
-            }
-        }
-
-        List<Entity> queuedStart = null;
-        if (CassandraDatacenter.DELAY_BETWEEN_STARTS!=null && !ancestors.isEmpty()) {
-            Entity root = ancestors.get(ancestors.size()-1);
-            // TODO currently uses the class as a semaphore; messy, and obviously will not federate;
-            // should develop a brooklyn framework semaphore (similar to that done on SshMachineLocation)
-            // and use it - note however the synchronized block is very short, so this is relatively safe at least
-            synchronized (CassandraNode.class) {
-                queuedStart = root.getAttribute(CassandraDatacenter.QUEUED_START_NODES);
-                if (queuedStart==null) {
-                    queuedStart = new ArrayList<Entity>();
-                    ((EntityLocal)root).setAttribute(CassandraDatacenter.QUEUED_START_NODES, queuedStart);
-                }
-                queuedStart.add(getEntity());
-                ((EntityLocal)root).setAttribute(CassandraDatacenter.QUEUED_START_NODES, queuedStart);
-            }
-            do {
-                // get it again in case it is backed by something external
-                queuedStart = root.getAttribute(CassandraDatacenter.QUEUED_START_NODES);
-                if (queuedStart.get(0).equals(getEntity())) break;
-                synchronized (queuedStart) {
-                    try {
-                        queuedStart.wait(1000);
-                    } catch (InterruptedException e) {
-                        Exceptions.propagate(e);
-                    }
-                }
-            } while (true);
-
-            // TODO should look at last start time... but instead we always wait
-            CassandraDatacenter.DELAY_BETWEEN_STARTS.countdownTimer().waitForExpiryUnchecked();
-        }
-
-        try {
-            // Relies on `bin/cassandra -p <pidfile>`, rather than us writing pid file ourselves.
-            newScript(MutableMap.of(USE_PID_FILE, false), LAUNCHING)
-                    .body.append(
-                            // log the date to attempt to debug occasional http://wiki.apache.org/cassandra/FAQ#schema_disagreement
-                            // (can be caused by machines out of synch time-wise; but in our case it seems to be caused by other things!)
-                            "echo date on cassandra server `hostname` when launching is `date`",
-                            launchEssentialCommand(),
-                            "echo after essential command")
-                    .execute();
-            if (!isClustered()) {
-                InputStream creationScript = DatastoreMixins.getDatabaseCreationScript(entity);
-                if (creationScript!=null) {
-                    Tasks.setBlockingDetails("Pausing to ensure Cassandra (singleton) has started before running creation script");
-                    Time.sleep(Duration.seconds(20));
-                    Tasks.resetBlockingDetails();
-                    executeScriptAsync(Streams.readFullyString(creationScript));
-                }
-            }
-            if (isClustered() && isFirst) {
-                for (Entity ancestor: getCassandraAncestors()) {
-                    ((EntityLocal)ancestor).setAttribute(CassandraDatacenter.FIRST_NODE_STARTED_TIME_UTC, System.currentTimeMillis());
-                }
-            }
-        } finally {
-            if (queuedStart!=null) {
-                Entity head = queuedStart.remove(0);
-                checkArgument(head.equals(getEntity()), "first queued node was "+head+" but we are "+getEntity());
-                synchronized (queuedStart) {
-                    queuedStart.notifyAll();
-                }
-            }
-        }
-    }
-
-    /** returns cassandra-related ancestors (datacenter, fabric), with datacenter first and fabric last */
-    protected List<Entity> getCassandraAncestors() {
-        List<Entity> result = new ArrayList<Entity>();
-        Entity ancestor = getEntity().getParent();
-        while (ancestor!=null) {
-            if (ancestor instanceof CassandraDatacenter || ancestor instanceof CassandraFabric)
-                result.add(ancestor);
-            ancestor = ancestor.getParent();
-        }
-        return result;
-    }
-
-    protected String launchEssentialCommand() {
-        if (isV2()) {
-            return String.format("./bin/cassandra -p %s > ./cassandra-console.log 2>&1", getPidFile());
-        } else {
-            // TODO Could probably get rid of the nohup here, as script does equivalent itself
-            // with `exec ... <&- &`
-            return String.format("nohup ./bin/cassandra -p %s > ./cassandra-console.log 2>&1 &", getPidFile());
-        }
-    }
-
-    public String getPidFile() { return Os.mergePathsUnix(getRunDir(), "cassandra.pid"); }
-
-    @Override
-    public boolean isRunning() {
-        return newScript(MutableMap.of(USE_PID_FILE, getPidFile()), CHECK_RUNNING).execute() == 0;
-    }
-
-    @Override
-    public void stop() {
-        newScript(MutableMap.of(USE_PID_FILE, getPidFile()), STOPPING).execute();
-    }
-
-    @SuppressWarnings("unchecked")
-    @Override
-    protected Map<String,String> getCustomJavaSystemProperties() {
-        return MutableMap.<String, String>builder()
-                .putAll(super.getCustomJavaSystemProperties())
-                .put("cassandra.config", getCassandraConfigFileName())
-                .build();
-    }
-
-    @Override
-    public Map<String, String> getShellEnvironment() {
-        return MutableMap.<String, String>builder()
-                .putAll(super.getShellEnvironment())
-                .put("CASSANDRA_HOME", getRunDir())
-                .put("CASSANDRA_CONF", Os.mergePathsUnix(getRunDir(), "conf"))
-                .renameKey("JAVA_OPTS", "JVM_OPTS")
-                .build();
-    }
-
-    @Override
-    public ProcessTaskWrapper<Integer> executeScriptAsync(String commands) {
-        String fileToRun = Os.mergePathsUnix("brooklyn_commands", "cassandra-commands-"+Identifiers.makeRandomId(8));
-        TaskWrapper<Void> task = SshEffectorTasks.put(Os.mergePathsUnix(getRunDir(), fileToRun))
-                .machine(getMachine())
-                .contents(commands)
-                .summary("copying cassandra script to execute "+fileToRun)
-                .newTask();
-        DynamicTasks.queueIfPossible(task).orSubmitAndBlock(getEntity()).andWaitForSuccess();
-        return executeScriptFromInstalledFileAsync(fileToRun);
-    }
-
-    public ProcessTaskWrapper<Integer> executeScriptFromInstalledFileAsync(String fileToRun) {
-        ProcessTaskWrapper<Integer> task = SshEffectorTasks.ssh(
-                        "cd "+getRunDir(),
-                        scriptInvocationCommand(getThriftPort(), fileToRun))
-                .machine(getMachine())
-                .summary("executing cassandra script "+fileToRun)
-                .newTask();
-        DynamicTasks.queueIfPossible(task).orSubmitAndBlock(getEntity());
-        return task;
-    }
-
-    protected String scriptInvocationCommand(Integer optionalThriftPort, String fileToRun) {
-        return "bin/cassandra-cli " +
-                (optionalThriftPort != null ? "--port " + optionalThriftPort : "") +
-                " --file "+fileToRun;
-    }
-
-    @Override
-    public String getResolvedAddress(String hostname) {
-        return resolvedAddressCache.or(BrooklynAccessUtils.resolvedAddressSupplier(getEntity(), getMachine(), hostname));
-    }
-
-}
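
The wait above is simple clock arithmetic: a non-first node sleeps until FIRST_NODE_STARTED_TIME_UTC plus DELAY_AFTER_FIRST has elapsed. A minimal standalone sketch of that calculation, with hypothetical literal values standing in for the sensor and config reads:

    public class LaunchDelayExample {
        public static void main(String[] args) {
            // hypothetical values; the driver reads these from the CassandraDatacenter entity
            long firstStartTimeUtc = System.currentTimeMillis() - 30000L; // first node started 30s ago
            long delayAfterFirstMillis = 60000L;                          // configured gap after the first node

            // same arithmetic as launch(): the remaining wait may be <= 0 if the gap has already passed
            long toWaitMillis = firstStartTimeUtc + delayAfterFirstMillis - System.currentTimeMillis();
            if (toWaitMillis > 0) {
                System.out.println("Delaying non-first node launch by " + toWaitMillis + " ms");
                try {
                    Thread.sleep(toWaitMillis);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        }
    }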

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/TokenGenerator.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/TokenGenerator.java b/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/TokenGenerator.java
deleted file mode 100644
index 8503ad7..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/TokenGenerator.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import java.math.BigInteger;
-import java.util.Set;
-
-public interface TokenGenerator {
-
-    BigInteger max();
-    BigInteger min();
-    BigInteger range();
-
-    void setOrigin(BigInteger shift);
-    
-    BigInteger newToken();
-    
-    BigInteger getTokenForReplacementNode(BigInteger oldToken);
-    
-    Set<BigInteger> getTokensForReplacementNode(Set<BigInteger> oldTokens);
-    
-    /**
-     * Indicates that we are starting a new cluster of the given number of nodes,
-     * so expect that number of consecutive calls to {@link #newToken()}.
-     * 
-     * @param numNewNodes
-     */
-    void growingCluster(int numNewNodes);
-
-    void shrinkingCluster(Set<BigInteger> nodesToRemove);
-    
-    void refresh(Set<BigInteger> currentNodes);
-}
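
The interface is call-sequence sensitive: growingCluster(n) plans n tokens, the next n calls to newToken() return them, and later calls return null so that Cassandra chooses for itself. A minimal usage sketch, assuming the PosNeg63TokenGenerator implementation from TokenGenerators.java below:

    import java.math.BigInteger;

    public class TokenGeneratorUsage {
        public static void main(String[] args) {
            TokenGenerator tokens = new TokenGenerators.PosNeg63TokenGenerator();
            tokens.growingCluster(4);              // plan tokens for four new nodes
            for (int i = 0; i < 4; i++) {
                BigInteger t = tokens.newToken();  // one pre-planned token per node
                System.out.println("node " + i + " -> " + t);
            }
            System.out.println(tokens.newToken()); // null: no planned tokens remain
        }
    }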

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/TokenGenerators.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/TokenGenerators.java b/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/TokenGenerators.java
deleted file mode 100644
index 70dd0f6..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/TokenGenerators.java
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-
-import java.io.Serializable;
-import java.math.BigInteger;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
-
-import brooklyn.util.collections.MutableList;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-public class TokenGenerators {
-
-    /**
-     * Sub-classes are recommended to call {@link #checkRangeValid()} at construction time.
-     */
-    public static abstract class AbstractTokenGenerator implements TokenGenerator, Serializable {
-        
-        private static final long serialVersionUID = -1884526356161711176L;
-        
-        public static final BigInteger TWO = BigInteger.valueOf(2);
-        
-        public abstract BigInteger max();
-        public abstract BigInteger min();
-        public abstract BigInteger range();
-
-        private final Set<BigInteger> currentTokens = Sets.newTreeSet();
-        private final List<BigInteger> nextTokens = Lists.newArrayList();
-        private BigInteger origin = BigInteger.ZERO;
-        
-        protected void checkRangeValid() {
-            Preconditions.checkState(range().equals(max().subtract(min()).add(BigInteger.ONE)), 
-                    "min=%s; max=%s; range=%s", min(), max(), range());
-        }
-        
-        @Override
-        public void setOrigin(BigInteger shift) {
-            this.origin = Preconditions.checkNotNull(shift, "shift");
-        }
-        
-        /**
-         * Unless we're explicitly starting a new cluster or resizing by a pre-defined number of nodes, then
-         * let Cassandra decide (i.e. return null).
-         */
-        @Override
-        public synchronized BigInteger newToken() {
-            BigInteger result = (nextTokens.isEmpty()) ? null : nextTokens.remove(0);
-            if (result != null) currentTokens.add(result);
-            return result;
-        }
-
-        @Override
-        public synchronized BigInteger getTokenForReplacementNode(BigInteger oldToken) {
-            checkNotNull(oldToken, "oldToken");
-            return normalize(oldToken.subtract(BigInteger.ONE));
-        }
-
-        @Override
-        public synchronized Set<BigInteger> getTokensForReplacementNode(Set<BigInteger> oldTokens) {
-            checkNotNull(oldTokens, "oldTokens");
-            Set<BigInteger> result = Sets.newLinkedHashSet();
-            for (BigInteger oldToken : oldTokens) {
-                result.add(getTokenForReplacementNode(oldToken));
-            }
-            return result;
-        }
-        
-        @Override
-        public synchronized void growingCluster(int numNewNodes) {
-            if (currentTokens.isEmpty() && nextTokens.isEmpty()) {
-                nextTokens.addAll(generateEquidistantTokens(numNewNodes));
-            } else {
-                // simple strategy which iteratively finds best midpoint
-                for (int i=0; i<numNewNodes; i++) {
-                    nextTokens.add(generateBestNextToken());
-                }
-            }
-        }
-
-        @Override
-        public synchronized void shrinkingCluster(Set<BigInteger> nodesToRemove) {
-            currentTokens.removeAll(nodesToRemove);
-        }
-
-        @Override
-        public synchronized void refresh(Set<BigInteger> currentNodes) {
-            currentTokens.clear();
-            currentTokens.addAll(currentNodes);
-        }
-
-        private List<BigInteger> generateEquidistantTokens(int numTokens) {
-            List<BigInteger> result = Lists.newArrayList();
-            for (int i = 0; i < numTokens; i++) {
-                BigInteger token = range().multiply(BigInteger.valueOf(i)).divide(BigInteger.valueOf(numTokens)).add(min());
-                token = normalize(token.add(origin));
-                result.add(token);
-            }
-            return result;
-        }
-        
-        private BigInteger normalize(BigInteger input) {
-            while (input.compareTo(min()) < 0)
-                input = input.add(range());
-            while (input.compareTo(max()) > 0)
-                input = input.subtract(range());
-            return input;
-        }
-        
-        private BigInteger generateBestNextToken() {
-            List<BigInteger> allTokens = MutableList.<BigInteger>of().appendAll(currentTokens).appendAll(nextTokens);
-            Collections.sort(allTokens);
-            Iterator<BigInteger> ti = allTokens.iterator();
-            
-            BigInteger thisValue = ti.next();
-            BigInteger prevValue = allTokens.get(allTokens.size()-1).subtract(range());
-            
-            BigInteger bestNewTokenSoFar = normalize(prevValue.add(thisValue).divide(TWO));
-            BigInteger biggestRangeSizeSoFar = thisValue.subtract(prevValue);
-            
-            while (ti.hasNext()) {
-                prevValue = thisValue;
-                thisValue = ti.next();
-                
-                BigInteger rangeHere = thisValue.subtract(prevValue);
-                if (rangeHere.compareTo(biggestRangeSizeSoFar) > 0) {
-                    bestNewTokenSoFar = prevValue.add(thisValue).divide(TWO);
-                    biggestRangeSizeSoFar = rangeHere;
-                }
-            }
-            return bestNewTokenSoFar;
-        }
-
-    }
-
-    public static class PosNeg63TokenGenerator extends AbstractTokenGenerator {
-        private static final long serialVersionUID = 7327403957176106754L;
-        
-        public static final BigInteger MIN_TOKEN = TWO.pow(63).negate();
-        public static final BigInteger MAX_TOKEN = TWO.pow(63).subtract(BigInteger.ONE);
-        public static final BigInteger RANGE = TWO.pow(64);
-
-        public PosNeg63TokenGenerator() {
-            checkRangeValid();
-        }
-
-        @Override public BigInteger max() { return MAX_TOKEN; }
-        @Override public BigInteger min() { return MIN_TOKEN; }
-        @Override public BigInteger range() { return RANGE; }
-    }
-    
-    /** token generator used by cassandra pre v1.2 */
-    public static class NonNeg127TokenGenerator extends AbstractTokenGenerator {
-        private static final long serialVersionUID = 1357426905711548198L;
-        
-        public static final BigInteger MIN_TOKEN = BigInteger.ZERO;
-        public static final BigInteger MAX_TOKEN = TWO.pow(127).subtract(BigInteger.ONE);
-        public static final BigInteger RANGE = TWO.pow(127);
-
-        public NonNeg127TokenGenerator() {
-            checkRangeValid();
-        }
-        
-        @Override public BigInteger max() { return MAX_TOKEN; }
-        @Override public BigInteger min() { return MIN_TOKEN; }
-        @Override public BigInteger range() { return RANGE; }
-    }
-    
-}
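
For example, with an origin of zero, growing an empty PosNeg63TokenGenerator by four nodes yields the equidistant tokens min + i*range/4 for i = 0..3, i.e. -2^63, -2^62, 0 and 2^62; a fifth node would then receive the midpoint of the widest remaining gap from generateBestNextToken(). A quick standalone check of that arithmetic:

    import java.math.BigInteger;

    public class EquidistantTokenCheck {
        public static void main(String[] args) {
            BigInteger min = BigInteger.valueOf(2).pow(63).negate(); // PosNeg63 MIN_TOKEN
            BigInteger range = BigInteger.valueOf(2).pow(64);        // PosNeg63 RANGE
            int numTokens = 4;
            for (int i = 0; i < numTokens; i++) {
                // same formula as generateEquidistantTokens, origin zero
                BigInteger token = range.multiply(BigInteger.valueOf(i))
                        .divide(BigInteger.valueOf(numTokens)).add(min);
                System.out.println(token); // prints -2^63, -2^62, 0, 2^62
            }
        }
    }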

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseCluster.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseCluster.java b/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseCluster.java
deleted file mode 100644
index e824b71..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseCluster.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchbase;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.brooklyn.catalog.Catalog;
-import brooklyn.config.ConfigKey;
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.ConfigKeys;
-import brooklyn.entity.group.DynamicCluster;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.Sensors;
-import brooklyn.util.flags.SetFromFlag;
-import brooklyn.util.time.Duration;
-
-import com.google.common.reflect.TypeToken;
-
-@Catalog(name="CouchBase Cluster", description="Couchbase is an open source, distributed (shared-nothing architecture) "
-        + "NoSQL document-oriented database that is optimized for interactive applications.")
-@ImplementedBy(CouchbaseClusterImpl.class)
-public interface CouchbaseCluster extends DynamicCluster {
-
-    AttributeSensor<Integer> ACTUAL_CLUSTER_SIZE = Sensors.newIntegerSensor("coucbase.cluster.actualClusterSize", "returns the actual number of nodes in the cluster");
-
-    @SuppressWarnings("serial")
-    AttributeSensor<Set<Entity>> COUCHBASE_CLUSTER_UP_NODES = Sensors.newSensor(new TypeToken<Set<Entity>>() {
-    }, "couchbase.cluster.clusterEntities", "the set of service up nodes");
-
-    @SuppressWarnings("serial")
-    AttributeSensor<List<String>> COUCHBASE_CLUSTER_BUCKETS = Sensors.newSensor(new TypeToken<List<String>>() {
-    }, "couchbase.cluster.buckets", "Names of all the buckets the couchbase cluster");
-
-    AttributeSensor<Entity> COUCHBASE_PRIMARY_NODE = Sensors.newSensor(Entity.class, "couchbase.cluster.primaryNode", "The primary couchbase node to query and issue add-server and rebalance on");
-
-    AttributeSensor<Boolean> IS_CLUSTER_INITIALIZED = Sensors.newBooleanSensor("couchbase.cluster.isClusterInitialized", "flag to emit if the couchbase cluster was intialized");
-
-    @SetFromFlag("clusterName")
-    ConfigKey<String> CLUSTER_NAME = ConfigKeys.newStringConfigKey("couchbase.cluster.name", "Optional name for this cluster");
-
-    @SetFromFlag("intialQuorumSize")
-    ConfigKey<Integer> INITIAL_QUORUM_SIZE = ConfigKeys.newIntegerConfigKey("couchbase.cluster.intialQuorumSize", "Initial cluster quorum size - number of initial nodes that must have been successfully started to report success (if < 0, then use value of INITIAL_SIZE)",
-            -1);
-
-    @SetFromFlag("delayBeforeAdvertisingCluster")
-    ConfigKey<Duration> DELAY_BEFORE_ADVERTISING_CLUSTER = ConfigKeys.newConfigKey(Duration.class, "couchbase.cluster.delayBeforeAdvertisingCluster", "Delay after cluster is started before checking and advertising its availability", Duration.TEN_SECONDS);
-
-    // TODO not sure if this is needed; previously waited 3m (SERVICE_UP_TIME_OUT) but that seems absurdly long
-    @SetFromFlag("postStartStabilizationDelay")
-    ConfigKey<Duration> NODES_STARTED_STABILIZATION_DELAY = ConfigKeys.newConfigKey(Duration.class, "couchbase.cluster.postStartStabilizationDelay", "Delay after nodes have been started before treating it as a cluster", Duration.TEN_SECONDS);
-    
-    @SetFromFlag("adminUsername")
-    ConfigKey<String> COUCHBASE_ADMIN_USERNAME = CouchbaseNode.COUCHBASE_ADMIN_USERNAME;
-
-    @SetFromFlag("adminPassword")
-    ConfigKey<String> COUCHBASE_ADMIN_PASSWORD = CouchbaseNode.COUCHBASE_ADMIN_PASSWORD;
-
-    @SuppressWarnings("serial")
-    AttributeSensor<List<String>> COUCHBASE_CLUSTER_UP_NODE_ADDRESSES = Sensors.newSensor(new TypeToken<List<String>>() {},
-            "couchbase.cluster.node.addresses", "List of host:port of all active nodes in the cluster (http admin port, and public hostname/IP)");
-    AttributeSensor<String> COUCHBASE_CLUSTER_CONNECTION_URL = Sensors.newStringSensor(
-            "couchbase.cluster.connection.url", "Couchbase-style URL to connect to the cluster (e.g. http://127.0.0.1:8091/ or couchbase://10.0.0.1,10.0.0.2/)");
-    
-    // Interesting stats
-    AttributeSensor<Double> OPS_PER_NODE = Sensors.newDoubleSensor("couchbase.stats.cluster.per.node.ops", 
-            "Average across cluster for pools/nodes/<current node>/interestingStats/ops");
-    AttributeSensor<Double> EP_BG_FETCHED_PER_NODE = Sensors.newDoubleSensor("couchbase.stats.cluster.per.node.ep.bg.fetched", 
-            "Average across cluster for pools/nodes/<current node>/interestingStats/ep_bg_fetched");
-    AttributeSensor<Double> CURR_ITEMS_PER_NODE = Sensors.newDoubleSensor("couchbase.stats.cluster.per.node.curr.items", 
-            "Average across cluster for pools/nodes/<current node>/interestingStats/curr_items");
-    AttributeSensor<Double> VB_REPLICA_CURR_ITEMS_PER_NODE = Sensors.newDoubleSensor("couchbase.stats.cluster.per.node.vb.replica.curr.items", 
-            "Average across cluster for pools/nodes/<current node>/interestingStats/vb_replica_curr_items");
-    AttributeSensor<Double> GET_HITS_PER_NODE = Sensors.newDoubleSensor("couchbase.stats.cluster.per.node.get.hits", 
-            "Average across cluster for pools/nodes/<current node>/interestingStats/get_hits");
-    AttributeSensor<Double> CMD_GET_PER_NODE = Sensors.newDoubleSensor("couchbase.stats.cluster.per.node.cmd.get", 
-            "Average across cluster for pools/nodes/<current node>/interestingStats/cmd_get");
-    AttributeSensor<Double> CURR_ITEMS_TOT_PER_NODE = Sensors.newDoubleSensor("couchbase.stats.cluster.per.node.curr.items.tot", 
-            "Average across cluster for pools/nodes/<current node>/interestingStats/curr_items_tot");
-    // Although these are Double (after aggregation), they need to be coerced to Long for ByteSizeStrings rendering
-    AttributeSensor<Long> COUCH_DOCS_DATA_SIZE_PER_NODE = Sensors.newLongSensor("couchbase.stats.cluster.per.node.couch.docs.data.size", 
-            "Average across cluster for pools/nodes/<current node>/interestingStats/couch_docs_data_size");
-    AttributeSensor<Long> MEM_USED_PER_NODE = Sensors.newLongSensor("couchbase.stats.cluster.per.node.mem.used", 
-            "Average across cluster for pools/nodes/<current node>/interestingStats/mem_used");
-    AttributeSensor<Long> COUCH_VIEWS_ACTUAL_DISK_SIZE_PER_NODE = Sensors.newLongSensor("couchbase.stats.cluster.per.node.couch.views.actual.disk.size", 
-            "Average across cluster for pools/nodes/<current node>/interestingStats/couch_views_actual_disk_size");
-    AttributeSensor<Long> COUCH_DOCS_ACTUAL_DISK_SIZE_PER_NODE = Sensors.newLongSensor("couchbase.stats.cluster.per.node.couch.docs.actual.disk.size", 
-            "Average across cluster for pools/nodes/<current node>/interestingStats/couch_docs_actual_disk_size");
-    AttributeSensor<Long> COUCH_VIEWS_DATA_SIZE_PER_NODE = Sensors.newLongSensor("couchbase.stats.cluster.per.node.couch.views.data.size", 
-            "Average across cluster for pools/nodes/<current node>/interestingStats/couch_views_data_size");
-    
-    AttributeSensor<Boolean> BUCKET_CREATION_IN_PROGRESS = Sensors.newBooleanSensor("couchbase.cluster.bucketCreationInProgress", "Indicates that a bucket is currently being created, and" +
-            "further bucket creation should be deferred");
-
-    /**
-     * createBuckets is a list of all the buckets to be created on the couchbase cluster.
-     * The buckets will be created on the primary node of the cluster.
-     * Each map entry for a bucket should contain the following parameters:
-     * - <"bucket",(String) name of the bucket (default: default)>
-     * - <"bucket-type",(String) name of bucket type (default: couchbase)>
-     * - <"bucket-port",(Integer) the bucket port to connect to (default: 11222)>
-     * - <"bucket-ramsize",(Integer) ram size allowed for bucket (default: 200)>
-     * - <"bucket-replica",(Integer) number of replicas for the bucket (default: 1)>
-     */
-    @SuppressWarnings("serial")
-    @SetFromFlag("createBuckets")
-    ConfigKey<List<Map<String, Object>>> CREATE_BUCKETS = ConfigKeys.newConfigKey(new TypeToken<List<Map<String, Object>>>() {}, 
-            "couchbase.cluster.createBuckets", "a list of all dedicated port buckets to be created on the couchbase cluster");
-    
-    @SuppressWarnings("serial")
-    @SetFromFlag("replication")
-    ConfigKey<List<Map<String,Object>>> REPLICATION = ConfigKeys.newConfigKey(new TypeToken<List<Map<String,Object>>>() {}, 
-            "couchbase.cluster.replicationConfiguration", "List of replication rules to configure, each rule including target (id of another cluster) and mode (unidirectional or bidirectional)");
-
-    int getQuorumSize();
-}
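
A sketch of how a blueprint author might populate CREATE_BUCKETS programmatically; the map keys and defaults follow the javadoc above, while the EntitySpec wiring and the pre-rename brooklyn.* imports are assumptions based on the surrounding code, not a blessed recipe:

    import java.util.Map;

    import brooklyn.entity.proxying.EntitySpec;

    import com.google.common.collect.ImmutableList;
    import com.google.common.collect.ImmutableMap;

    public class CouchbaseBucketsExample {
        public static EntitySpec<CouchbaseCluster> clusterSpec() {
            return EntitySpec.create(CouchbaseCluster.class)
                    .configure(CouchbaseCluster.CREATE_BUCKETS, ImmutableList.<Map<String, Object>>of(
                            ImmutableMap.<String, Object>of(
                                    "bucket", "default",        // bucket name
                                    "bucket-type", "couchbase", // bucket type
                                    "bucket-port", 11222,       // dedicated bucket port
                                    "bucket-ramsize", 200,      // RAM quota in MB
                                    "bucket-replica", 1)));     // number of replicas
        }
    }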


[11/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakClusterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakClusterImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakClusterImpl.java
new file mode 100644
index 0000000..49bd515
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakClusterImpl.java
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.riak;
+
+import static brooklyn.util.JavaGroovyEquivalents.groovyTruth;
+
+import java.net.URI;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.enricher.Enrichers;
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.Attributes;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.basic.EntityInternal;
+import brooklyn.entity.basic.EntityPredicates;
+import brooklyn.entity.basic.Lifecycle;
+import brooklyn.entity.basic.ServiceStateLogic;
+import brooklyn.entity.basic.ServiceStateLogic.ServiceNotUpLogic;
+import brooklyn.entity.group.AbstractMembershipTrackingPolicy;
+import brooklyn.entity.group.DynamicClusterImpl;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.DependentConfiguration;
+import brooklyn.policy.EnricherSpec;
+import brooklyn.policy.PolicySpec;
+import brooklyn.util.task.Tasks;
+import brooklyn.util.time.Duration;
+import brooklyn.util.time.Time;
+
+import com.google.common.base.Function;
+import com.google.common.base.Joiner;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicates;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+public class RiakClusterImpl extends DynamicClusterImpl implements RiakCluster {
+
+    private static final Logger log = LoggerFactory.getLogger(RiakClusterImpl.class);
+
+    private transient Object mutex = new Object[0];
+
+    public void init() {
+        super.init();
+        log.info("Initializing the riak cluster...");
+        setAttribute(IS_CLUSTER_INIT, false);
+    }
+
+    @Override
+    protected void doStart() {
+        super.doStart();
+        connectSensors();
+
+        try {
+            Duration delay = getConfig(DELAY_BEFORE_ADVERTISING_CLUSTER);
+            Tasks.setBlockingDetails("Sleeping for "+delay+" before advertising cluster available");
+            Time.sleep(delay);
+        } finally {
+            Tasks.resetBlockingDetails();
+        }
+
+        //FIXME: add a quorum to tolerate failed nodes before setting on fire.
+        @SuppressWarnings("unchecked")
+        Optional<Entity> anyNode = Iterables.tryFind(getMembers(), Predicates.and(
+                Predicates.instanceOf(RiakNode.class),
+                EntityPredicates.attributeEqualTo(RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER, true),
+                EntityPredicates.attributeEqualTo(RiakNode.SERVICE_UP, true)));
+        if (anyNode.isPresent()) {
+            setAttribute(IS_CLUSTER_INIT, true);
+        } else {
+            log.warn("No Riak Nodes are found on the cluster: {}. Initialization Failed", getId());
+            ServiceStateLogic.setExpectedState(this, Lifecycle.ON_FIRE);
+        }
+    }
+
+    protected EntitySpec<?> getMemberSpec() {
+        EntitySpec<?> result = config().get(MEMBER_SPEC);
+        if (result!=null) return result;
+        return EntitySpec.create(RiakNode.class);
+    }
+
+    protected void connectSensors() {
+        addPolicy(PolicySpec.create(MemberTrackingPolicy.class)
+                .displayName("Controller targets tracker")
+                .configure("sensorsToTrack", ImmutableSet.of(RiakNode.SERVICE_UP))
+                .configure("group", this));
+
+        EnricherSpec<?> first = Enrichers.builder()
+                 .aggregating(Attributes.MAIN_URI)
+                 .publishing(Attributes.MAIN_URI)
+                 .computing(new Function<Collection<URI>,URI>() {
+                    @Override
+                    public URI apply(Collection<URI> input) {
+                        return input.iterator().next();
+                    } })
+                 .fromMembers()
+                 .build();
+        addEnricher(first);
+        
+        Map<? extends AttributeSensor<? extends Number>, ? extends AttributeSensor<? extends Number>> enricherSetup = 
+            ImmutableMap.<AttributeSensor<? extends Number>, AttributeSensor<? extends Number>>builder()
+                .put(RiakNode.NODE_PUTS, RiakCluster.NODE_PUTS_1MIN_PER_NODE)
+                .put(RiakNode.NODE_GETS, RiakCluster.NODE_GETS_1MIN_PER_NODE)
+                .put(RiakNode.NODE_OPS, RiakCluster.NODE_OPS_1MIN_PER_NODE)
+            .build();
+        // construct sum and average over cluster
+        for (AttributeSensor<? extends Number> nodeSensor : enricherSetup.keySet()) {
+            addSummingMemberEnricher(nodeSensor);
+            addAveragingMemberEnricher(nodeSensor, enricherSetup.get(nodeSensor));
+        }
+    }
+
+    private void addAveragingMemberEnricher(AttributeSensor<? extends Number> fromSensor, AttributeSensor<? extends Number> toSensor) {
+        addEnricher(Enrichers.builder()
+            .aggregating(fromSensor)
+            .publishing(toSensor)
+            .fromMembers()
+            .computingAverage()
+            .build()
+        );
+    }
+
+    private void addSummingMemberEnricher(AttributeSensor<? extends Number> source) {
+        addEnricher(Enrichers.builder()
+            .aggregating(source)
+            .publishing(source)
+            .fromMembers()
+            .computingSum()
+            .build()
+        );
+    }
+
+    protected void onServerPoolMemberChanged(final Entity member) {
+        synchronized (mutex) {
+            log.trace("For {}, considering membership of {} which is in locations {}", new Object[]{ this, member, member.getLocations() });
+
+            Map<Entity, String> nodes = getAttribute(RIAK_CLUSTER_NODES);
+            if (belongsInServerPool(member)) {
+                // TODO can we discover the nodes by asking the riak cluster, rather than assuming what we add will be in there?
+                // TODO and can we do join as part of node starting?
+
+                if (nodes == null) {
+                    nodes = Maps.newLinkedHashMap();
+                }
+                String riakName = getRiakName(member);
+                Preconditions.checkNotNull(riakName);
+
+                // flag a first node to be the first node in the riak cluster.
+                Boolean firstNode = getAttribute(IS_FIRST_NODE_SET);
+                if (!Boolean.TRUE.equals(firstNode)) {
+                    setAttribute(IS_FIRST_NODE_SET, Boolean.TRUE);
+
+                    nodes.put(member, riakName);
+                    setAttribute(RIAK_CLUSTER_NODES, nodes);
+
+                    ((EntityInternal) member).setAttribute(RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER, Boolean.TRUE);
+
+                    log.info("Added initial Riak node {}: {}; {} to new cluster", new Object[] { this, member, getRiakName(member) });
+                } else {
+                    // TODO: be wary of nodes that are erroneous but still flagged 'in cluster'
+                    // add the new node to be part of the riak cluster.
+                    Optional<Entity> anyNodeInCluster = Iterables.tryFind(nodes.keySet(), Predicates.and(
+                            Predicates.instanceOf(RiakNode.class),
+                            EntityPredicates.attributeEqualTo(RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER, true)));
+                    if (anyNodeInCluster.isPresent()) {
+                        if (!nodes.containsKey(member) && member.getAttribute(RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER) == null) {
+                            String anyNodeName = anyNodeInCluster.get().getAttribute(RiakNode.RIAK_NODE_NAME);
+                            Entities.invokeEffectorWithArgs(this, member, RiakNode.JOIN_RIAK_CLUSTER, anyNodeName).blockUntilEnded();
+                            nodes.put(member, riakName);
+                            setAttribute(RIAK_CLUSTER_NODES, nodes);
+                            log.info("Added Riak node {}: {}; {} to cluster", new Object[] { this, member, getRiakName(member) });
+                        }
+                    } else {
+                        log.error("isFirstNodeSet, but no cluster members found to add {}", member.getId());
+                    }
+                }
+            } else {
+                if (nodes != null && nodes.containsKey(member)) {
+                    DependentConfiguration.attributeWhenReady(member, RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER, Predicates.equalTo(false)).blockUntilEnded(Duration.TWO_MINUTES);
+                    @SuppressWarnings("unchecked")
+                    Optional<Entity> anyNodeInCluster = Iterables.tryFind(nodes.keySet(), Predicates.and(
+                            Predicates.instanceOf(RiakNode.class),
+                            EntityPredicates.attributeEqualTo(RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER, true),
+                            Predicates.not(Predicates.equalTo(member))));
+                    if (anyNodeInCluster.isPresent()) {
+                        Entities.invokeEffectorWithArgs(this, anyNodeInCluster.get(), RiakNode.REMOVE_FROM_CLUSTER, getRiakName(member)).blockUntilEnded();
+                    }
+                    nodes.remove(member);
+                    setAttribute(RIAK_CLUSTER_NODES, nodes);
+                    log.info("Removed Riak node {}: {}; {} from cluster", new Object[]{ this, member, getRiakName(member) });
+                }
+            }
+
+            ServiceNotUpLogic.updateNotUpIndicatorRequiringNonEmptyMap(this, RIAK_CLUSTER_NODES);
+
+            calculateClusterAddresses();
+        }
+    }
+
+    private void calculateClusterAddresses() {
+        List<String> addresses = Lists.newArrayList();
+        List<String> addressesPbPort = Lists.newArrayList();
+        for (Entity entity : this.getMembers()) {
+            if (entity instanceof RiakNode && entity.getAttribute(Attributes.SERVICE_UP)) {
+                RiakNode riakNode = (RiakNode) entity;
+                addresses.add(riakNode.getAttribute(Attributes.SUBNET_HOSTNAME) + ":" + riakNode.getAttribute(RiakNode.RIAK_WEB_PORT));
+                addressesPbPort.add(riakNode.getAttribute(Attributes.SUBNET_HOSTNAME) + ":" + riakNode.getAttribute(RiakNode.RIAK_PB_PORT));
+            }
+        }
+        setAttribute(RiakCluster.NODE_LIST, Joiner.on(",").join(addresses));
+        setAttribute(RiakCluster.NODE_LIST_PB_PORT, Joiner.on(",").join(addressesPbPort));
+    }
+
+    protected boolean belongsInServerPool(Entity member) {
+        if (!groovyTruth(member.getAttribute(Startable.SERVICE_UP))) {
+            log.trace("Members of {}, checking {}, eliminating because not up", this, member);
+            return false;
+        }
+        if (!getMembers().contains(member)) {
+            log.trace("Members of {}, checking {}, eliminating because not member", this, member);
+            return false;
+        }
+        log.trace("Members of {}, checking {}, approving", this, member);
+
+        return true;
+    }
+
+    private String getRiakName(Entity node) {
+        return node.getAttribute(RiakNode.RIAK_NODE_NAME);
+    }
+
+    public static class MemberTrackingPolicy extends AbstractMembershipTrackingPolicy {
+        @Override
+        protected void onEntityEvent(EventType type, Entity entity) {
+            ((RiakClusterImpl) super.entity).onServerPoolMemberChanged(entity);
+        }
+    }
+}
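
Because getMemberSpec() falls back to a plain RiakNode spec when MEMBER_SPEC is unset, a minimal cluster definition only needs a size. A hedged sketch (pre-rename brooklyn.* imports assumed, INITIAL_SIZE inherited from the cluster parent interface, and the one-minute delay purely illustrative):

    import brooklyn.entity.proxying.EntitySpec;
    import brooklyn.util.time.Duration;

    public class RiakClusterExample {
        public static EntitySpec<RiakCluster> clusterSpec() {
            return EntitySpec.create(RiakCluster.class)
                    .configure(RiakCluster.INITIAL_SIZE, 3) // three RiakNode members via the default member spec
                    .configure(RiakCluster.DELAY_BEFORE_ADVERTISING_CLUSTER, Duration.ONE_MINUTE);
        }
    }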

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakNode.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakNode.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakNode.java
new file mode 100644
index 0000000..9542840
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakNode.java
@@ -0,0 +1,238 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.riak;
+
+import java.net.URI;
+import java.util.List;
+
+import org.apache.brooklyn.catalog.Catalog;
+import brooklyn.config.ConfigKey;
+import brooklyn.entity.annotation.Effector;
+import brooklyn.entity.annotation.EffectorParam;
+import brooklyn.entity.basic.Attributes;
+import brooklyn.entity.basic.ConfigKeys;
+import brooklyn.entity.basic.MethodEffector;
+import brooklyn.entity.basic.SoftwareProcess;
+import brooklyn.entity.java.UsesJava;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.AttributeSensorAndConfigKey;
+import brooklyn.event.basic.PortAttributeSensorAndConfigKey;
+import brooklyn.event.basic.Sensors;
+import brooklyn.util.flags.SetFromFlag;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.reflect.TypeToken;
+
+@Catalog(name="Riak Node", description="Riak is a distributed NoSQL key-value data store that offers "
+        + "extremely high availability, fault tolerance, operational simplicity and scalability.")
+@ImplementedBy(RiakNodeImpl.class)
+public interface RiakNode extends SoftwareProcess, UsesJava {
+
+    @SetFromFlag("version")
+    ConfigKey<String> SUGGESTED_VERSION = ConfigKeys.newConfigKeyWithDefault(SoftwareProcess.SUGGESTED_VERSION,
+            "Version to install (Default 2.0.5)", "2.0.5");
+
+    @SetFromFlag("optimizeNetworking")
+    ConfigKey<Boolean> OPTIMIZE_HOST_NETWORKING  = ConfigKeys.newBooleanConfigKey("riak.networking.optimize", "Optimize host networking when running in a VM", Boolean.TRUE);
+
+    // vm.args and app.config are used for pre-version 2.0.0. Later versions use the (simplified) riak.conf
+    // see https://github.com/joedevivo/ricon/blob/master/cuttlefish.md
+    @SetFromFlag("vmArgsTemplateUrl")
+    ConfigKey<String> RIAK_VM_ARGS_TEMPLATE_URL = ConfigKeys.newStringConfigKey(
+            "riak.vmArgs.templateUrl", "Template file (in freemarker format) for the vm.args config file",
+            "classpath://org/apache/brooklyn/entity/nosql/riak/vm.args");
+    @SetFromFlag("appConfigTemplateUrl")
+    ConfigKey<String> RIAK_APP_CONFIG_TEMPLATE_URL = ConfigKeys.newStringConfigKey(
+            "riak.appConfig.templateUrl", "Template file (in freemarker format) for the app.config config file",
+            "classpath://org/apache/brooklyn/entity/nosql/riak/app.config");
+    @SetFromFlag("appConfigTemplateUrlLinux")
+    ConfigKey<String> RIAK_CONF_TEMPLATE_URL_LINUX = ConfigKeys.newStringConfigKey(
+            "riak.riakConf.templateUrl.linux", "Template file (in freemarker format) for the app.config config file",
+            "classpath://org/apache/brooklyn/entity/nosql/riak/riak.conf");
+    @SetFromFlag("appConfigTemplateUrlMac")
+    ConfigKey<String> RIAK_CONF_TEMPLATE_URL_MAC = ConfigKeys.newStringConfigKey(
+            "riak.riakConf.templateUrl.mac", "Template file (in freemarker format) for the app.config config file",
+            "classpath://org/apache/brooklyn/entity/nosql/riak/riak-mac.conf");
+
+    ConfigKey<String> RIAK_CONF_ADDITIONAL_CONTENT = ConfigKeys.newStringConfigKey(
+            "riak.riakConf.additionalContent", "Template file (in freemarker format) for setting up additional settings in the riak.conf file", "");
+    
+    // maxOpenFiles' default value (65536) is based on Basho's recommendation - http://docs.basho.com/riak/latest/ops/tuning/open-files-limit/
+    @SetFromFlag("maxOpenFiles")
+    ConfigKey<Integer> RIAK_MAX_OPEN_FILES = ConfigKeys.newIntegerConfigKey(
+            "riak.max.open.files", "Number of the open files required by Riak", 65536);
+    
+    @SetFromFlag("downloadUrlRhelCentos")
+    AttributeSensorAndConfigKey<String, String> DOWNLOAD_URL_RHEL_CENTOS = ConfigKeys.newTemplateSensorAndConfigKey("download.url.rhelcentos",
+            "URL pattern for downloading the linux RPM installer (will substitute things like ${version} automatically)",
+            "http://s3.amazonaws.com/downloads.basho.com/riak/${entity.majorVersion}/${entity.fullVersion}/rhel/" +
+                    "${entity.osMajorVersion}/riak-${entity.fullVersion}-1.el${entity.osMajorVersion}.x86_64.rpm");
+
+    @SetFromFlag("downloadUrlUbuntu")
+    AttributeSensorAndConfigKey<String, String> DOWNLOAD_URL_UBUNTU = ConfigKeys.newTemplateSensorAndConfigKey("download.url.ubuntu",
+            "URL pattern for downloading the linux Ubuntu installer (will substitute things like ${version} automatically)",
+            "http://s3.amazonaws.com/downloads.basho.com/riak/${entity.majorVersion}/${entity.fullVersion}/ubuntu/" +
+                    "$OS_RELEASE/riak_${entity.fullVersion}-1_amd64.deb");
+
+    @SetFromFlag("downloadUrlDebian")
+    AttributeSensorAndConfigKey<String, String> DOWNLOAD_URL_DEBIAN = ConfigKeys.newTemplateSensorAndConfigKey("download.url.debian",
+            "URL pattern for downloading the linux Debian installer (will substitute things like ${version} automatically)",
+            "http://s3.amazonaws.com/downloads.basho.com/riak/${entity.majorVersion}/${entity.fullVersion}/debian/" +
+                    "$OS_RELEASE/riak_${entity.fullVersion}-1_amd64.deb");
+
+    @SetFromFlag("downloadUrlMac")
+    AttributeSensorAndConfigKey<String, String> DOWNLOAD_URL_MAC = ConfigKeys.newTemplateSensorAndConfigKey("download.url.mac",
+            "URL pattern for downloading the MAC binaries tarball (will substitute things like ${version} automatically)",
+            "http://s3.amazonaws.com/downloads.basho.com/riak/${entity.majorVersion}/${entity.fullVersion}/osx/10.8/riak-${entity.fullVersion}-OSX-x86_64.tar.gz");
+
+    // NB these two needed for clients to access
+    @SetFromFlag("riakWebPort")
+    PortAttributeSensorAndConfigKey RIAK_WEB_PORT = new PortAttributeSensorAndConfigKey("riak.webPort", "Riak Web Port", "8098+");
+
+    @SetFromFlag("riakPbPort")
+    PortAttributeSensorAndConfigKey RIAK_PB_PORT = new PortAttributeSensorAndConfigKey("riak.pbPort", "Riak Protocol Buffers Port", "8087+");
+
+    AttributeSensor<Boolean> RIAK_PACKAGE_INSTALL = Sensors.newBooleanSensor(
+            "riak.install.package", "Flag to indicate whether Riak was installed using an OS package");
+    AttributeSensor<Boolean> RIAK_ON_PATH = Sensors.newBooleanSensor(
+            "riak.install.onPath", "Flag to indicate whether Riak is available on the PATH");
+
+    AttributeSensor<Boolean> RIAK_NODE_HAS_JOINED_CLUSTER = Sensors.newBooleanSensor(
+            "riak.node.riakNodeHasJoinedCluster", "Flag to indicate whether the Riak node has joined a cluster member");
+
+    AttributeSensor<String> RIAK_NODE_NAME = Sensors.newStringSensor("riak.node", "Returns the riak node name as defined in vm.args");
+
+    // these needed for nodes to talk to each other, but not clients (so ideally set up in the security group for internal access)
+    PortAttributeSensorAndConfigKey HANDOFF_LISTENER_PORT = new PortAttributeSensorAndConfigKey("handoffListenerPort", "Handoff Listener Port", "8099+");
+    PortAttributeSensorAndConfigKey EPMD_LISTENER_PORT = new PortAttributeSensorAndConfigKey("epmdListenerPort", "Erlang Port Mapper Daemon Listener Port", "4369");
+    PortAttributeSensorAndConfigKey ERLANG_PORT_RANGE_START = new PortAttributeSensorAndConfigKey("erlangPortRangeStart", "Erlang Port Range Start", "6000+");
+    PortAttributeSensorAndConfigKey ERLANG_PORT_RANGE_END = new PortAttributeSensorAndConfigKey("erlangPortRangeEnd", "Erlang Port Range End", "7999+");
+
+    @SetFromFlag("searchEnabled")
+    ConfigKey<Boolean> SEARCH_ENABLED = ConfigKeys.newBooleanConfigKey("riak.search", "Deploy Solr and configure Riak to use it", false);
+
+    /**
+     * http://docs.basho.com/riak/latest/dev/using/search/
+     * Riak search is powered by Solr (via Riak's Yokozuna engine) and is accessed through the Riak web port,
+     * so SEARCH_SOLR_PORT shouldn't be exposed.
+     */
+    ConfigKey<Integer> SEARCH_SOLR_PORT = ConfigKeys.newIntegerConfigKey("search.solr.port", "Solr port", 8983);
+    ConfigKey<Integer> SEARCH_SOLR_JMX_PORT = ConfigKeys.newIntegerConfigKey("search.solr.jmx_port", "Solr port", 8985);
+
+    AttributeSensor<Integer> NODE_GETS = Sensors.newIntegerSensor("riak.node.gets", "Gets in the last minute");
+    AttributeSensor<Integer> NODE_GETS_TOTAL = Sensors.newIntegerSensor("riak.node.gets.total", "Total gets since node started");
+    AttributeSensor<Integer> NODE_PUTS = Sensors.newIntegerSensor("riak.node.puts", "Puts in the last minute");
+    AttributeSensor<Integer> NODE_PUTS_TOTAL = Sensors.newIntegerSensor("riak.node.puts.total", "Total puts since node started");
+    AttributeSensor<Integer> VNODE_GETS = Sensors.newIntegerSensor("riak.vnode.gets");
+    AttributeSensor<Integer> VNODE_GETS_TOTAL = Sensors.newIntegerSensor("riak.vnode.gets.total");
+
+    //Sensors for Riak node counters (within a 1-minute window, or over the lifetime of the node).
+    //http://docs.basho.com/riak/latest/ops/running/stats-and-monitoring/#Statistics-from-Riak
+    AttributeSensor<Integer> VNODE_PUTS = Sensors.newIntegerSensor("riak.vnode.puts");
+    AttributeSensor<Integer> VNODE_PUTS_TOTAL = Sensors.newIntegerSensor("riak.vnode.puts.total");
+    AttributeSensor<Integer> READ_REPAIRS_TOTAL = Sensors.newIntegerSensor("riak.read.repairs.total");
+    AttributeSensor<Integer> COORD_REDIRS_TOTAL = Sensors.newIntegerSensor("riak.coord.redirs.total");
+    //Additional Riak node counters
+    AttributeSensor<Integer> MEMORY_PROCESSES_USED = Sensors.newIntegerSensor("riak.memory.processes.used");
+    AttributeSensor<Integer> SYS_PROCESS_COUNT = Sensors.newIntegerSensor("riak.sys.process.count");
+    AttributeSensor<Integer> PBC_CONNECTS = Sensors.newIntegerSensor("riak.pbc.connects");
+    AttributeSensor<Integer> PBC_ACTIVE = Sensors.newIntegerSensor("riak.pbc.active");
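+    // the anonymous TypeToken subclass captures the List<String> generic type (hence the "serial" warning suppression)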
+    @SuppressWarnings("serial")
+    AttributeSensor<List<String>> RING_MEMBERS = Sensors.newSensor(new TypeToken<List<String>>() {},
+            "ring.members", "all the riak nodes in the ring");
+    
+    AttributeSensor<Integer> NODE_OPS = Sensors.newIntegerSensor("riak.node.ops", "Sum of node gets and puts in the last minute");
+    AttributeSensor<Integer> NODE_OPS_TOTAL = Sensors.newIntegerSensor("riak.node.ops.total", "Sum of node gets and puts since the node started");
+
+    MethodEffector<Void> JOIN_RIAK_CLUSTER = new MethodEffector<Void>(RiakNode.class, "joinCluster");
+    MethodEffector<Void> LEAVE_RIAK_CLUSTER = new MethodEffector<Void>(RiakNode.class, "leaveCluster");
+    MethodEffector<Void> REMOVE_FROM_CLUSTER = new MethodEffector<Void>(RiakNode.class, "removeNode");
+
+    AttributeSensor<Integer> RIAK_NODE_GET_FSM_TIME_MEAN = Sensors.newIntegerSensor("riak.node_get_fsm_time_mean", "Time between reception of client read request and subsequent response to client");
+    AttributeSensor<Integer> RIAK_NODE_PUT_FSM_TIME_MEAN = Sensors.newIntegerSensor("riak.node_put_fsm_time_mean", "Time between reception of client write request and subsequent response to client");
+    AttributeSensor<Integer> RIAK_OBJECT_COUNTER_MERGE_TIME_MEAN = Sensors.newIntegerSensor("riak.object_counter_merge_time_mean", "Time it takes to perform an Update Counter operation");
+    AttributeSensor<Integer> RIAK_OBJECT_SET_MERGE_TIME_MEAN = Sensors.newIntegerSensor("riak.object_set_merge_time_mean", "Time it takes to perform an Update Set operation");
+    AttributeSensor<Integer> RIAK_OBJECT_MAP_MERGE_TIME_MEAN = Sensors.newIntegerSensor("riak.object_map_merge_time_mean", "Time it takes to perform an Update Map operation");
+    AttributeSensor<Integer> RIAK_CONSISTENT_GET_TIME_MEAN = Sensors.newIntegerSensor("riak.consistent_get_time_mean", "Strongly consistent read latency");
+    AttributeSensor<Integer> RIAK_CONSISTENT_PUT_TIME_MEAN = Sensors.newIntegerSensor("riak.consistent_put_time_mean", "Strongly consistent write latency");
+
+    List<AttributeSensor<Integer>> ONE_MINUTE_SENSORS = ImmutableList.of(RIAK_NODE_GET_FSM_TIME_MEAN, RIAK_NODE_PUT_FSM_TIME_MEAN,
+            RIAK_OBJECT_COUNTER_MERGE_TIME_MEAN, RIAK_OBJECT_SET_MERGE_TIME_MEAN, RIAK_OBJECT_MAP_MERGE_TIME_MEAN,
+            RIAK_CONSISTENT_GET_TIME_MEAN, RIAK_CONSISTENT_PUT_TIME_MEAN);
+
+    AttributeSensor<URI> RIAK_CONSOLE_URI = Attributes.MAIN_URI;
+
+    // accessors, for use from template file
+    Integer getRiakWebPort();
+
+    Integer getRiakPbPort();
+
+    Integer getHandoffListenerPort();
+
+    Integer getEpmdListenerPort();
+
+    Integer getErlangPortRangeStart();
+
+    Integer getErlangPortRangeEnd();
+
+    Boolean isSearchEnabled();
+
+    Integer getSearchSolrPort();
+
+    Integer getSearchSolrJmxPort();
+
+    String getFullVersion();
+
+    String getMajorVersion();
+
+    String getOsMajorVersion();
+
+    // TODO add commitCluster() effector and add effectors joinCluster, leaveCluster, removeNode, recoverFailedNode which do not execute commitCluster()
+    // the commit where the commitCluster effector was available is adbf2dc1cb5df98b1e52d3ab35fa6bb4983b722f
+
+    @Effector(description = "Join the Riak cluster on the given node")
+    void joinCluster(@EffectorParam(name = "nodeName") String nodeName);
+
+    @Effector(description = "Leave the Riak cluster")
+    void leaveCluster();
+
+    @Effector(description = "Remove the given node from the Riak cluster")
+    void removeNode(@EffectorParam(name = "nodeName") String nodeName);
+
+    @Effector(description = "Recover and join the Riak cluster on the given node")
+    void recoverFailedNode(@EffectorParam(name = "nodeName") String nodeName);
+
+    @Effector(description = "Create or modify a bucket type before activation")
+    void bucketTypeCreate(@EffectorParam(name = "bucketTypeName") String bucketTypeName,
+                          @EffectorParam(name = "bucketTypeProperties") String bucketTypeProperties);
+
+    @Effector(description = "List all currently available bucket types and their activation status")
+    List<String> bucketTypeList();
+
+    @Effector(description = "Display the status and properties of a specific bucket type")
+    List<String> bucketTypeStatus(@EffectorParam(name = "bucketTypeName") String bucketTypeName);
+
+    @Effector(description = "Update a bucket type after activation")
+    void bucketTypeUpdate(@EffectorParam(name = "bucketTypeName") String bucketTypeName,
+                          @EffectorParam(name = "bucketTypeProperties") String bucketTypeProperties);
+
+    @Effector(description = "Activate a bucket type")
+    void bucketTypeActivate(@EffectorParam(name = "bucketTypeName") String bucketTypeName);
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeDriver.java
new file mode 100644
index 0000000..b9339cf
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeDriver.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.riak;
+
+import brooklyn.entity.basic.SoftwareProcessDriver;
+
+import java.util.List;
+
+public interface RiakNodeDriver extends SoftwareProcessDriver {
+
+    String getRiakEtcDir();
+
+    void joinCluster(String nodeName);
+
+    void leaveCluster();
+
+    void removeNode(String nodeName);
+
+    void recoverFailedNode(String nodeName);
+
+    String getOsMajorVersion();
+
+    void bucketTypeCreate(String bucketTypeName, String bucketTypeProperties);
+
+    List<String> bucketTypeList();
+
+    List<String> bucketTypeStatus(String bucketTypeName);
+
+    void bucketTypeUpdate(String bucketTypeName, String bucketTypeProperties);
+
+    void bucketTypeActivate(String bucketTypeName);
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeImpl.java
new file mode 100644
index 0000000..d631516
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeImpl.java
@@ -0,0 +1,306 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.riak;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import javax.annotation.Nullable;
+
+import brooklyn.basic.BrooklynObjectInternal.ConfigurationSupportInternal; // assumed location of ConfigurationSupportInternal, used below
+import brooklyn.enricher.Enrichers;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.basic.SoftwareProcessImpl;
+import brooklyn.entity.webapp.WebAppServiceMethods;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.AttributeSensorAndConfigKey;
+import brooklyn.event.feed.http.HttpFeed;
+import brooklyn.event.feed.http.HttpPollConfig;
+import brooklyn.event.feed.http.HttpValueFunctions;
+import brooklyn.location.MachineProvisioningLocation;
+import brooklyn.location.access.BrooklynAccessUtils;
+import brooklyn.location.cloud.CloudLocationConfig;
+import brooklyn.util.collections.MutableSet;
+import brooklyn.util.config.ConfigBag;
+import brooklyn.util.guava.Functionals;
+import brooklyn.util.time.Duration;
+
+import com.google.common.base.Function;
+import com.google.common.base.Functions;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ContiguousSet;
+import com.google.common.collect.DiscreteDomain;
+import com.google.common.collect.Range;
+import com.google.common.net.HostAndPort;
+
+public class RiakNodeImpl extends SoftwareProcessImpl implements RiakNode {
+
+    private volatile HttpFeed httpFeed;
+
+    @Override
+    public RiakNodeDriver getDriver() {
+        return (RiakNodeDriver) super.getDriver();
+    }
+
+    @Override
+    public Class<RiakNodeDriver> getDriverInterface() {
+        return RiakNodeDriver.class;
+    }
+
+    @Override
+    public void init() {
+        super.init();
+        // fail fast if the config files are not available
+        Entities.getRequiredUrlConfig(this, RIAK_VM_ARGS_TEMPLATE_URL);
+        Entities.getRequiredUrlConfig(this, RIAK_APP_CONFIG_TEMPLATE_URL);
+        
+        Integer defaultMaxOpenFiles = RIAK_MAX_OPEN_FILES.getDefaultValue();
+        Integer maxOpenFiles = getConfig(RiakNode.RIAK_MAX_OPEN_FILES);
+        Preconditions.checkArgument(maxOpenFiles >= defaultMaxOpenFiles, "Specified number of open files (%s) is less than the required minimum (%s)",
+                maxOpenFiles, defaultMaxOpenFiles);
+    }
+
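+    /** Checks whether the user explicitly supplied any of the per-OS package download URLs. */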
+    @SuppressWarnings("rawtypes")
+    public boolean isPackageDownloadUrlProvided() {
+        AttributeSensorAndConfigKey[] downloadProperties = { DOWNLOAD_URL_RHEL_CENTOS, DOWNLOAD_URL_UBUNTU, DOWNLOAD_URL_DEBIAN };
+        for (AttributeSensorAndConfigKey property : downloadProperties) {
+            if (!((ConfigurationSupportInternal) config()).getRaw(property).isAbsent()) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    @Override
+    protected Map<String, Object> obtainProvisioningFlags(@SuppressWarnings("rawtypes") MachineProvisioningLocation location) {
+        ConfigBag result = ConfigBag.newInstance(super.obtainProvisioningFlags(location));
+        result.configure(CloudLocationConfig.OS_64_BIT, true);
+        return result.getAllConfig();
+    }
+
+    @Override
+    protected Collection<Integer> getRequiredOpenPorts() {
+        // TODO this creates a huge list of inbound ports; much better to define on a security group using range syntax!
+        int erlangRangeStart = getConfig(ERLANG_PORT_RANGE_START).iterator().next();
+        int erlangRangeEnd = getConfig(ERLANG_PORT_RANGE_END).iterator().next();
+
+        Set<Integer> ports = MutableSet.copyOf(super.getRequiredOpenPorts());
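+        // NB: Guava's Range.open excludes both endpoints, so the boundary ports themselves are not included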
+        Set<Integer> erlangPorts = ContiguousSet.create(Range.open(erlangRangeStart, erlangRangeEnd), DiscreteDomain.integers());
+        ports.addAll(erlangPorts);
+
+        return ports;
+    }
+
+    @Override
+    public void connectSensors() {
+        super.connectSensors();
+        connectServiceUpIsRunning();
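+        // resolve a host:port for the Riak HTTP interface that is reachable from the Brooklyn server (handles port-forwarded machines)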
+        HostAndPort accessible = BrooklynAccessUtils.getBrooklynAccessibleAddress(this, getRiakWebPort());
+
+        HttpFeed.Builder httpFeedBuilder = HttpFeed.builder()
+                .entity(this)
+                .period(500, TimeUnit.MILLISECONDS)
+                .baseUri(String.format("http://%s/stats", accessible.toString()))
+                .poll(new HttpPollConfig<Integer>(NODE_GETS)
+                        .onSuccess(HttpValueFunctions.jsonContents("node_gets", Integer.class))
+                        .onFailureOrException(Functions.constant(-1)))
+                .poll(new HttpPollConfig<Integer>(NODE_GETS_TOTAL)
+                        .onSuccess(HttpValueFunctions.jsonContents("node_gets_total", Integer.class))
+                        .onFailureOrException(Functions.constant(-1)))
+                .poll(new HttpPollConfig<Integer>(NODE_PUTS)
+                        .onSuccess(HttpValueFunctions.jsonContents("node_puts", Integer.class))
+                        .onFailureOrException(Functions.constant(-1)))
+                .poll(new HttpPollConfig<Integer>(NODE_PUTS_TOTAL)
+                        .onSuccess(HttpValueFunctions.jsonContents("node_puts_total", Integer.class))
+                        .onFailureOrException(Functions.constant(-1)))
+                .poll(new HttpPollConfig<Integer>(VNODE_GETS)
+                        .onSuccess(HttpValueFunctions.jsonContents("vnode_gets", Integer.class))
+                        .onFailureOrException(Functions.constant(-1)))
+                .poll(new HttpPollConfig<Integer>(VNODE_GETS_TOTAL)
+                        .onSuccess(HttpValueFunctions.jsonContents("vnode_gets_total", Integer.class))
+                        .onFailureOrException(Functions.constant(-1)))
+                .poll(new HttpPollConfig<Integer>(VNODE_PUTS)
+                        .onSuccess(HttpValueFunctions.jsonContents("vnode_puts", Integer.class))
+                        .onFailureOrException(Functions.constant(-1)))
+                .poll(new HttpPollConfig<Integer>(VNODE_PUTS_TOTAL)
+                        .onSuccess(HttpValueFunctions.jsonContents("vnode_puts_total", Integer.class))
+                        .onFailureOrException(Functions.constant(-1)))
+                .poll(new HttpPollConfig<Integer>(READ_REPAIRS_TOTAL)
+                        .onSuccess(HttpValueFunctions.jsonContents("read_repairs_total", Integer.class))
+                        .onFailureOrException(Functions.constant(-1)))
+                .poll(new HttpPollConfig<Integer>(COORD_REDIRS_TOTAL)
+                        .onSuccess(HttpValueFunctions.jsonContents("coord_redirs_total", Integer.class))
+                        .onFailureOrException(Functions.constant(-1)))
+                .poll(new HttpPollConfig<Integer>(MEMORY_PROCESSES_USED)
+                        .onSuccess(HttpValueFunctions.jsonContents("memory_processes_used", Integer.class))
+                        .onFailureOrException(Functions.constant(-1)))
+                .poll(new HttpPollConfig<Integer>(SYS_PROCESS_COUNT)
+                        .onSuccess(HttpValueFunctions.jsonContents("sys_process_count", Integer.class))
+                        .onFailureOrException(Functions.constant(-1)))
+                .poll(new HttpPollConfig<Integer>(PBC_CONNECTS)
+                        .onSuccess(HttpValueFunctions.jsonContents("pbc_connects", Integer.class))
+                        .onFailureOrException(Functions.constant(-1)))
+                .poll(new HttpPollConfig<Integer>(PBC_ACTIVE)
+                        .onSuccess(HttpValueFunctions.jsonContents("pbc_active", Integer.class))
+                        .onFailureOrException(Functions.constant(-1)))
+                .poll(new HttpPollConfig<List<String>>(RING_MEMBERS)
+                        .onSuccess(Functionals.chain(
+                                HttpValueFunctions.jsonContents("ring_members", String[].class),
+                                new Function<String[], List<String>>() {
+                                    @Nullable
+                                    @Override
+                                    public List<String> apply(@Nullable String[] strings) {
+                                        return Arrays.asList(strings);
+                                    }
+                                }
+                        ))
+                        .onFailureOrException(Functions.constant(Arrays.asList(new String[0]))));
+
+        for (AttributeSensor<Integer> sensor : ONE_MINUTE_SENSORS) {
+            httpFeedBuilder.poll(new HttpPollConfig<Integer>(sensor)
+                    .period(Duration.ONE_MINUTE)
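+                    // sensor names are "riak.<stat>"; substring(5) strips the "riak." prefix to recover the stats JSON key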
+                    .onSuccess(HttpValueFunctions.jsonContents(sensor.getName().substring(5), Integer.class))
+                    .onFailureOrException(Functions.constant(-1)));
+        }
+
+        httpFeed = httpFeedBuilder.build();
+
+        addEnricher(Enrichers.builder().combining(NODE_GETS, NODE_PUTS).computingSum().publishing(NODE_OPS).build());
+        addEnricher(Enrichers.builder().combining(NODE_GETS_TOTAL, NODE_PUTS_TOTAL).computingSum().publishing(NODE_OPS_TOTAL).build());
+        WebAppServiceMethods.connectWebAppServerPolicies(this);
+    }
+
+    @Override
+    public void disconnectSensors() {
+        super.disconnectSensors();
+        if (httpFeed != null) {
+            httpFeed.stop();
+        }
+        disconnectServiceUpIsRunning();
+    }
+
+    @Override
+    public void joinCluster(String nodeName) {
+        getDriver().joinCluster(nodeName);
+    }
+
+    @Override
+    public void leaveCluster() {
+        getDriver().leaveCluster();
+    }
+
+    @Override
+    public void removeNode(String nodeName) {
+        getDriver().removeNode(nodeName);
+    }
+
+    @Override
+    public void bucketTypeCreate(String bucketTypeName, String bucketTypeProperties) {
+        getDriver().bucketTypeCreate(bucketTypeName, bucketTypeProperties);
+    }
+
+    @Override
+    public List<String> bucketTypeList() {
+        return getDriver().bucketTypeList();
+    }
+
+    @Override
+    public List<String> bucketTypeStatus(String bucketTypeName) {
+        return getDriver().bucketTypeStatus(bucketTypeName);
+    }
+
+    @Override
+    public void bucketTypeUpdate(String bucketTypeName, String bucketTypeProperties) {
+        getDriver().bucketTypeUpdate(bucketTypeName, bucketTypeProperties);
+    }
+
+    @Override
+    public void bucketTypeActivate(String bucketTypeName) {
+        getDriver().bucketTypeActivate(bucketTypeName);
+    }
+
+    @Override
+    public void recoverFailedNode(String nodeName) {
+        getDriver().recoverFailedNode(nodeName);
+    }
+
+    @Override
+    public Integer getRiakWebPort() {
+        return getAttribute(RiakNode.RIAK_WEB_PORT);
+    }
+
+    @Override
+    public Integer getRiakPbPort() {
+        return getAttribute(RiakNode.RIAK_PB_PORT);
+    }
+
+    @Override
+    public Integer getHandoffListenerPort() {
+        return getAttribute(RiakNode.HANDOFF_LISTENER_PORT);
+    }
+
+    @Override
+    public Integer getEpmdListenerPort() {
+        return getAttribute(RiakNode.EPMD_LISTENER_PORT);
+    }
+
+    @Override
+    public Integer getErlangPortRangeStart() {
+        return getAttribute(RiakNode.ERLANG_PORT_RANGE_START);
+    }
+
+    @Override
+    public Integer getErlangPortRangeEnd() {
+        return getAttribute(RiakNode.ERLANG_PORT_RANGE_END);
+    }
+
+    @Override
+    public Boolean isSearchEnabled() {
+        return getConfig(RiakNode.SEARCH_ENABLED);
+    }
+
+    @Override
+    public Integer getSearchSolrPort() {
+        return getConfig(RiakNode.SEARCH_SOLR_PORT);
+    }
+
+    @Override
+    public Integer getSearchSolrJmxPort() {
+        return getConfig(RiakNode.SEARCH_SOLR_JMX_PORT);
+    }
+
+    @Override
+    public String getMajorVersion() {
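+        // e.g. "2.0.1" becomes "2.0"; assumes single-digit major and minor version components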
+        return getFullVersion().substring(0, 3);
+    }
+
+    @Override
+    public String getFullVersion() {
+        return getConfig(RiakNode.SUGGESTED_VERSION);
+    }
+
+    @Override
+    public String getOsMajorVersion() {
+        return getDriver().getOsMajorVersion();
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeSshDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeSshDriver.java
new file mode 100644
index 0000000..1815673
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/riak/RiakNodeSshDriver.java
@@ -0,0 +1,614 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.riak;
+
+import static brooklyn.util.ssh.BashCommands.INSTALL_CURL;
+import static brooklyn.util.ssh.BashCommands.INSTALL_TAR;
+import static brooklyn.util.ssh.BashCommands.addSbinPathCommand;
+import static brooklyn.util.ssh.BashCommands.alternatives;
+import static brooklyn.util.ssh.BashCommands.chainGroup;
+import static brooklyn.util.ssh.BashCommands.commandToDownloadUrlAs;
+import static brooklyn.util.ssh.BashCommands.ifExecutableElse;
+import static brooklyn.util.ssh.BashCommands.ifNotExecutable;
+import static brooklyn.util.ssh.BashCommands.ok;
+import static brooklyn.util.ssh.BashCommands.sudo;
+import static brooklyn.util.text.StringEscapes.BashStringEscapes.escapeLiteralForDoubleQuotedBash;
+import static java.lang.String.format;
+
+import java.net.URI;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import brooklyn.entity.java.JavaSoftwareProcessSshDriver;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.entity.basic.Attributes;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.basic.lifecycle.ScriptHelper;
+import brooklyn.entity.software.SshEffectorTasks;
+import brooklyn.location.OsDetails;
+import brooklyn.location.basic.SshMachineLocation;
+import brooklyn.util.collections.MutableMap;
+import brooklyn.util.net.Urls;
+import brooklyn.util.os.Os;
+import brooklyn.util.ssh.BashCommands;
+import brooklyn.util.task.DynamicTasks;
+import brooklyn.util.task.ssh.SshTasks;
+import brooklyn.util.text.Strings;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Optional;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+
+// TODO: Alter -env ERL_CRASH_DUMP path in vm.args
+public class RiakNodeSshDriver extends JavaSoftwareProcessSshDriver implements RiakNodeDriver {
+
+    private static final Logger LOG = LoggerFactory.getLogger(RiakNodeSshDriver.class);
+    private static final String sbinPath = "$PATH:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin";
+    private static final String INSTALLING_FALLBACK = INSTALLING + "_fallback";
+
+    public RiakNodeSshDriver(final RiakNodeImpl entity, final SshMachineLocation machine) {
+        super(entity, machine);
+    }
+
+    @Override
+    protected String getLogFileLocation() {
+        return "/var/log/riak/solr.log";
+    }
+
+    @Override
+    public RiakNodeImpl getEntity() {
+        return RiakNodeImpl.class.cast(super.getEntity());
+    }
+
+    @Override
+    public Map<String, String> getShellEnvironment() {
+        MutableMap<String, String> result = MutableMap.copyOf(super.getShellEnvironment());
+        // how to change epmd port, according to
+        // http://serverfault.com/questions/582787/how-to-change-listening-interface-of-rabbitmqs-epmd-port-4369
+        if (getEntity().getEpmdListenerPort() != null) {
+            result.put("ERL_EPMD_PORT", Integer.toString(getEntity().getEpmdListenerPort()));
+        }
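+        // riak's control scripts wait up to WAIT_FOR_ERLANG seconds for the Erlang VM to come up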
+        result.put("WAIT_FOR_ERLANG", "60");
+        return result;
+    }
+
+    @Override
+    public void preInstall() {
+        resolver = Entities.newDownloader(this);
+        setExpandedInstallDir(Os.mergePaths(getInstallDir(), resolver.getUnpackedDirectoryName(format("riak-%s", getVersion()))));
+
+        // Set package install attribute
+        OsDetails osDetails = getMachine().getMachineDetails().getOsDetails();
+        if (osDetails.isLinux()) {
+            entity.setAttribute(RiakNode.RIAK_PACKAGE_INSTALL, true);
+        } else if (osDetails.isMac()) {
+            entity.setAttribute(RiakNode.RIAK_PACKAGE_INSTALL, false);
+        }
+    }
+
+    @Override
+    public void install() {
+        if (entity.getConfig(Attributes.DOWNLOAD_URL) != null) {
+            LOG.warn("Ignoring download.url {}, use download.url.rhelcentos or download.url.mac", entity.getConfig(Attributes.DOWNLOAD_URL));
+        }
+
+        OsDetails osDetails = getMachine().getMachineDetails().getOsDetails();
+        List<String> commands = Lists.newLinkedList();
+        if (osDetails.isLinux()) {
+            if (getEntity().isPackageDownloadUrlProvided()) {
+                commands.addAll(installLinuxFromPackageUrl());
+            } else {
+                commands.addAll(installFromPackageCloud());
+            }
+        } else if (osDetails.isMac()) {
+            commands.addAll(installMac());
+        } else if (osDetails.isWindows()) {
+            throw new UnsupportedOperationException("RiakNode not supported on Windows instances");
+        } else {
+            throw new IllegalStateException("Machine was not detected as linux, mac or windows! Installation does not know how to proceed with " +
+                    getMachine() + ". Details: " + getMachine().getMachineDetails().getOsDetails());
+        }
+
+        int result = newScript(INSTALLING)
+                .body.append(commands)
+                .failIfBodyEmpty()
+                .execute();
+
+        if (result != 0 && osDetails.isLinux()) {
+            result = newScript(INSTALLING_FALLBACK)
+                    .body.append(installLinuxFromPackageUrl())
+                    .execute();
+        }
+
+        if (result != 0) {
+            throw new IllegalStateException(String.format("Install failed with result %d", result));
+        }
+    }
+
+    private List<String> installLinuxFromPackageUrl() {
+        DynamicTasks.queueIfPossible(SshTasks.dontRequireTtyForSudo(getMachine(), SshTasks.OnFailingTask.WARN_OR_IF_DYNAMIC_FAIL_MARKING_INESSENTIAL)).orSubmitAndBlock();
+
+        String expandedInstallDir = getExpandedInstallDir();
+        String installBin = Urls.mergePaths(expandedInstallDir, "bin");
+        String saveAsYum = "riak.rpm";
+        String saveAsApt = "riak.deb";
+        OsDetails osDetails = getMachine().getOsDetails();
+
+        String downloadUrl;
+        String osReleaseCmd;
+        if ("debian".equalsIgnoreCase(osDetails.getName())) {
+            // TODO osDetails.getName() is returning "linux", instead of debian/ubuntu on AWS with jenkins image,
+            //      running as an integration test targeting localhost.
+            // TODO Debian support (default debian image fails with 'sudo: command not found')
+            downloadUrl = (String)entity.getAttribute(RiakNode.DOWNLOAD_URL_DEBIAN);
+            osReleaseCmd = osDetails.getVersion().substring(0, osDetails.getVersion().indexOf("."));
+        } else {
+            // assume Ubuntu
+            downloadUrl = (String)entity.getAttribute(RiakNode.DOWNLOAD_URL_UBUNTU);
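+            // keep $OS_RELEASE if packages are published for it (lucid/natty/precise); otherwise fall back to precise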
+            osReleaseCmd = "`lsb_release -sc` && " +
+                    "export OS_RELEASE=`([[ \"lucid natty precise\" =~ (^| )\\$OS_RELEASE($| ) ]] && echo $OS_RELEASE || echo precise)`";
+        }
+        String apt = chainGroup(
+                //debian fix
+                "export PATH=" + sbinPath,
+                "which apt-get",
+                ok(sudo("apt-get -y --allow-unauthenticated install logrotate libpam0g-dev libssl0.9.8")),
+                "export OS_NAME=" + Strings.toLowerCase(osDetails.getName()),
+                "export OS_RELEASE=" + osReleaseCmd,
+                String.format("wget -O %s %s", saveAsApt, downloadUrl),
+                sudo(String.format("dpkg -i %s", saveAsApt)));
+        String yum = chainGroup(
+                "which yum",
+                ok(sudo("yum -y install openssl")),
+                String.format("wget -O %s %s", saveAsYum, entity.getAttribute(RiakNode.DOWNLOAD_URL_RHEL_CENTOS)),
+                sudo(String.format("yum localinstall -y %s", saveAsYum)));
+        return ImmutableList.<String>builder()
+                .add("mkdir -p " + installBin)
+                .add(INSTALL_CURL)
+                .add(alternatives(apt, yum))
+                .add("ln -s `which riak` " + Urls.mergePaths(installBin, "riak"))
+                .add("ln -s `which riak-admin` " + Urls.mergePaths(installBin, "riak-admin"))
+                .build();
+    }
+
+    private List<String> installFromPackageCloud() {
+        OsDetails osDetails = getMachine().getMachineDetails().getOsDetails();
+        return ImmutableList.<String>builder()
+                .add(osDetails.getName().toLowerCase().contains("debian") ? addSbinPathCommand() : "")
+                .add(ifNotExecutable("curl", INSTALL_CURL))
+                .addAll(ifExecutableElse("yum", installDebianBased(), installRpmBased()))
+                .build();
+    }
+
+    private ImmutableList<String> installDebianBased() {
+        return ImmutableList.<String>builder()
+                .add("curl https://packagecloud.io/install/repositories/basho/riak/script.deb.sh | " + BashCommands.sudo("bash"))
+                .add(BashCommands.sudo("apt-get install --assume-yes riak=" + getEntity().getFullVersion() + "-1"))
+                .build();
+    }
+
+    private ImmutableList<String> installRpmBased() {
+        return ImmutableList.<String>builder()
+                .add("curl https://packagecloud.io/install/repositories/basho/riak/script.rpm.sh | " + BashCommands.sudo("bash"))
+                .add(BashCommands.sudo("yum install -y riak-" + getEntity().getFullVersion() + "*"))
+                .build();
+    }
+
+    protected List<String> installMac() {
+        String saveAs = resolver.getFilename();
+        String url = entity.getAttribute(RiakNode.DOWNLOAD_URL_MAC);
+        return ImmutableList.<String>builder()
+                .add(INSTALL_TAR)
+                .add(INSTALL_CURL)
+                .add(commandToDownloadUrlAs(url, saveAs))
+                .add("tar xzvf " + saveAs)
+                .build();
+    }
+
+    @Override
+    public void customize() {
+        checkRiakOnPath();
+
+        // create the entity's runDir
+        newScript(CUSTOMIZING).execute();
+
+        OsDetails osDetails = getMachine().getMachineDetails().getOsDetails();
+
+        List<String> commands = Lists.newLinkedList();
+        commands.add(sudo("mkdir -p " + getRiakEtcDir()));
+
+        if (isVersion1()) {
+            String vmArgsTemplate = processTemplate(entity.getConfig(RiakNode.RIAK_VM_ARGS_TEMPLATE_URL));
+            String saveAsVmArgs = Urls.mergePaths(getRunDir(), "vm.args");
+            DynamicTasks.queue(SshEffectorTasks.put(saveAsVmArgs).contents(vmArgsTemplate));
+            commands.add(sudo("mv " + saveAsVmArgs + " " + getRiakEtcDir()));
+
+            String appConfigTemplate = processTemplate(entity.getConfig(RiakNode.RIAK_APP_CONFIG_TEMPLATE_URL));
+            String saveAsAppConfig = Urls.mergePaths(getRunDir(), "app.config");
+            DynamicTasks.queue(SshEffectorTasks.put(saveAsAppConfig).contents(appConfigTemplate));
+            commands.add(sudo("mv " + saveAsAppConfig + " " + getRiakEtcDir()));
+        } else {
+            String templateUrl = osDetails.isMac() ? entity.getConfig(RiakNode.RIAK_CONF_TEMPLATE_URL_MAC) :
+                    entity.getConfig(RiakNode.RIAK_CONF_TEMPLATE_URL_LINUX);
+            String riakConfContent = processTemplate(templateUrl);
+            String saveAsRiakConf = Urls.mergePaths(getRunDir(), "riak.conf");
+
+            if (Strings.isNonBlank(entity.getConfig(RiakNode.RIAK_CONF_ADDITIONAL_CONTENT))) {
+                String additionalConfigContent = processTemplateContents(entity.getConfig(RiakNode.RIAK_CONF_ADDITIONAL_CONTENT));
+                riakConfContent += "\n## Brooklyn note: additional config\n";
+                riakConfContent += additionalConfigContent;
+            }
+
+            DynamicTasks.queue(SshEffectorTasks.put(saveAsRiakConf).contents(riakConfContent));
+            commands.add(sudo("mv " + saveAsRiakConf + " " + getRiakEtcDir()));
+        }
+
+        // increase the open-file limit (the default minimum for Riak is 4096)
+        // TODO: detect the actual limit, then modify it only if needed
+        // TODO: modify ulimit for Linux distros too
+        //    commands.add(sudo("launchctl limit maxfiles 4096 32768"));
+        if (osDetails.isMac()) {
+            commands.add("ulimit -n 4096");
+        }
+
+        if (osDetails.isLinux() && isVersion1()) {
+            commands.add(sudo("chown -R riak:riak " + getRiakEtcDir()));
+        }
+
+        // TODO platform_*_dir
+        // TODO riak config log
+
+        ScriptHelper customizeScript = newScript(CUSTOMIZING)
+                .failOnNonZeroResultCode()
+                .body.append(commands);
+
+        if (!isRiakOnPath()) {
+            addRiakOnPath(customizeScript);
+        }
+        customizeScript.failOnNonZeroResultCode().execute();
+
+        if (osDetails.isLinux()) {
+            ImmutableMap<String, String> sysctl = ImmutableMap.<String, String>builder()
+                    .put("vm.swappiness", "0")
+                    .put("net.core.somaxconn", "40000")
+                    .put("net.ipv4.tcp_max_syn_backlog", "40000")
+                    .put("net.ipv4.tcp_sack",  "1")
+                    .put("net.ipv4.tcp_window_scaling",  "15")
+                    .put("net.ipv4.tcp_fin_timeout",     "1")
+                    .put("net.ipv4.tcp_keepalive_intvl", "30")
+                    .put("net.ipv4.tcp_tw_reuse",        "1")
+                    .put("net.ipv4.tcp_moderate_rcvbuf", "1")
+                    .build();
+
+            ScriptHelper optimize = newScript(CUSTOMIZING + "network")
+                .body.append(sudo("sysctl " + Joiner.on(' ').withKeyValueSeparator("=").join(sysctl)));
+
+            Optional<Boolean> enable = Optional.fromNullable(entity.getConfig(RiakNode.OPTIMIZE_HOST_NETWORKING));
+            if (!enable.isPresent()) optimize.inessential();
+            if (enable.or(true)) optimize.execute();
+        }
+
+        // set the riak node name
+        entity.setAttribute(RiakNode.RIAK_NODE_NAME, format("riak@%s", getSubnetHostname()));
+    }
+
+    @Override
+    public void launch() {
+        List<String> commands = Lists.newLinkedList();
+
+        if (isPackageInstall()) {
+            commands.add(addSbinPathCommand());
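+            // raise the open-file limit in the same shell that starts the service, as ulimit only affects the current process and its children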
+            commands.add(sudo(format("sh -c \"ulimit -n %s && service riak start\"", maxOpenFiles())));
+        } else {
+            // NOTE: See instructions at http://superuser.com/questions/433746/is-there-a-fix-for-the-too-many-open-files-in-system-error-on-os-x-10-7-1
+            // for increasing the system limit for number of open files
+            commands.add("ulimit -n 65536 || true"); // `BashCommands.ok` will put this in parentheses, which will set ulimit -n in the subshell
+            commands.add(format("%s start >/dev/null 2>&1 < /dev/null &", getRiakCmd()));
+        }
+
+        ScriptHelper launchScript = newScript(LAUNCHING)
+                .body.append(commands);
+
+        if (!isRiakOnPath()) {
+            addRiakOnPath(launchScript);
+        }
+        launchScript.failOnNonZeroResultCode().execute();
+
+        String mainUri = String.format("http://%s:%s/admin", entity.getAttribute(Attributes.HOSTNAME), entity.getAttribute(RiakNode.RIAK_WEB_PORT));
+        entity.setAttribute(Attributes.MAIN_URI, URI.create(mainUri));
+    }
+
+    @Override
+    public void stop() {
+        leaveCluster();
+
+        String command = format("%s stop", getRiakCmd());
+        command = isPackageInstall() ? sudo(command) : command;
+
+        ScriptHelper stopScript = newScript(ImmutableMap.of(USE_PID_FILE, false), STOPPING)
+                .body.append(command);
+
+        if (!isRiakOnPath()) {
+            addRiakOnPath(stopScript);
+        }
+
+        int result = stopScript.failOnNonZeroResultCode().execute();
+        if (result != 0) {
+            newScript(ImmutableMap.of(USE_PID_FILE, false), STOPPING).execute();
+        }
+    }
+
+    @Override
+    public boolean isRunning() {
+        // Version 2.0.0 requires sudo for `riak ping`
+        ScriptHelper checkRunningScript = newScript(CHECK_RUNNING)
+                .body.append(sudo(format("%s ping", getRiakCmd())));
+
+        if (!isRiakOnPath()) {
+            addRiakOnPath(checkRunningScript);
+        }
+        return (checkRunningScript.execute() == 0);
+    }
+
+    public boolean isPackageInstall() {
+        return entity.getAttribute(RiakNode.RIAK_PACKAGE_INSTALL);
+    }
+
+    public boolean isRiakOnPath() {
+        return entity.getAttribute(RiakNode.RIAK_ON_PATH);
+    }
+
+    public String getRiakEtcDir() {
+        return isPackageInstall() ? "/etc/riak" : Urls.mergePaths(getExpandedInstallDir(), "etc");
+    }
+
+    protected String getRiakCmd() {
+        return isPackageInstall() ? "riak" : Urls.mergePaths(getExpandedInstallDir(), "bin/riak");
+    }
+
+    protected String getRiakAdminCmd() {
+        return isPackageInstall() ? "riak-admin" : Urls.mergePaths(getExpandedInstallDir(), "bin/riak-admin");
+    }
+
+    // TODO find a way to batch commit the changes, instead of committing for every operation.
+
+    @Override
+    public void joinCluster(String nodeName) {
+        if (getRiakName().equals(nodeName)) {
+            log.warn("Cannot join Riak node: {} to itself", nodeName);
+        } else {
+            if (!hasJoinedCluster()) {
+                ScriptHelper joinClusterScript = newScript("joinCluster")
+                        .body.append(sudo(format("%s cluster join %s", getRiakAdminCmd(), nodeName)))
+                        .body.append(sudo(format("%s cluster plan", getRiakAdminCmd())))
+                        .body.append(sudo(format("%s cluster commit", getRiakAdminCmd())))
+                        .failOnNonZeroResultCode();
+
+                if (!isRiakOnPath()) {
+                    addRiakOnPath(joinClusterScript);
+                }
+
+                joinClusterScript.execute();
+
+                entity.setAttribute(RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER, Boolean.TRUE);
+            } else {
+                log.warn("entity {}: is already in the riak cluster", entity.getId());
+            }
+        }
+    }
+
+    @Override
+    public void leaveCluster() {
+        if (hasJoinedCluster()) {
+            ScriptHelper leaveClusterScript = newScript("leaveCluster")
+                    .body.append(sudo(format("%s cluster leave", getRiakAdminCmd())))
+                    .body.append(sudo(format("%s cluster plan", getRiakAdminCmd())))
+                    .body.append(sudo(format("%s cluster commit", getRiakAdminCmd())))
+                    .failOnNonZeroResultCode();
+
+            if (!isRiakOnPath()) {
+                addRiakOnPath(leaveClusterScript);
+            }
+
+            leaveClusterScript.execute();
+
+            entity.setAttribute(RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER, Boolean.FALSE);
+        } else {
+            log.warn("entity {}: has already left the riak cluster", entity.getId());
+        }
+    }
+
+    @Override
+    public void removeNode(String nodeName) {
+        ScriptHelper removeNodeScript = newScript("removeNode")
+                .body.append(sudo(format("%s cluster force-remove %s", getRiakAdminCmd(), nodeName)))
+                .body.append(sudo(format("%s down %s", getRiakAdminCmd(), nodeName)))
+                .body.append(sudo(format("%s cluster plan", getRiakAdminCmd())))
+                .body.append(sudo(format("%s cluster commit", getRiakAdminCmd())))
+                .failOnNonZeroResultCode();
+
+        if (!isRiakOnPath()) {
+            addRiakOnPath(removeNodeScript);
+        }
+
+        removeNodeScript.execute();
+    }
+
+    @Override
+    public void bucketTypeCreate(String bucketTypeName, String bucketTypeProperties) {
+        ScriptHelper bucketTypeCreateScript = newScript("bucket-type_create " + bucketTypeName)
+                .body.append(sudo(format("%s bucket-type create %s %s",
+                        getRiakAdminCmd(),
+                        bucketTypeName,
+                        escapeLiteralForDoubleQuotedBash(bucketTypeProperties))));
+        if (!isRiakOnPath()) {
+            addRiakOnPath(bucketTypeCreateScript);
+        }
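+        // a newly created bucket type must be activated before it can be used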
+        bucketTypeCreateScript.body.append(sudo(format("%s bucket-type activate %s", getRiakAdminCmd(), bucketTypeName)))
+                .failOnNonZeroResultCode();
+
+        bucketTypeCreateScript.execute();
+    }
+
+    @Override
+    public List<String> bucketTypeList() {
+        ScriptHelper bucketTypeListScript = newScript("bucket-types_list")
+                .body.append(sudo(format("%s bucket-type list", getRiakAdminCmd())))
+                .gatherOutput()
+                .noExtraOutput()
+                .failOnNonZeroResultCode();
+        if (!isRiakOnPath()) {
+            addRiakOnPath(bucketTypeListScript);
+        }
+        bucketTypeListScript.execute();
+        String stdout = bucketTypeListScript.getResultStdout();
+        return Arrays.asList(stdout.split("[\\r\\n]+"));
+    }
+
+    @Override
+    public List<String> bucketTypeStatus(String bucketTypeName) {
+        ScriptHelper bucketTypeStatusScript = newScript("bucket-type_status")
+                .body.append(sudo(format("%s bucket-type status %s", getRiakAdminCmd(), bucketTypeName)))
+                .gatherOutput()
+                .noExtraOutput()
+                .failOnNonZeroResultCode();
+        if (!isRiakOnPath()) {
+            addRiakOnPath(bucketTypeStatusScript);
+        }
+        bucketTypeStatusScript.execute();
+        String stdout = bucketTypeStatusScript.getResultStdout();
+        return Arrays.asList(stdout.split("[\\r\\n]+"));
+    }
+
+    @Override
+    public void bucketTypeUpdate(String bucketTypeName, String bucketTypeProperties) {
+        ScriptHelper bucketTypeStatusScript = newScript("bucket-type_update")
+                .body.append(sudo(format("%s bucket-type update %s %s",
+                        getRiakAdminCmd(),
+                        bucketTypeName,
+                        escapeLiteralForDoubleQuotedBash(bucketTypeProperties))))
+                .failOnNonZeroResultCode();
+        if (!isRiakOnPath()) {
+            addRiakOnPath(bucketTypeStatusScript);
+        }
+        bucketTypeStatusScript.execute();
+    }
+
+    @Override
+    public void bucketTypeActivate(String bucketTypeName) {
+        ScriptHelper bucketTypeStatusScript = newScript("bucket-type_activate")
+                .body.append(sudo(format("%s bucket-type activate %s", getRiakAdminCmd(), bucketTypeName)))
+                .failOnNonZeroResultCode();
+        if (!isRiakOnPath()) {
+            addRiakOnPath(bucketTypeStatusScript);
+        }
+        bucketTypeStatusScript.execute();
+    }
+
+    @Override
+    public void recoverFailedNode(String nodeName) {
+        // TODO find a way to detect a faulty/failed node
+        // the 'nodeName' argument is any working node in the riak cluster
+        // following the instructions at: http://docs.basho.com/riak/latest/ops/running/recovery/failed-node/
+
+        if (hasJoinedCluster()) {
+            String failedNodeName = getRiakName();
+
+            String stopCommand = format("%s stop", getRiakCmd());
+            stopCommand = isPackageInstall() ? sudo(stopCommand) : stopCommand;
+
+            String startCommand = format("%s start > /dev/null 2>&1 < /dev/null &", getRiakCmd());
+            startCommand = isPackageInstall() ? sudo(startCommand) : startCommand;
+
+            ScriptHelper recoverNodeScript = newScript("recoverNode")
+                    .body.append(stopCommand)
+                    .body.append(format("%s down %s", getRiakAdminCmd(), failedNodeName))
+                    .body.append(sudo(format("rm -rf %s", getRingStateDir())))
+                    .body.append(startCommand)
+                    .body.append(sudo(format("%s cluster join %s", getRiakAdminCmd(), nodeName)))
+                    .body.append(sudo(format("%s cluster plan", getRiakAdminCmd())))
+                    .body.append(sudo(format("%s cluster commit", getRiakAdminCmd())))
+                    .failOnNonZeroResultCode();
+
+            if (!isRiakOnPath()) {
+                addRiakOnPath(recoverNodeScript);
+            }
+
+            recoverNodeScript.execute();
+
+        } else {
+            log.warn("entity {}: is not in the riak cluster", entity.getId());
+        }
+    }
+
+    @Override
+    public void setup() {
+        if (entity.getConfig(RiakNode.SEARCH_ENABLED)) {
+            // JavaSoftwareProcessSshDriver.setup() is called in order to install java
+            super.setup();
+        }
+    }
+
+    private Boolean hasJoinedCluster() {
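+        // the attribute may be null before the node first joins a cluster; Boolean.TRUE.equals avoids an NPE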
+        return Boolean.TRUE.equals(entity.getAttribute(RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER));
+    }
+
+    protected void checkRiakOnPath() {
+        boolean riakOnPath = newScript("riakOnPath")
+                .body.append("which riak")
+                .execute() == 0;
+        entity.setAttribute(RiakNode.RIAK_ON_PATH, riakOnPath);
+    }
+
+    private String getRiakName() {
+        return entity.getAttribute(RiakNode.RIAK_NODE_NAME);
+    }
+
+    private String getRingStateDir() {
+        //TODO: check for non-package install.
+        return isPackageInstall() ? "/var/lib/riak/ring" : Urls.mergePaths(getExpandedInstallDir(), "lib/ring");
+    }
+
+    protected boolean isVersion1() {
+        return getVersion().startsWith("1.");
+    }
+
+    @Override
+    public String getOsMajorVersion() {
+        OsDetails osDetails = getMachine().getMachineDetails().getOsDetails();
+        String osVersion = osDetails.getVersion();
+        return osVersion.contains(".") ? osVersion.substring(0, osVersion.indexOf(".")) : osVersion;
+    }
+
+    private void addRiakOnPath(ScriptHelper scriptHelper) {
+        Map<String, String> newPathVariable = ImmutableMap.of("PATH", sbinPath);
+//        log.warn("riak command not found on PATH. Altering future commands' environment variables from {} to {}", getShellEnvironment(), newPathVariable);
+        scriptHelper.environmentVariablesReset(newPathVariable);
+    }
+
+    public Integer maxOpenFiles() {
+        return entity.getConfig(RiakNode.RIAK_MAX_OPEN_FILES);
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/solr/SolrServer.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/solr/SolrServer.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/solr/SolrServer.java
new file mode 100644
index 0000000..f04231a
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/solr/SolrServer.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.solr;
+
+import java.util.Map;
+
+import org.apache.brooklyn.catalog.Catalog;
+import brooklyn.config.ConfigKey;
+import brooklyn.entity.basic.BrooklynConfigKeys;
+import brooklyn.entity.basic.ConfigKeys;
+import brooklyn.entity.basic.SoftwareProcess;
+import brooklyn.entity.java.UsesJava;
+import brooklyn.entity.java.UsesJavaMXBeans;
+import brooklyn.entity.java.UsesJmx;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
+import brooklyn.event.basic.PortAttributeSensorAndConfigKey;
+import brooklyn.location.basic.PortRanges;
+import brooklyn.util.flags.SetFromFlag;
+import brooklyn.util.time.Duration;
+
+import com.google.common.collect.Maps;
+import com.google.common.reflect.TypeToken;
+
+/**
+ * An {@link brooklyn.entity.Entity} that represents a Solr node.
+ */
+@Catalog(name="Apache Solr Node", description="Solr is the popular, blazing fast open source enterprise search " +
+        "platform from the Apache Lucene project.", iconUrl="classpath:///solr-logo.jpeg")
+@ImplementedBy(SolrServerImpl.class)
+public interface SolrServer extends SoftwareProcess, UsesJava, UsesJmx, UsesJavaMXBeans {
+
+    @SetFromFlag("version")
+    ConfigKey<String> SUGGESTED_VERSION = ConfigKeys.newConfigKeyWithDefault(SoftwareProcess.SUGGESTED_VERSION, "4.7.2");
+
+    @SetFromFlag("downloadUrl")
+    BasicAttributeSensorAndConfigKey<String> DOWNLOAD_URL = new BasicAttributeSensorAndConfigKey<String>(
+            SoftwareProcess.DOWNLOAD_URL, "${driver.mirrorUrl}/${version}/solr-${version}.tgz");
+
+    /** download mirror, if desired */
+    @SetFromFlag("mirrorUrl")
+    ConfigKey<String> MIRROR_URL = ConfigKeys.newStringConfigKey("solr.install.mirror.url", "URL of mirror",
+            "http://mirrors.ukfast.co.uk/sites/ftp.apache.org/lucene/solr/");
+
+    @SetFromFlag("solrPort")
+    PortAttributeSensorAndConfigKey SOLR_PORT = new PortAttributeSensorAndConfigKey("solr.http.port", "Solr HTTP communications port",
+            PortRanges.fromString("8983+"));
+
+    @SetFromFlag("solrConfigTemplateUrl")
+    ConfigKey<String> SOLR_CONFIG_TEMPLATE_URL = ConfigKeys.newStringConfigKey(
+            "solr.config.templateUrl", "Template file (in freemarker format) for the solr.xml config file", 
+            "classpath://org/apache/brooklyn/entity/nosql/solr/solr.xml");
+
+    @SetFromFlag("coreConfigMap")
+    ConfigKey<Map<String, String>> SOLR_CORE_CONFIG = ConfigKeys.newConfigKey(new TypeToken<Map<String, String>>() { },
+            "solr.core.config", "Map of core names to core configuration archive URL",
+            Maps.<String, String>newHashMap());
+
+    ConfigKey<Duration> START_TIMEOUT = ConfigKeys.newConfigKeyWithDefault(BrooklynConfigKeys.START_TIMEOUT, Duration.FIVE_MINUTES);
+
+    /* Accessors used from template */
+
+    Integer getSolrPort();
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/solr/SolrServerDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/solr/SolrServerDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/solr/SolrServerDriver.java
new file mode 100644
index 0000000..72e1049
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/solr/SolrServerDriver.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.solr;
+
+import brooklyn.entity.java.JavaSoftwareProcessDriver;
+
+public interface SolrServerDriver extends JavaSoftwareProcessDriver {
+
+    Integer getSolrPort();
+
+    String getSolrConfigTemplateUrl();
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/solr/SolrServerImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/solr/SolrServerImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/solr/SolrServerImpl.java
new file mode 100644
index 0000000..a42d15b
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/solr/SolrServerImpl.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.solr;
+
+import brooklyn.entity.basic.Attributes;
+import brooklyn.entity.basic.SoftwareProcessImpl;
+import brooklyn.event.feed.http.HttpFeed;
+import brooklyn.event.feed.http.HttpPollConfig;
+import brooklyn.event.feed.http.HttpValueFunctions;
+import brooklyn.location.access.BrooklynAccessUtils;
+import com.google.common.base.Functions;
+import com.google.common.net.HostAndPort;
+
+import java.net.URI;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Implementation of {@link SolrServer}.
+ */
+public class SolrServerImpl extends SoftwareProcessImpl implements SolrServer {
+
+    @Override
+    public Integer getSolrPort() {
+        return getAttribute(SolrServer.SOLR_PORT);
+    }
+
+    @Override
+    public Class<SolrServerDriver> getDriverInterface() {
+        return SolrServerDriver.class;
+    }
+
+    private volatile HttpFeed httpFeed;
+
+    @Override 
+    protected void connectSensors() {
+        super.connectSensors();
+
+        HostAndPort hp = BrooklynAccessUtils.getBrooklynAccessibleAddress(this, getSolrPort());
+
+        String solrUri = String.format("http://%s:%d/solr", hp.getHostText(), hp.getPort());
+        setAttribute(Attributes.MAIN_URI, URI.create(solrUri));
+
+        httpFeed = HttpFeed.builder()
+                .entity(this)
+                .period(500, TimeUnit.MILLISECONDS)
+                .baseUri(solrUri)
+                .poll(new HttpPollConfig<Boolean>(SERVICE_UP)
+                        .onSuccess(HttpValueFunctions.responseCodeEquals(200))
+                        .onFailureOrException(Functions.constant(false)))
+                .build();
+    }
+
+    @Override
+    public void disconnectSensors() {
+        super.disconnectSensors();
+
+        if (httpFeed != null) httpFeed.stop();
+    }
+}

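A minimal usage sketch for the entity above (not part of this commit; app, a managed Brooklyn application, and location, a provisioned machine location, are assumptions supplied by the caller):

    SolrServer solr = app.createAndManageChild(EntitySpec.create(SolrServer.class));
    app.start(ImmutableList.of(location));
    // connectSensors() publishes MAIN_URI once the HTTP feed sees a 200 response from /solr
    URI solrUri = solr.getAttribute(Attributes.MAIN_URI);
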
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/solr/SolrServerSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/solr/SolrServerSshDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/solr/SolrServerSshDriver.java
new file mode 100644
index 0000000..f05624e
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/solr/SolrServerSshDriver.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.solr;
+
+import static java.lang.String.format;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import brooklyn.entity.java.JavaSoftwareProcessSshDriver;
+import brooklyn.entity.java.UsesJmx;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.entity.basic.AbstractSoftwareProcessSshDriver;
+import brooklyn.entity.basic.Entities;
+import brooklyn.location.Location;
+import brooklyn.location.basic.SshMachineLocation;
+import brooklyn.util.collections.MutableMap;
+import brooklyn.util.file.ArchiveUtils;
+import brooklyn.util.net.Networking;
+import brooklyn.util.net.Urls;
+import brooklyn.util.os.Os;
+import brooklyn.util.ssh.BashCommands;
+import brooklyn.util.stream.Streams;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Sets;
+
+/**
+ * Start a {@link SolrServer} in a {@link Location} accessible over ssh.
+ */
+public class SolrServerSshDriver extends JavaSoftwareProcessSshDriver implements SolrServerDriver {
+
+    private static final Logger log = LoggerFactory.getLogger(SolrServerSshDriver.class);
+
+    public SolrServerSshDriver(SolrServerImpl entity, SshMachineLocation machine) {
+        super(entity, machine);
+    }
+
+    @Override
+    public Integer getSolrPort() { return entity.getAttribute(SolrServer.SOLR_PORT); }
+
+    @Override
+    public String getSolrConfigTemplateUrl() { return entity.getConfig(SolrServer.SOLR_CONFIG_TEMPLATE_URL); }
+
+    public String getMirrorUrl() { return entity.getConfig(SolrServer.MIRROR_URL); }
+
+    public String getPidFile() { return Os.mergePaths(getRunDir(), "solr.pid"); }
+
+    @Override
+    public void preInstall() {
+        resolver = Entities.newDownloader(this);
+        setExpandedInstallDir(Os.mergePaths(getInstallDir(), resolver.getUnpackedDirectoryName(format("solr-%s", getVersion()))));
+    }
+
+    @Override
+    public void install() {
+        List<String> urls = resolver.getTargets();
+        String saveAs = resolver.getFilename();
+
+        List<String> commands = ImmutableList.<String>builder()
+                .addAll(BashCommands.commandsToDownloadUrlsAs(urls, saveAs))
+                .add(BashCommands.INSTALL_TAR)
+                .add("tar xzfv " + saveAs)
+                .build();
+
+        newScript(INSTALLING)
+                .failOnNonZeroResultCode()
+                .body.append(commands)
+                .execute();
+    }
+
+    public Set<Integer> getPortsUsed() {
+        Set<Integer> result = Sets.newLinkedHashSet(super.getPortsUsed());
+        result.addAll(getPortMap().values());
+        return result;
+    }
+
+    private Map<String, Integer> getPortMap() {
+        return ImmutableMap.<String, Integer>builder()
+                .put("solrPort", getSolrPort())
+                .put("jmxPort", entity.getAttribute(UsesJmx.JMX_PORT))
+                .put("rmiPort", entity.getAttribute(UsesJmx.RMI_REGISTRY_PORT))
+                .build();
+    }
+
+    @Override
+    public void customize() {
+        log.debug("Customizing {}", entity);
+        Networking.checkPortsValid(getPortMap());
+
+        ImmutableList.Builder<String> commands = new ImmutableList.Builder<String>()
+                .add("mkdir contrib")
+                .add("mkdir solr")
+                .add(String.format("cp -R %s/example/{etc,contexts,lib,logs,resources,webapps} .", getExpandedInstallDir()))
+                .add(String.format("cp %s/example/start.jar .", getExpandedInstallDir()))
+                .add(String.format("cp %s/dist/*.jar lib/", getExpandedInstallDir()))
+                .add(String.format("cp %s/contrib/*/lib/*.jar contrib/", getExpandedInstallDir()));
+
+        newScript(CUSTOMIZING)
+                .body.append(commands.build())
+                .execute();
+
+        // Copy the solr.xml configuration file across
+        String configFileContents = processTemplate(getSolrConfigTemplateUrl());
+        String destinationConfigFile = String.format("%s/solr/solr.xml", getRunDir());
+        getMachine().copyTo(Streams.newInputStreamWithContents(configFileContents), destinationConfigFile);
+
+        // Copy the core definitions across
+        Map<String, String> coreConfig = entity.getConfig(SolrServer.SOLR_CORE_CONFIG);
+        for (String core : coreConfig.keySet()) {
+            String url = coreConfig.get(core);
+            String solr = Urls.mergePaths(getRunDir(), "solr");
+            ArchiveUtils.deploy(url, getMachine(), solr);
+        }
+    }
+
+    @Override
+    public void launch() {
+        newScript(MutableMap.of(USE_PID_FILE, getPidFile()), LAUNCHING)
+                .body.append("nohup java $JAVA_OPTS -jar start.jar > ./logs/console.log 2>&1 &")
+                .execute();
+    }
+
+    @Override
+    public boolean isRunning() {
+        return newScript(MutableMap.of(USE_PID_FILE, getPidFile()), CHECK_RUNNING).execute() == 0;
+    }
+
+    @Override
+    public void stop() {
+        newScript(MutableMap.of(USE_PID_FILE, getPidFile()), STOPPING).execute();
+    }
+
+    @Override
+    protected String getLogFileLocation() {
+        return Urls.mergePaths(getRunDir(), "solr", "logs", "solr.log");
+    }
+}

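The core-deployment loop in customize() above reads SOLR_CORE_CONFIG as a map of core names to archive URLs, deploying each archive into <runDir>/solr. A hypothetical configuration sketch (the core name and URL are illustrative only, not part of this commit):

    EntitySpec.create(SolrServer.class)
        .configure(SolrServer.SOLR_CORE_CONFIG, ImmutableMap.of(
            "example", "classpath://solr/example-core.tgz"));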

[22/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseClusterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseClusterImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseClusterImpl.java
deleted file mode 100644
index 5c47fe7..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseClusterImpl.java
+++ /dev/null
@@ -1,597 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchbase;
-
-import static brooklyn.util.JavaGroovyEquivalents.groovyTruth;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-
-import javax.annotation.Nonnull;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.config.render.RendererHints;
-import brooklyn.enricher.Enrichers;
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.Attributes;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.basic.EntityInternal;
-import brooklyn.entity.basic.ServiceStateLogic;
-import brooklyn.entity.basic.SoftwareProcess;
-import brooklyn.entity.effector.Effectors;
-import brooklyn.entity.group.AbstractMembershipTrackingPolicy;
-import brooklyn.entity.group.DynamicClusterImpl;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.DependentConfiguration;
-import brooklyn.event.feed.http.HttpFeed;
-import brooklyn.event.feed.http.HttpPollConfig;
-import brooklyn.event.feed.http.HttpValueFunctions;
-import brooklyn.event.feed.http.JsonFunctions;
-import brooklyn.location.access.BrooklynAccessUtils;
-import brooklyn.policy.PolicySpec;
-import brooklyn.util.collections.CollectionFunctionals;
-import brooklyn.util.collections.MutableSet;
-import brooklyn.util.collections.QuorumCheck;
-import brooklyn.util.exceptions.Exceptions;
-import brooklyn.util.guava.Functionals;
-import brooklyn.util.guava.IfFunctions;
-import brooklyn.util.math.MathPredicates;
-import brooklyn.util.task.DynamicTasks;
-import brooklyn.util.task.TaskBuilder;
-import brooklyn.util.task.Tasks;
-import brooklyn.util.text.ByteSizeStrings;
-import brooklyn.util.text.StringFunctions;
-import brooklyn.util.text.Strings;
-import brooklyn.util.time.Duration;
-import brooklyn.util.time.Time;
-
-import com.google.common.base.Function;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicates;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-import com.google.common.net.HostAndPort;
-import com.google.gson.JsonArray;
-import com.google.gson.JsonElement;
-
-public class CouchbaseClusterImpl extends DynamicClusterImpl implements CouchbaseCluster {
-    
-    /*
-     * Refactoring required:
-     * 
-     * Currently, on start() the cluster waits for an arbitrary SERVICE_UP_TIME_OUT (3 minutes) before assuming that a quorate
-     * number of servers is available. The servers are then added to the cluster, and a further wait period of
-     * DELAY_BEFORE_ADVERTISING_CLUSTER (30 seconds) is used before advertising the cluster.
-     * 
-     * DELAY_BEFORE_ADVERTISING_CLUSTER: It should be possible to refactor this away by adding a repeater that will poll
-     * the REST API of the primary node (once established) until the API indicates that the cluster is available
-     * 
-     * SERVICE_UP_TIME_OUT: The refactoring of this would be more substantial. One method would be to remove the bulk of the 
-     * logic from the start() method, and rely entirely on the membership tracking policy and the onServerPoolMemberChanged()
-     * method. The addition of a RUNNING sensor on the nodes would allow the cluster to determine that a node is up and
-     * running but has not yet been added to the cluster. The IS_CLUSTER_INITIALIZED key could be used to determine whether
-     * or not the cluster should be initialized, or a node simply added to an existing cluster. A repeater could be used
-     * in the driver to ensure that the method does not return until the node has been fully added.
-     * 
-     * There is an (incomplete) first pass at this here: https://github.com/Nakomis/incubator-brooklyn/compare/couchbase-running-sensor
-     * However, there have been significant changes to the cluster initialization since that work was done, so it will probably
-     * need to be redone.
-     * 
-     * Additionally, during bucket creation, an HttpPoll is used to check that the bucket has been created. This should be
-     * refactored to use a Repeater in CouchbaseNodeSshDriver.bucketCreate(), in a similar way to the one employed in
-     * CouchbaseNodeSshDriver.rebalance(). Were this done, this class could simply queue the bucket creation tasks.
-     * 
-     */
-    
-    private static final Logger log = LoggerFactory.getLogger(CouchbaseClusterImpl.class);
-    private final Object mutex = new Object[0];
-    // Used to serialize bucket creation as only one bucket can be created at a time,
-    // so a feed is used to determine when a bucket has finished being created
-    private final AtomicReference<HttpFeed> resetBucketCreation = new AtomicReference<HttpFeed>();
-
-    public void init() {
-        log.info("Initializing the Couchbase cluster...");
-        super.init();
-        
-        addEnricher(
-            Enrichers.builder()
-                .transforming(COUCHBASE_CLUSTER_UP_NODES)
-                .from(this)
-                .publishing(COUCHBASE_CLUSTER_UP_NODE_ADDRESSES)
-                .computing(new ListOfHostAndPort()).build() );
-        addEnricher(
-            Enrichers.builder()
-                .transforming(COUCHBASE_CLUSTER_UP_NODE_ADDRESSES)
-                .from(this)
-                .publishing(COUCHBASE_CLUSTER_CONNECTION_URL)
-                .computing(
-                    IfFunctions.<List<String>>ifPredicate(
-                        Predicates.compose(MathPredicates.lessThan(getConfig(CouchbaseCluster.INITIAL_QUORUM_SIZE)), 
-                            CollectionFunctionals.sizeFunction(0)) )
-                    .value((String)null)
-                    .defaultApply(
-                        Functionals.chain(
-                            CollectionFunctionals.<String,List<String>>limit(4), 
-                            StringFunctions.joiner(","),
-                            StringFunctions.formatter("http://%s/"))) )
-                .build() );
-        
-        Map<? extends AttributeSensor<? extends Number>, ? extends AttributeSensor<? extends Number>> enricherSetup = 
-            ImmutableMap.<AttributeSensor<? extends Number>, AttributeSensor<? extends Number>>builder()
-                .put(CouchbaseNode.OPS, CouchbaseCluster.OPS_PER_NODE)
-                .put(CouchbaseNode.COUCH_DOCS_DATA_SIZE, CouchbaseCluster.COUCH_DOCS_DATA_SIZE_PER_NODE)
-                .put(CouchbaseNode.COUCH_DOCS_ACTUAL_DISK_SIZE, CouchbaseCluster.COUCH_DOCS_ACTUAL_DISK_SIZE_PER_NODE)
-                .put(CouchbaseNode.EP_BG_FETCHED, CouchbaseCluster.EP_BG_FETCHED_PER_NODE)
-                .put(CouchbaseNode.MEM_USED, CouchbaseCluster.MEM_USED_PER_NODE)
-                .put(CouchbaseNode.COUCH_VIEWS_ACTUAL_DISK_SIZE, CouchbaseCluster.COUCH_VIEWS_ACTUAL_DISK_SIZE_PER_NODE)
-                .put(CouchbaseNode.CURR_ITEMS, CouchbaseCluster.CURR_ITEMS_PER_NODE)
-                .put(CouchbaseNode.VB_REPLICA_CURR_ITEMS, CouchbaseCluster.VB_REPLICA_CURR_ITEMS_PER_NODE)
-                .put(CouchbaseNode.COUCH_VIEWS_DATA_SIZE, CouchbaseCluster.COUCH_VIEWS_DATA_SIZE_PER_NODE)
-                .put(CouchbaseNode.GET_HITS, CouchbaseCluster.GET_HITS_PER_NODE)
-                .put(CouchbaseNode.CMD_GET, CouchbaseCluster.CMD_GET_PER_NODE)
-                .put(CouchbaseNode.CURR_ITEMS_TOT, CouchbaseCluster.CURR_ITEMS_TOT_PER_NODE)
-            .build();
-        
-        for (AttributeSensor<? extends Number> nodeSensor : enricherSetup.keySet()) {
-            addSummingMemberEnricher(nodeSensor);
-            addAveragingMemberEnricher(nodeSensor, enricherSetup.get(nodeSensor));
-        }
-        
-        addEnricher(Enrichers.builder().updatingMap(Attributes.SERVICE_NOT_UP_INDICATORS)
-            .from(IS_CLUSTER_INITIALIZED).computing(
-                IfFunctions.ifNotEquals(true).value("The cluster is not yet completely initialized")
-                    .defaultValue(null).build()).build() );
-    }
-    
-    private void addAveragingMemberEnricher(AttributeSensor<? extends Number> fromSensor, AttributeSensor<? extends Number> toSensor) {
-        addEnricher(Enrichers.builder()
-            .aggregating(fromSensor)
-            .publishing(toSensor)
-            .fromMembers()
-            .computingAverage()
-            .build()
-        );
-    }
-
-    private void addSummingMemberEnricher(AttributeSensor<? extends Number> source) {
-        addEnricher(Enrichers.builder()
-            .aggregating(source)
-            .publishing(source)
-            .fromMembers()
-            .computingSum()
-            .build()
-        );
-    }
-
-    @Override
-    protected void doStart() {
-        setAttribute(IS_CLUSTER_INITIALIZED, false);
-        
-        super.doStart();
-
-        connectSensors();
-        
-        setAttribute(BUCKET_CREATION_IN_PROGRESS, false);
-
-        //start timeout before adding the servers
-        Tasks.setBlockingDetails("Pausing while Couchbase stabilizes");
-        Time.sleep(getConfig(NODES_STARTED_STABILIZATION_DELAY));
-
-        Optional<Set<Entity>> upNodes = Optional.<Set<Entity>>fromNullable(getAttribute(COUCHBASE_CLUSTER_UP_NODES));
-        if (upNodes.isPresent() && !upNodes.get().isEmpty()) {
-
-            Tasks.setBlockingDetails("Adding servers to Couchbase");
-            
-            //TODO: select a new primary node if this one fails
-            Entity primaryNode = upNodes.get().iterator().next();
-            ((EntityInternal) primaryNode).setAttribute(CouchbaseNode.IS_PRIMARY_NODE, true);
-            setAttribute(COUCHBASE_PRIMARY_NODE, primaryNode);
-
-            Set<Entity> serversToAdd = MutableSet.<Entity>copyOf(getUpNodes());
-
-            if (serversToAdd.size() >= getQuorumSize() && serversToAdd.size() > 1) {
-                log.info("Number of SERVICE_UP nodes: {} in cluster {} reached quorum {}; adding the servers", new Object[]{serversToAdd.size(), getId(), getQuorumSize()});
-                addServers(serversToAdd);
-
-                //wait for servers to be added to the couchbase server
-                try {
-                    Tasks.setBlockingDetails("Delaying before advertising cluster up");
-                    Time.sleep(getConfig(DELAY_BEFORE_ADVERTISING_CLUSTER));
-                } finally {
-                    Tasks.resetBlockingDetails();
-                }
-                
-                ((CouchbaseNode)getPrimaryNode()).rebalance();
-            } else {
-                if (getQuorumSize()>1) {
-                    log.warn(this+" is not quorate; will likely fail later, but proceeding for now");
-                }
-                for (Entity server: serversToAdd) {
-                    ((EntityInternal) server).setAttribute(CouchbaseNode.IS_IN_CLUSTER, true);
-                }
-            }
-                
-            if (getConfig(CREATE_BUCKETS)!=null) {
-                try {
-                    Tasks.setBlockingDetails("Creating buckets in Couchbase");
-
-                    createBuckets();
-                    DependentConfiguration.waitInTaskForAttributeReady(this, CouchbaseCluster.BUCKET_CREATION_IN_PROGRESS, Predicates.equalTo(false));
-
-                } finally {
-                    Tasks.resetBlockingDetails();
-                }
-            }
-
-            if (getConfig(REPLICATION)!=null) {
-                try {
-                    Tasks.setBlockingDetails("Configuring replication rules");
-
-                    List<Map<String, Object>> replRules = getConfig(REPLICATION);
-                    for (Map<String, Object> replRule: replRules) {
-                        DynamicTasks.queue(Effectors.invocation(getPrimaryNode(), CouchbaseNode.ADD_REPLICATION_RULE, replRule));
-                    }
-                    DynamicTasks.waitForLast();
-
-                } finally {
-                    Tasks.resetBlockingDetails();
-                }
-            }
-
-            setAttribute(IS_CLUSTER_INITIALIZED, true);
-            
-        } else {
-            throw new IllegalStateException("No up nodes available after starting");
-        }
-    }
-
-    @Override
-    public void stop() {
-        if (resetBucketCreation.get() != null) {
-            resetBucketCreation.get().stop();
-        }
-        super.stop();
-    }
-
-    protected void connectSensors() {
-        addPolicy(PolicySpec.create(MemberTrackingPolicy.class)
-                .displayName("Controller targets tracker")
-                .configure("group", this));
-    }
-    
-    private final static class ListOfHostAndPort implements Function<Set<Entity>, List<String>> {
-        @Override public List<String> apply(Set<Entity> input) {
-            List<String> addresses = Lists.newArrayList();
-            for (Entity entity : input) {
-                addresses.add(String.format("%s",
-                        BrooklynAccessUtils.getBrooklynAccessibleAddress(entity, entity.getAttribute(CouchbaseNode.COUCHBASE_WEB_ADMIN_PORT))));
-            }
-            return addresses;
-        }
-    }
-
-    public static class MemberTrackingPolicy extends AbstractMembershipTrackingPolicy {
-        @Override protected void onEntityChange(Entity member) {
-            ((CouchbaseClusterImpl)entity).onServerPoolMemberChanged(member);
-        }
-
-        @Override protected void onEntityAdded(Entity member) {
-            ((CouchbaseClusterImpl)entity).onServerPoolMemberChanged(member);
-        }
-
-        @Override protected void onEntityRemoved(Entity member) {
-            ((CouchbaseClusterImpl)entity).onServerPoolMemberChanged(member);
-        }
-    };
-
-    protected synchronized void onServerPoolMemberChanged(Entity member) {
-        if (log.isTraceEnabled()) log.trace("For {}, considering membership of {} which is in locations {}",
-                new Object[]{this, member, member.getLocations()});
-
-        //FIXME: make use of servers to be added after cluster initialization.
-        synchronized (mutex) {
-            if (belongsInServerPool(member)) {
-
-                Optional<Set<Entity>> upNodes = Optional.fromNullable(getUpNodes());
-                if (upNodes.isPresent()) {
-
-                    if (!upNodes.get().contains(member)) {
-                        Set<Entity> newNodes = Sets.newHashSet(getUpNodes());
-                        newNodes.add(member);
-                        setAttribute(COUCHBASE_CLUSTER_UP_NODES, newNodes);
-
-                        //add to set of servers to be added.
-                        if (isClusterInitialized()) {
-                            addServer(member);
-                        }
-                    }
-                } else {
-                    Set<Entity> newNodes = Sets.newHashSet();
-                    newNodes.add(member);
-                    setAttribute(COUCHBASE_CLUSTER_UP_NODES, newNodes);
-
-                    if (isClusterInitialized()) {
-                        addServer(member);
-                    }
-                }
-            } else {
-                Set<Entity> upNodes = getUpNodes();
-                if (upNodes != null && upNodes.contains(member)) {
-                    upNodes.remove(member);
-                    setAttribute(COUCHBASE_CLUSTER_UP_NODES, upNodes);
-                    log.info("For {}, removing couchbase node {} from cluster", new Object[]{this, member});
-                }
-            }
-            if (log.isTraceEnabled()) log.trace("Done {} checkEntity {}", this, member);
-        }
-    }
-
-    protected boolean belongsInServerPool(Entity member) {
-        if (!groovyTruth(member.getAttribute(Startable.SERVICE_UP))) {
-            if (log.isTraceEnabled()) log.trace("Members of {}, checking {}, eliminating because not up", this, member);
-            return false;
-        }
-        if (!getMembers().contains(member)) {
-            if (log.isTraceEnabled())
-                log.trace("Members of {}, checking {}, eliminating because not member", this, member);
-
-            return false;
-        }
-        if (log.isTraceEnabled()) log.trace("Members of {}, checking {}, approving", this, member);
-
-        return true;
-    }
-
-
-    protected EntitySpec<?> getMemberSpec() {
-        EntitySpec<?> result = super.getMemberSpec();
-        if (result != null) return result;
-        return EntitySpec.create(CouchbaseNode.class);
-    }
-
-    @Override
-    public int getQuorumSize() {
-        Integer quorumSize = getConfig(CouchbaseCluster.INITIAL_QUORUM_SIZE);
-        if (quorumSize != null && quorumSize > 0)
-            return quorumSize;
-        // by default the quorum would be floor(initial_cluster_size/2) + 1
-        return (int) Math.floor(getConfig(INITIAL_SIZE) / 2) + 1;
-    }
-
-    protected int getActualSize() {
-        return Optional.fromNullable(getAttribute(CouchbaseCluster.ACTUAL_CLUSTER_SIZE)).or(-1);
-    }
-
-    private Set<Entity> getUpNodes() {
-        return getAttribute(COUCHBASE_CLUSTER_UP_NODES);
-    }
-
-    private CouchbaseNode getPrimaryNode() {
-        return (CouchbaseNode) getAttribute(COUCHBASE_PRIMARY_NODE);
-    }
-
-    @Override
-    protected void initEnrichers() {
-        addEnricher(Enrichers.builder().updatingMap(ServiceStateLogic.SERVICE_NOT_UP_INDICATORS)
-            .from(COUCHBASE_CLUSTER_UP_NODES)
-            .computing(new Function<Set<Entity>, Object>() {
-                @Override
-                public Object apply(Set<Entity> input) {
-                    if (input==null) return "Couchbase up nodes not set";
-                    if (input.isEmpty()) return "No Couchbase up nodes";
-                    if (input.size() < getQuorumSize()) return "Couchbase up nodes not quorate";
-                    return null;
-                }
-            }).build());
-        
-        if (config().getLocalRaw(UP_QUORUM_CHECK).isAbsent()) {
-            // TODO Only leaving CouchbaseQuorumCheck here in case it is contained in persisted state.
-            // If so, need a transformer and then to delete it
-            @SuppressWarnings({ "unused", "hiding" })
-            @Deprecated
-            class CouchbaseQuorumCheck implements QuorumCheck {
-                @Override
-                public boolean isQuorate(int sizeHealthy, int totalSize) {
-                    // check the member count passed in AND the sensor
-                    if (sizeHealthy < getQuorumSize()) return false;
-                    return true;
-                }
-            }
-            config().set(UP_QUORUM_CHECK, new CouchbaseClusterImpl.CouchbaseQuorumCheck(this));
-        }
-        super.initEnrichers();
-    }
-    
-    static class CouchbaseQuorumCheck implements QuorumCheck {
-        private final CouchbaseCluster cluster;
-        CouchbaseQuorumCheck(CouchbaseCluster cluster) {
-            this.cluster = cluster;
-        }
-        @Override
-        public boolean isQuorate(int sizeHealthy, int totalSize) {
-            // check the member count passed in AND the sensor
-            if (sizeHealthy < cluster.getQuorumSize()) return false;
-            return true;
-        }
-    }
-    protected void addServers(Set<Entity> serversToAdd) {
-        Preconditions.checkNotNull(serversToAdd);
-        for (Entity s : serversToAdd) {
-            addServerSeveralTimes(s, 12, Duration.TEN_SECONDS);
-        }
-    }
-
-    /** try adding in a loop because we are seeing spurious port failures in AWS */
-    protected void addServerSeveralTimes(Entity s, int numRetries, Duration delayOnFailure) {
-        try {
-            addServer(s);
-        } catch (Exception e) {
-            Exceptions.propagateIfFatal(e);
-            if (numRetries<=0) throw Exceptions.propagate(e);
-            // retry once after sleep because we are getting some odd primary-change events
-            log.warn("Error adding "+s+" to "+this+", "+numRetries+" retries remaining, will retry after delay ("+e+")");
-            Time.sleep(delayOnFailure);
-            addServerSeveralTimes(s, numRetries-1, delayOnFailure);
-        }
-    }
-
-    protected void addServer(Entity serverToAdd) {
-        Preconditions.checkNotNull(serverToAdd);
-        if (serverToAdd.equals(getPrimaryNode())) {
-            // no need to add; but we pass it in anyway because it makes the calling logic easier
-            return;
-        }
-        if (!isMemberInCluster(serverToAdd)) {
-            HostAndPort webAdmin = HostAndPort.fromParts(serverToAdd.getAttribute(SoftwareProcess.SUBNET_HOSTNAME),
-                    serverToAdd.getAttribute(CouchbaseNode.COUCHBASE_WEB_ADMIN_PORT));
-            String username = serverToAdd.getConfig(CouchbaseNode.COUCHBASE_ADMIN_USERNAME);
-            String password = serverToAdd.getConfig(CouchbaseNode.COUCHBASE_ADMIN_PASSWORD);
-
-            if (isClusterInitialized()) {
-                Entities.invokeEffectorWithArgs(this, getPrimaryNode(), CouchbaseNode.SERVER_ADD_AND_REBALANCE, webAdmin.toString(), username, password).getUnchecked();
-            } else {
-                Entities.invokeEffectorWithArgs(this, getPrimaryNode(), CouchbaseNode.SERVER_ADD, webAdmin.toString(), username, password).getUnchecked();
-            }
-            //FIXME check feedback of whether the server was added.
-            ((EntityInternal) serverToAdd).setAttribute(CouchbaseNode.IS_IN_CLUSTER, true);
-        }
-    }
-
-    /** finds the cluster name specified for a node or a cluster, 
-     * using {@link CouchbaseCluster#CLUSTER_NAME} or falling back to the cluster (or node) ID. */
-    public static String getClusterName(Entity node) {
-        String name = node.getConfig(CLUSTER_NAME);
-        if (!Strings.isBlank(name)) return Strings.makeValidFilename(name);
-        return getClusterOrNode(node).getId();
-    }
-    
-    /** returns Couchbase cluster in ancestry, defaulting to the given node if none */
-    @Nonnull public static Entity getClusterOrNode(Entity node) {
-        Iterable<CouchbaseCluster> clusterNodes = Iterables.filter(Entities.ancestors(node), CouchbaseCluster.class);
-        return Iterables.getFirst(clusterNodes, node);
-    }
-    
-    public boolean isClusterInitialized() {
-        return Optional.fromNullable(getAttribute(IS_CLUSTER_INITIALIZED)).or(false);
-    }
-
-    public boolean isMemberInCluster(Entity e) {
-        return Optional.fromNullable(e.getAttribute(CouchbaseNode.IS_IN_CLUSTER)).or(false);
-    }
-    
-    public void createBuckets() {
-        //TODO: check for port conflicts if buckets are being created with a port
-        List<Map<String, Object>> bucketsToCreate = getConfig(CREATE_BUCKETS);
-        if (bucketsToCreate==null) return;
-        
-        Entity primaryNode = getPrimaryNode();
-
-        for (Map<String, Object> bucketMap : bucketsToCreate) {
-            String bucketName = bucketMap.containsKey("bucket") ? (String) bucketMap.get("bucket") : "default";
-            String bucketType = bucketMap.containsKey("bucket-type") ? (String) bucketMap.get("bucket-type") : "couchbase";
-            // the default bucket must be on this port; any other bucket must specify its own unique port
-            Integer bucketPort = bucketMap.containsKey("bucket-port") ? (Integer) bucketMap.get("bucket-port") : 11211;
-            Integer bucketRamSize = bucketMap.containsKey("bucket-ramsize") ? (Integer) bucketMap.get("bucket-ramsize") : 100;
-            Integer bucketReplica = bucketMap.containsKey("bucket-replica") ? (Integer) bucketMap.get("bucket-replica") : 1;
-
-            createBucket(primaryNode, bucketName, bucketType, bucketPort, bucketRamSize, bucketReplica);
-        }
-    }
-
-    public void createBucket(final Entity primaryNode, final String bucketName, final String bucketType, final Integer bucketPort, final Integer bucketRamSize, final Integer bucketReplica) {
-        DynamicTasks.queueIfPossible(TaskBuilder.<Void>builder().name("Creating bucket " + bucketName).body(
-                new Callable<Void>() {
-                    @Override
-                    public Void call() throws Exception {
-                        DependentConfiguration.waitInTaskForAttributeReady(CouchbaseClusterImpl.this, CouchbaseCluster.BUCKET_CREATION_IN_PROGRESS, Predicates.equalTo(false));
-                        if (CouchbaseClusterImpl.this.resetBucketCreation.get() != null) {
-                            CouchbaseClusterImpl.this.resetBucketCreation.get().stop();
-                        }
-                        setAttribute(CouchbaseCluster.BUCKET_CREATION_IN_PROGRESS, true);
-                        HostAndPort hostAndPort = BrooklynAccessUtils.getBrooklynAccessibleAddress(primaryNode, primaryNode.getAttribute(CouchbaseNode.COUCHBASE_WEB_ADMIN_PORT));
-
-                        CouchbaseClusterImpl.this.resetBucketCreation.set(HttpFeed.builder()
-                                .entity(CouchbaseClusterImpl.this)
-                                .period(500, TimeUnit.MILLISECONDS)
-                                .baseUri(String.format("http://%s/pools/default/buckets/%s", hostAndPort, bucketName))
-                                .credentials(primaryNode.getConfig(CouchbaseNode.COUCHBASE_ADMIN_USERNAME), primaryNode.getConfig(CouchbaseNode.COUCHBASE_ADMIN_PASSWORD))
-                                .poll(new HttpPollConfig<Boolean>(BUCKET_CREATION_IN_PROGRESS)
-                                        .onSuccess(Functionals.chain(HttpValueFunctions.jsonContents(), JsonFunctions.walkN("nodes"), new Function<JsonElement, Boolean>() {
-                                            @Override
-                                            public Boolean apply(JsonElement input) {
-                                                // Wait until the bucket has been created on all nodes and the couchApiBase element has been published (indicating that the bucket is usable)
-                                                JsonArray servers = input.getAsJsonArray();
-                                                if (servers.size() != CouchbaseClusterImpl.this.getMembers().size()) {
-                                                    return true;
-                                                }
-                                                for (JsonElement server : servers) {
-                                                    Object api = server.getAsJsonObject().get("couchApiBase");
-                                                    if (api == null || Strings.isEmpty(String.valueOf(api))) {
-                                                        return true;
-                                                    }
-                                                }
-                                                return false;
-                                            }
-                                        }))
-                                        .onFailureOrException(new Function<Object, Boolean>() {
-                                            @Override
-                                            public Boolean apply(Object input) {
-                                                if (input instanceof brooklyn.util.http.HttpToolResponse) {
-                                                    if (((brooklyn.util.http.HttpToolResponse) input).getResponseCode() == 404) {
-                                                        return true;
-                                                    }
-                                                }
-                                                if (input instanceof Throwable)
-                                                    Exceptions.propagate((Throwable) input);
-                                                throw new IllegalStateException("Unexpected response when creating bucket:" + input);
-                                            }
-                                        }))
-                                .build());
-
-                        // TODO: Bail out if bucket creation fails, to allow next bucket to proceed
-                        Entities.invokeEffectorWithArgs(CouchbaseClusterImpl.this, primaryNode, CouchbaseNode.BUCKET_CREATE, bucketName, bucketType, bucketPort, bucketRamSize, bucketReplica);
-                        DependentConfiguration.waitInTaskForAttributeReady(CouchbaseClusterImpl.this, CouchbaseCluster.BUCKET_CREATION_IN_PROGRESS, Predicates.equalTo(false));
-                        if (CouchbaseClusterImpl.this.resetBucketCreation.get() != null) {
-                            CouchbaseClusterImpl.this.resetBucketCreation.get().stop();
-                        }
-                        return null;
-                    }
-                }
-        ).build()).orSubmitAndBlock();
-    }
-    
-    static {
-        RendererHints.register(COUCH_DOCS_DATA_SIZE_PER_NODE, RendererHints.displayValue(ByteSizeStrings.metric()));
-        RendererHints.register(COUCH_DOCS_ACTUAL_DISK_SIZE_PER_NODE, RendererHints.displayValue(ByteSizeStrings.metric()));
-        RendererHints.register(MEM_USED_PER_NODE, RendererHints.displayValue(ByteSizeStrings.metric()));
-        RendererHints.register(COUCH_VIEWS_ACTUAL_DISK_SIZE_PER_NODE, RendererHints.displayValue(ByteSizeStrings.metric()));
-        RendererHints.register(COUCH_VIEWS_DATA_SIZE_PER_NODE, RendererHints.displayValue(ByteSizeStrings.metric()));
-    }
-}

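For reference, getQuorumSize() in the class above defaults to a majority quorum when INITIAL_QUORUM_SIZE is unset. A standalone sketch of the arithmetic, not part of this commit (integer division already floors, so the Math.floor call there is redundant but harmless):

    static int defaultQuorum(int initialSize) {
        return initialSize / 2 + 1;  // 1 -> 1, 2 -> 2, 3 -> 2, 4 -> 3, 5 -> 3
    }
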
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseNode.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseNode.java b/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseNode.java
deleted file mode 100644
index 727f942..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseNode.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchbase;
-
-import java.net.URI;
-
-import org.apache.brooklyn.catalog.Catalog;
-import brooklyn.config.ConfigKey;
-import brooklyn.config.render.RendererHints;
-import brooklyn.entity.annotation.Effector;
-import brooklyn.entity.annotation.EffectorParam;
-import brooklyn.entity.basic.Attributes;
-import brooklyn.entity.basic.ConfigKeys;
-import brooklyn.entity.basic.MethodEffector;
-import brooklyn.entity.basic.SoftwareProcess;
-import brooklyn.entity.effector.Effectors;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
-import brooklyn.event.basic.PortAttributeSensorAndConfigKey;
-import brooklyn.event.basic.Sensors;
-import brooklyn.util.flags.SetFromFlag;
-import brooklyn.util.text.ByteSizeStrings;
-
-@Catalog(name="CouchBase Node", description="Couchbase Server is an open source, distributed (shared-nothing architecture) "
-        + "NoSQL document-oriented database that is optimized for interactive applications.")
-@ImplementedBy(CouchbaseNodeImpl.class)
-public interface CouchbaseNode extends SoftwareProcess {
-
-    @SetFromFlag("adminUsername")
-    ConfigKey<String> COUCHBASE_ADMIN_USERNAME = ConfigKeys.newStringConfigKey("couchbase.adminUsername", "Username for the admin user on the node", "Administrator");
-
-    @SetFromFlag("adminPassword")
-    ConfigKey<String> COUCHBASE_ADMIN_PASSWORD = ConfigKeys.newStringConfigKey("couchbase.adminPassword", "Password for the admin user on the node", "Password");
-
-    @SetFromFlag("version")
-    ConfigKey<String> SUGGESTED_VERSION = ConfigKeys.newConfigKeyWithDefault(SoftwareProcess.SUGGESTED_VERSION,
-            "3.0.0");
-
-    @SetFromFlag("enterprise")
-    ConfigKey<Boolean> USE_ENTERPRISE = ConfigKeys.newBooleanConfigKey("couchbase.enterprise.enabled",
-        "Whether to use Couchbase Enterprise; if false uses the community version. Defaults to true.", true);
-
-    @SetFromFlag("downloadUrl")
-    BasicAttributeSensorAndConfigKey<String> DOWNLOAD_URL = new BasicAttributeSensorAndConfigKey<String>(
-            SoftwareProcess.DOWNLOAD_URL, "http://packages.couchbase.com/releases/${version}/"
-                + "couchbase-server-${driver.communityOrEnterprise}${driver.downloadLinkPreVersionSeparator}${version}${driver.downloadLinkOsTagWithPrefix}");
-
-    @SetFromFlag("clusterInitRamSize")
-    BasicAttributeSensorAndConfigKey<Integer> COUCHBASE_CLUSTER_INIT_RAM_SIZE = new BasicAttributeSensorAndConfigKey<Integer>(
-            Integer.class, "couchbase.clusterInitRamSize", "initial ram size of the cluster", 300);
-
-    PortAttributeSensorAndConfigKey COUCHBASE_WEB_ADMIN_PORT = new PortAttributeSensorAndConfigKey("couchbase.webAdminPort", "Web Administration Port", "8091+");
-    PortAttributeSensorAndConfigKey COUCHBASE_API_PORT = new PortAttributeSensorAndConfigKey("couchbase.apiPort", "Couchbase API Port", "8092+");
-    PortAttributeSensorAndConfigKey COUCHBASE_INTERNAL_BUCKET_PORT = new PortAttributeSensorAndConfigKey("couchbase.internalBucketPort", "Internal Bucket Port", "11209");
-    PortAttributeSensorAndConfigKey COUCHBASE_INTERNAL_EXTERNAL_BUCKET_PORT = new PortAttributeSensorAndConfigKey("couchbase.internalExternalBucketPort", "Internal/External Bucket Port", "11210");
-    PortAttributeSensorAndConfigKey COUCHBASE_CLIENT_INTERFACE_PROXY = new PortAttributeSensorAndConfigKey("couchbase.clientInterfaceProxy", "Client interface (proxy)", "11211");
-    PortAttributeSensorAndConfigKey COUCHBASE_INCOMING_SSL_PROXY = new PortAttributeSensorAndConfigKey("couchbase.incomingSslProxy", "Incoming SSL Proxy", "11214");
-    PortAttributeSensorAndConfigKey COUCHBASE_INTERNAL_OUTGOING_SSL_PROXY = new PortAttributeSensorAndConfigKey("couchbase.internalOutgoingSslProxy", "Internal Outgoing SSL Proxy", "11215");
-    PortAttributeSensorAndConfigKey COUCHBASE_REST_HTTPS_FOR_SSL = new PortAttributeSensorAndConfigKey("couchbase.internalRestHttpsForSsl", "Internal REST HTTPS for SSL", "18091");
-    PortAttributeSensorAndConfigKey COUCHBASE_CAPI_HTTPS_FOR_SSL = new PortAttributeSensorAndConfigKey("couchbase.internalCapiHttpsForSsl", "Internal CAPI HTTPS for SSL", "18092");
-    PortAttributeSensorAndConfigKey ERLANG_PORT_MAPPER = new PortAttributeSensorAndConfigKey("couchbase.erlangPortMapper", "Erlang Port Mapper Daemon Listener Port (epmd)", "4369");
-    PortAttributeSensorAndConfigKey NODE_DATA_EXCHANGE_PORT_RANGE_START = new PortAttributeSensorAndConfigKey("couchbase.nodeDataExchangePortRangeStart", "Node data exchange Port Range Start", "21100+");
-    PortAttributeSensorAndConfigKey NODE_DATA_EXCHANGE_PORT_RANGE_END = new PortAttributeSensorAndConfigKey("couchbase.nodeDataExchangePortRangeEnd", "Node data exchange Port Range End", "21199+");
-
-    AttributeSensor<Boolean> IS_PRIMARY_NODE = Sensors.newBooleanSensor("couchbase.isPrimaryNode", "flag to determine if the current couchbase node is the primary node for the cluster");
-    AttributeSensor<Boolean> IS_IN_CLUSTER = Sensors.newBooleanSensor("couchbase.isInCluster", "flag to determine if the current couchbase node has been added to a cluster, "
-        + "including being the first / primary node");
-    AttributeSensor<URI> COUCHBASE_WEB_ADMIN_URL = Attributes.MAIN_URI;
-    
-    // Interesting stats
-    AttributeSensor<Double> OPS = Sensors.newDoubleSensor("couchbase.stats.ops", 
-            "Retrieved from pools/nodes/<current node>/interestingStats/ops");
-    AttributeSensor<Long> COUCH_DOCS_DATA_SIZE = Sensors.newLongSensor("couchbase.stats.couch.docs.data.size", 
-            "Retrieved from pools/nodes/<current node>/interestingStats/couch_docs_data_size");
-    AttributeSensor<Long> COUCH_DOCS_ACTUAL_DISK_SIZE = Sensors.newLongSensor("couchbase.stats.couch.docs.actual.disk.size", 
-            "Retrieved from pools/nodes/<current node>/interestingStats/couch_docs_actual_disk_size");
-    AttributeSensor<Long> EP_BG_FETCHED = Sensors.newLongSensor("couchbase.stats.ep.bg.fetched", 
-            "Retrieved from pools/nodes/<current node>/interestingStats/ep_bg_fetched");
-    AttributeSensor<Long> MEM_USED = Sensors.newLongSensor("couchbase.stats.mem.used", 
-            "Retrieved from pools/nodes/<current node>/interestingStats/mem_used");
-    AttributeSensor<Long> COUCH_VIEWS_ACTUAL_DISK_SIZE = Sensors.newLongSensor("couchbase.stats.couch.views.actual.disk.size", 
-            "Retrieved from pools/nodes/<current node>/interestingStats/couch_views_actual_disk_size");
-    AttributeSensor<Long> CURR_ITEMS = Sensors.newLongSensor("couchbase.stats.curr.items", 
-            "Retrieved from pools/nodes/<current node>/interestingStats/curr_items");
-    AttributeSensor<Long> VB_REPLICA_CURR_ITEMS = Sensors.newLongSensor("couchbase.stats.vb.replica.curr.items", 
-            "Retrieved from pools/nodes/<current node>/interestingStats/vb_replica_curr_items");
-    AttributeSensor<Long> COUCH_VIEWS_DATA_SIZE = Sensors.newLongSensor("couchbase.stats.couch.views.data.size", 
-            "Retrieved from pools/nodes/<current node>/interestingStats/couch_views_data_size");
-    AttributeSensor<Long> GET_HITS = Sensors.newLongSensor("couchbase.stats.get.hits", 
-            "Retrieved from pools/nodes/<current node>/interestingStats/get_hits");
-    AttributeSensor<Double> CMD_GET = Sensors.newDoubleSensor("couchbase.stats.cmd.get", 
-            "Retrieved from pools/nodes/<current node>/interestingStats/cmd_get");
-    AttributeSensor<Long> CURR_ITEMS_TOT = Sensors.newLongSensor("couchbase.stats.curr.items.tot", 
-            "Retrieved from pools/nodes/<current node>/interestingStats/curr_items_tot");
-    AttributeSensor<String> REBALANCE_STATUS = Sensors.newStringSensor("couchbase.rebalance.status", 
-            "Displays the current rebalance status from pools/nodes/rebalanceStatus");
-    
-    class MainUri {
-        public static final AttributeSensor<URI> MAIN_URI = Attributes.MAIN_URI;
-        
-        static {
-            // ROOT_URL does not need init because it refers to something already initialized
-            RendererHints.register(COUCHBASE_WEB_ADMIN_URL, RendererHints.namedActionWithUrl());
-
-            RendererHints.register(COUCH_DOCS_DATA_SIZE, RendererHints.displayValue(ByteSizeStrings.metric()));
-            RendererHints.register(COUCH_DOCS_ACTUAL_DISK_SIZE, RendererHints.displayValue(ByteSizeStrings.metric()));
-            RendererHints.register(MEM_USED, RendererHints.displayValue(ByteSizeStrings.metric()));
-            RendererHints.register(COUCH_VIEWS_ACTUAL_DISK_SIZE, RendererHints.displayValue(ByteSizeStrings.metric()));
-            RendererHints.register(COUCH_VIEWS_DATA_SIZE, RendererHints.displayValue(ByteSizeStrings.metric()));
-        }
-    }
-    
-    // this long-winded reference is done just to trigger the initialization above
-    AttributeSensor<URI> MAIN_URI = MainUri.MAIN_URI;
-
-    MethodEffector<Void> SERVER_ADD = new MethodEffector<Void>(CouchbaseNode.class, "serverAdd");
-    MethodEffector<Void> SERVER_ADD_AND_REBALANCE = new MethodEffector<Void>(CouchbaseNode.class, "serverAddAndRebalance");
-    MethodEffector<Void> REBALANCE = new MethodEffector<Void>(CouchbaseNode.class, "rebalance");
-    MethodEffector<Void> BUCKET_CREATE = new MethodEffector<Void>(CouchbaseNode.class, "bucketCreate");
-    brooklyn.entity.Effector<Void> ADD_REPLICATION_RULE = Effectors.effector(Void.class, "addReplicationRule")
-        .description("Adds a replication rule from the indicated bucket on the cluster where this node is located "
-            + "to the indicated cluster and optional destination bucket")
-        .parameter(String.class, "fromBucket", "Bucket to be replicated")
-        .parameter(Object.class, "toCluster", "Entity (or ID) of the cluster to which this should replicate")
-        .parameter(String.class, "toBucket", "Destination bucket for replication in the toCluster, defaulting to the same as the fromBucket")
-        .buildAbstract();
-
-    @Effector(description = "add a server to a cluster")
-    public void serverAdd(@EffectorParam(name = "serverHostname") String serverToAdd, @EffectorParam(name = "username") String username, @EffectorParam(name = "password") String password);
-    
-    @Effector(description = "add a server to a cluster, and immediately rebalances")
-    public void serverAddAndRebalance(@EffectorParam(name = "serverHostname") String serverToAdd, @EffectorParam(name = "username") String username, @EffectorParam(name = "password") String password);
-
-    @Effector(description = "rebalance the couchbase cluster")
-    public void rebalance();
-    
-    @Effector(description = "create a new bucket")
-    public void bucketCreate(@EffectorParam(name = "bucketName") String bucketName, @EffectorParam(name = "bucketType") String bucketType, 
-            @EffectorParam(name = "bucketPort") Integer bucketPort, @EffectorParam(name = "bucketRamSize") Integer bucketRamSize, 
-            @EffectorParam(name = "bucketReplica") Integer bucketReplica);
-
-}

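The effectors declared above are invoked from CouchbaseClusterImpl via Entities.invokeEffectorWithArgs. A hedged sketch of a caller (caller and node are assumed to be managed entities; the address and credentials are placeholders, not values from this commit):

    Entities.invokeEffectorWithArgs(caller, node, CouchbaseNode.SERVER_ADD_AND_REBALANCE,
            "10.0.0.2:8091", "Administrator", "Password").getUnchecked();
    Entities.invokeEffector(caller, node, CouchbaseNode.REBALANCE).getUnchecked();
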
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseNodeDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseNodeDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseNodeDriver.java
deleted file mode 100644
index 37f2f74..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseNodeDriver.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchbase;
-
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.SoftwareProcessDriver;
-
-public interface CouchbaseNodeDriver extends SoftwareProcessDriver {
-    public String getOsTag();
-    public String getDownloadLinkPreVersionSeparator();
-    public String getDownloadLinkOsTagWithPrefix();
-    
-    public String getCommunityOrEnterprise();
-
-    public void serverAdd(String serverToAdd, String username, String password);
-
-    public void rebalance();
-    
-    public void bucketCreate(String bucketName, String bucketType, Integer bucketPort, Integer bucketRamSize, Integer bucketReplica);
-
-    public void serverAddAndRebalance(String serverToAdd, String username, String password);
-
-    public void addReplicationRule(Entity toCluster, String fromBucket, String toBucket);
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseNodeImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseNodeImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseNodeImpl.java
deleted file mode 100644
index d7439ca..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseNodeImpl.java
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchbase;
-
-import static java.lang.String.format;
-
-import java.net.URI;
-import java.util.Collection;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.http.auth.UsernamePasswordCredentials;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.Attributes;
-import brooklyn.entity.basic.SoftwareProcessImpl;
-import brooklyn.entity.effector.EffectorBody;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.SensorEvent;
-import brooklyn.event.SensorEventListener;
-import brooklyn.event.feed.http.HttpFeed;
-import brooklyn.event.feed.http.HttpPollConfig;
-import brooklyn.event.feed.http.HttpValueFunctions;
-import brooklyn.event.feed.http.JsonFunctions;
-import brooklyn.location.MachineProvisioningLocation;
-import brooklyn.location.access.BrooklynAccessUtils;
-import brooklyn.location.cloud.CloudLocationConfig;
-import brooklyn.util.collections.MutableMap;
-import brooklyn.util.collections.MutableSet;
-import brooklyn.util.config.ConfigBag;
-import brooklyn.util.exceptions.Exceptions;
-import brooklyn.util.guava.Functionals;
-import brooklyn.util.guava.MaybeFunctions;
-import brooklyn.util.guava.TypeTokens;
-import brooklyn.util.http.HttpTool;
-import brooklyn.util.http.HttpToolResponse;
-import brooklyn.util.net.Urls;
-import brooklyn.util.task.Tasks;
-import brooklyn.util.text.Strings;
-import brooklyn.util.time.Duration;
-
-import com.google.common.base.Charsets;
-import com.google.common.base.Function;
-import com.google.common.base.Functions;
-import com.google.common.base.Preconditions;
-import com.google.common.net.HostAndPort;
-import com.google.common.net.HttpHeaders;
-import com.google.common.net.MediaType;
-import com.google.gson.JsonArray;
-import com.google.gson.JsonElement;
-
-public class CouchbaseNodeImpl extends SoftwareProcessImpl implements CouchbaseNode {
-
-    private static final Logger log = LoggerFactory.getLogger(CouchbaseNodeImpl.class);
-
-    private volatile HttpFeed httpFeed;
-
-    @Override
-    public Class<CouchbaseNodeDriver> getDriverInterface() {
-        return CouchbaseNodeDriver.class;
-    }
-
-    @Override
-    public CouchbaseNodeDriver getDriver() {
-        return (CouchbaseNodeDriver) super.getDriver();
-    }
-
-    @Override
-    public void init() {
-        super.init();
-
-        subscribe(this, Attributes.SERVICE_UP, new SensorEventListener<Boolean>() {
-            @Override
-            public void onEvent(SensorEvent<Boolean> booleanSensorEvent) {
-                if (Boolean.TRUE.equals(booleanSensorEvent.getValue())) {
-                    Integer webPort = getAttribute(CouchbaseNode.COUCHBASE_WEB_ADMIN_PORT);
-                    Preconditions.checkNotNull(webPort, CouchbaseNode.COUCHBASE_WEB_ADMIN_PORT+" not set for %s; is an acceptable port available?", this);
-                    String hostAndPort = BrooklynAccessUtils.getBrooklynAccessibleAddress(CouchbaseNodeImpl.this, webPort).toString();
-                    setAttribute(CouchbaseNode.COUCHBASE_WEB_ADMIN_URL, URI.create(format("http://%s", hostAndPort)));
-                }
-            }
-        });
-
-        getMutableEntityType().addEffector(ADD_REPLICATION_RULE, new EffectorBody<Void>() {
-            @Override
-            public Void call(ConfigBag parameters) {
-                addReplicationRule(parameters);
-                return null;
-            }
-        });
-    }
-
-    protected Map<String, Object> obtainProvisioningFlags(@SuppressWarnings("rawtypes") MachineProvisioningLocation location) {
-        ConfigBag result = ConfigBag.newInstance(super.obtainProvisioningFlags(location));
-        result.configure(CloudLocationConfig.OS_64_BIT, true);
-        return result.getAllConfig();
-    }
-
-    @Override
-    protected Collection<Integer> getRequiredOpenPorts() {
-        // TODO this creates a huge list of inbound ports; much better to define on a security group using range syntax!
-        int erlangRangeStart = getConfig(NODE_DATA_EXCHANGE_PORT_RANGE_START).iterator().next();
-        int erlangRangeEnd = getConfig(NODE_DATA_EXCHANGE_PORT_RANGE_END).iterator().next();
-
-        Set<Integer> newPorts = MutableSet.<Integer>copyOf(super.getRequiredOpenPorts());
-        newPorts.remove(erlangRangeStart);
-        newPorts.remove(erlangRangeEnd);
-        for (int i = erlangRangeStart; i <= erlangRangeEnd; i++)
-            newPorts.add(i);
-        return newPorts;
-    }
-
-    @Override
-    public void serverAdd(String serverToAdd, String username, String password) {
-        getDriver().serverAdd(serverToAdd, username, password);
-    }
-
-    @Override
-    public void serverAddAndRebalance(String serverToAdd, String username, String password) {
-        getDriver().serverAddAndRebalance(serverToAdd, username, password);
-    }
-
-    @Override
-    public void rebalance() {
-        getDriver().rebalance();
-    }
-
-    protected final static Function<HttpToolResponse, JsonElement> GET_THIS_NODE_STATS = Functionals.chain(
-        HttpValueFunctions.jsonContents(),
-        JsonFunctions.walk("nodes"),
-        new Function<JsonElement, JsonElement>() {
-            @Override public JsonElement apply(JsonElement input) {
-                JsonArray nodes = input.getAsJsonArray();
-                for (JsonElement element : nodes) {
-                    JsonElement thisNode = element.getAsJsonObject().get("thisNode");
-                    if (thisNode!=null && Boolean.TRUE.equals(thisNode.getAsBoolean())) {
-                        return element.getAsJsonObject().get("interestingStats");
-                    }
-                }
-                return null;
-        }}
-    );
-
-    protected final static <T> HttpPollConfig<T> getSensorFromNodeStat(AttributeSensor<T> sensor, String ...jsonPath) {
-        return new HttpPollConfig<T>(sensor)
-            .onSuccess(Functionals.chain(GET_THIS_NODE_STATS,
-                MaybeFunctions.<JsonElement>wrap(),
-                JsonFunctions.walkM(jsonPath),
-                JsonFunctions.castM(TypeTokens.getRawRawType(sensor.getTypeToken()), null)))
-            .onFailureOrException(Functions.<T>constant(null));
-    }
-
-    @Override
-    protected void postStart() {
-        super.postStart();
-        renameServerToPublicHostname();
-    }
-
-    protected void renameServerToPublicHostname() {
-        // http://docs.couchbase.com/couchbase-manual-2.5/cb-install/#couchbase-getting-started-hostnames
-        URI apiUri = null;
-        try {
-            HostAndPort accessible = BrooklynAccessUtils.getBrooklynAccessibleAddress(this, getAttribute(COUCHBASE_WEB_ADMIN_PORT));
-            apiUri = URI.create(String.format("http://%s:%d/node/controller/rename", accessible.getHostText(), accessible.getPort()));
-            UsernamePasswordCredentials credentials = new UsernamePasswordCredentials(getConfig(COUCHBASE_ADMIN_USERNAME), getConfig(COUCHBASE_ADMIN_PASSWORD));
-            HttpToolResponse response = HttpTool.httpPost(
-                    // the uri is required by the HttpClientBuilder in order to set the AuthScope of the credentials
-                    HttpTool.httpClientBuilder().uri(apiUri).credentials(credentials).build(),
-                    apiUri,
-                    MutableMap.of(
-                            HttpHeaders.CONTENT_TYPE, MediaType.FORM_DATA.toString(),
-                            HttpHeaders.ACCEPT, "*/*",
-                            // this appears needed; without it we get org.apache.http.NoHttpResponseException !?
-                            HttpHeaders.AUTHORIZATION, HttpTool.toBasicAuthorizationValue(credentials)),
-                    Charsets.UTF_8.encode("hostname="+Urls.encode(accessible.getHostText())).array());
-            log.debug("Renamed Couchbase server "+this+" via "+apiUri+": "+response);
-            if (!HttpTool.isStatusCodeHealthy(response.getResponseCode())) {
-                log.warn("Invalid response code, renaming "+apiUri+": "+response);
-            }
-        } catch (Exception e) {
-            Exceptions.propagateIfFatal(e);
-            log.warn("Error renaming server, using "+apiUri+": "+e, e);
-        }
-    }
-
-    public void connectSensors() {
-        super.connectSensors();
-        connectServiceUpIsRunning();
-
-        HostAndPort hostAndPort = BrooklynAccessUtils.getBrooklynAccessibleAddress(this, this.getAttribute(CouchbaseNode.COUCHBASE_WEB_ADMIN_PORT));
-        httpFeed = HttpFeed.builder()
-            .entity(this)
-            .period(Duration.seconds(3))
-            .baseUri("http://" + hostAndPort + "/pools/nodes/")
-            .credentialsIfNotNull(getConfig(CouchbaseNode.COUCHBASE_ADMIN_USERNAME), getConfig(CouchbaseNode.COUCHBASE_ADMIN_PASSWORD))
-            .poll(getSensorFromNodeStat(CouchbaseNode.OPS, "ops"))
-            .poll(getSensorFromNodeStat(CouchbaseNode.COUCH_DOCS_DATA_SIZE, "couch_docs_data_size"))
-            .poll(getSensorFromNodeStat(CouchbaseNode.COUCH_DOCS_ACTUAL_DISK_SIZE, "couch_docs_actual_disk_size"))
-            .poll(getSensorFromNodeStat(CouchbaseNode.EP_BG_FETCHED, "ep_bg_fetched"))
-            .poll(getSensorFromNodeStat(CouchbaseNode.MEM_USED, "mem_used"))
-            .poll(getSensorFromNodeStat(CouchbaseNode.COUCH_VIEWS_ACTUAL_DISK_SIZE, "couch_views_actual_disk_size"))
-            .poll(getSensorFromNodeStat(CouchbaseNode.CURR_ITEMS, "curr_items"))
-            .poll(getSensorFromNodeStat(CouchbaseNode.VB_REPLICA_CURR_ITEMS, "vb_replica_curr_items"))
-            .poll(getSensorFromNodeStat(CouchbaseNode.COUCH_VIEWS_DATA_SIZE, "couch_views_data_size"))
-            .poll(getSensorFromNodeStat(CouchbaseNode.GET_HITS, "get_hits"))
-            .poll(getSensorFromNodeStat(CouchbaseNode.CMD_GET, "cmd_get"))
-            .poll(getSensorFromNodeStat(CouchbaseNode.CURR_ITEMS_TOT, "curr_items_tot"))
-            .poll(new HttpPollConfig<String>(CouchbaseNode.REBALANCE_STATUS)
-                    .onSuccess(HttpValueFunctions.jsonContents("rebalanceStatus", String.class))
-                    .onFailureOrException(Functions.constant("Could not retrieve")))
-            .build();
-    }
-
-    public void disconnectSensors() {
-        super.disconnectSensors();
-        disconnectServiceUpIsRunning();
-        if (httpFeed != null) {
-            httpFeed.stop();
-        }
-    }
-
-    @Override
-    public void bucketCreate(String bucketName, String bucketType, Integer bucketPort, Integer bucketRamSize, Integer bucketReplica) {
-        if (Strings.isBlank(bucketType)) bucketType = "couchbase";
-        if (bucketRamSize==null || bucketRamSize<=0) bucketRamSize = 200;
-        if (bucketReplica==null || bucketReplica<0) bucketReplica = 1;
-
-        getDriver().bucketCreate(bucketName, bucketType, bucketPort, bucketRamSize, bucketReplica);
-    }
-
-    /** exposed through {@link CouchbaseNode#ADD_REPLICATION_RULE} */
-    protected void addReplicationRule(ConfigBag ruleArgs) {
-        Object toClusterO = Preconditions.checkNotNull(ruleArgs.getStringKey("toCluster"), "toCluster must not be null");
-        if (toClusterO instanceof String) {
-            toClusterO = getManagementContext().lookup((String)toClusterO);
-        }
-        Entity toCluster = Tasks.resolving(toClusterO, Entity.class).context(getExecutionContext()).get();
-
-        String fromBucket = Preconditions.checkNotNull( (String)ruleArgs.getStringKey("fromBucket"), "fromBucket must be specified" );
-
-        String toBucket = (String)ruleArgs.getStringKey("toBucket");
-        if (toBucket==null) toBucket = fromBucket;
-
-        if (!ruleArgs.getUnusedConfig().isEmpty()) {
-            throw new IllegalArgumentException("Unsupported replication rule data: "+ruleArgs.getUnusedConfig());
-        }
-
-        getDriver().addReplicationRule(toCluster, fromBucket, toBucket);
-    }
-
-}

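Aside: the GET_THIS_NODE_STATS chain above is easier to follow outside the feed machinery. A minimal standalone sketch with Gson directly, using a made-up sample payload (the real /pools/nodes response carries many more fields per node):

    import com.google.gson.JsonArray;
    import com.google.gson.JsonElement;
    import com.google.gson.JsonParser;

    /** Standalone sketch of the GET_THIS_NODE_STATS logic: given the JSON from
     *  Couchbase's /pools/nodes endpoint, find the entry flagged "thisNode"
     *  and return its "interestingStats" object. */
    public class ThisNodeStatsSketch {
        static JsonElement thisNodeStats(String poolsNodesJson) {
            JsonArray nodes = new JsonParser().parse(poolsNodesJson)
                    .getAsJsonObject().getAsJsonArray("nodes");
            for (JsonElement element : nodes) {
                JsonElement thisNode = element.getAsJsonObject().get("thisNode");
                if (thisNode != null && thisNode.getAsBoolean()) {
                    return element.getAsJsonObject().get("interestingStats");
                }
            }
            return null; // no node flagged as this one
        }

        public static void main(String[] args) {
            // hypothetical minimal payload for illustration only
            String sample = "{\"nodes\":[{\"thisNode\":true,\"interestingStats\":{\"ops\":42}}]}";
            System.out.println(thisNodeStats(sample)); // prints {"ops":42}
        }
    }
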
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseNodeSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseNodeSshDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseNodeSshDriver.java
deleted file mode 100644
index 6dd97d6..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseNodeSshDriver.java
+++ /dev/null
@@ -1,512 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchbase;
-
-import static brooklyn.util.ssh.BashCommands.*;
-import static java.lang.String.format;
-
-import java.net.URI;
-import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.Callable;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-
-import org.apache.http.auth.UsernamePasswordCredentials;
-
-import brooklyn.entity.Entity;
-import brooklyn.entity.Group;
-import brooklyn.entity.basic.AbstractSoftwareProcessSshDriver;
-import brooklyn.entity.basic.Attributes;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.basic.ServiceStateLogic;
-import brooklyn.entity.drivers.downloads.BasicDownloadRequirement;
-import brooklyn.entity.drivers.downloads.DownloadProducerFromUrlAttribute;
-import brooklyn.entity.software.SshEffectorTasks;
-import brooklyn.event.basic.DependentConfiguration;
-import brooklyn.event.feed.http.HttpValueFunctions;
-import brooklyn.location.OsDetails;
-import brooklyn.location.access.BrooklynAccessUtils;
-import brooklyn.location.basic.SshMachineLocation;
-import brooklyn.management.Task;
-import brooklyn.util.collections.MutableMap;
-import brooklyn.util.http.HttpTool;
-import brooklyn.util.http.HttpToolResponse;
-import brooklyn.util.repeat.Repeater;
-import brooklyn.util.ssh.BashCommands;
-import brooklyn.util.task.DynamicTasks;
-import brooklyn.util.task.TaskBuilder;
-import brooklyn.util.task.TaskTags;
-import brooklyn.util.task.Tasks;
-import brooklyn.util.text.NaturalOrderComparator;
-import brooklyn.util.text.StringEscapes.BashStringEscapes;
-import brooklyn.util.text.Strings;
-import brooklyn.util.time.Duration;
-
-import com.google.common.base.Function;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.net.HostAndPort;
-
-public class CouchbaseNodeSshDriver extends AbstractSoftwareProcessSshDriver implements CouchbaseNodeDriver {
-
-    public CouchbaseNodeSshDriver(final CouchbaseNodeImpl entity, final SshMachineLocation machine) {
-        super(entity, machine);
-    }
-
-    public static String couchbaseCli(String cmd) {
-        return "/opt/couchbase/bin/couchbase-cli " + cmd + " ";
-    }
-
-    @Override
-    public void preInstall() {
-        resolver = Entities.newDownloader(this);
-        setExpandedInstallDir(getInstallDir());
-    }
-
-    @Override
-    public void install() {
-        //for reference https://github.com/urbandecoder/couchbase/blob/master/recipes/server.rb
-        //installation instructions (http://docs.couchbase.com/couchbase-manual-2.5/cb-install/#preparing-to-install)
-
-        List<String> urls = resolver.getTargets();
-        String saveAs = resolver.getFilename();
-
-        OsDetails osDetails = getMachine().getMachineDetails().getOsDetails();
-
-        if (osDetails.isLinux()) {
-            List<String> commands = installLinux(urls, saveAs);
-            //FIXME installation returns an error but the server is up and running.
-            newScript(INSTALLING)
-                    .body.append(commands).execute();
-        } else {
-            Tasks.markInessential();
-            throw new IllegalStateException("Unsupported OS for installing Couchbase. Will continue but may fail later.");
-        }
-    }
-
-    private List<String> installLinux(List<String> urls, String saveAs) {
-
-        log.info("Installing " + getEntity() + " using couchbase-server-{} {}", getCommunityOrEnterprise(), getVersion());
-
-        String apt = chainGroup(
-                installPackage(MutableMap.of("apt", "python-httplib2 libssl0.9.8"), null),
-                sudo(format("dpkg -i %s", saveAs)));
-
-        String yum = chainGroup(
-                "which yum",
-                // The following prevents failure on RHEL AWS nodes:
-                // https://forums.aws.amazon.com/thread.jspa?threadID=100509
-                ok(sudo("sed -i.bk s/^enabled=1$/enabled=0/ /etc/yum/pluginconf.d/subscription-manager.conf")),
-                ok(sudo("yum check-update")),
-                sudo("yum install -y pkgconfig"),
-                // RHEL requires openssl version 098
-                sudo("[ -f /etc/redhat-release ] && (grep -i \"red hat\" /etc/redhat-release && sudo yum install -y openssl098e) || :"),
-                sudo(format("rpm --install %s", saveAs)));
-
-        String link = new DownloadProducerFromUrlAttribute().apply(new BasicDownloadRequirement(this)).getPrimaryLocations().iterator().next();
-        return ImmutableList.<String>builder()
-                .add(INSTALL_CURL)
-                .addAll(Arrays.asList(
-                        BashCommands.require(BashCommands.alternatives(BashCommands.simpleDownloadUrlAs(urls, saveAs),
-                                        // Referer link is required for 3.0.0; note mis-spelling is correct, as per http://en.wikipedia.org/wiki/HTTP_referer
-                                        "curl -f -L -k " + BashStringEscapes.wrapBash(link)
-                                                + " -H 'Referer: http://www.couchbase.com/downloads'"
-                                                + " -o " + saveAs),
-                                "Could not retrieve " + saveAs + " (from " + urls.size() + " sites)", 9)))
-                .add(alternatives(apt, yum))
-                .build();
-    }
-
-    @Override
-    public void customize() {
-        //TODO: add linux tweaks for couchbase
-        //http://blog.couchbase.com/often-overlooked-linux-os-tweaks
-        //http://blog.couchbase.com/kirk
-
-        //turn off swappiness
-        //vm.swappiness=0
-        //sudo echo 0 > /proc/sys/vm/swappiness
-
-        //os page cache = 20%
-
-        //disable THP
-        //sudo echo never > /sys/kernel/mm/transparent_hugepage/enabled
-        //sudo echo never > /sys/kernel/mm/transparent_hugepage/defrag
-
-        //turn off transparent huge pages
-        //limit page cache dirty bytes
-        //control the rate the page cache is flushed ... vm.dirty_*
-    }
-
-    @Override
-    public void launch() {
-        String clusterPrefix = "--cluster-" + (isPreV3() ? "init-" : "");
-        // in v3.0, the cluster arguments were changed, and it became mandatory to supply a URL + password (if there is none, these are ignored)
-        newScript(LAUNCHING)
-                .body.append(
-                sudo("/etc/init.d/couchbase-server start"),
-                "for i in {0..120}\n" +
-                        "do\n" +
-                        "    if [ $i -eq 120 ]; then echo REST API unavailable after 120 seconds, failing; exit 1; fi;\n" +
-                        "    curl -s " + String.format("http://localhost:%s", getWebPort()) + " > /dev/null && echo REST API available after $i seconds && break\n" +
-                        "    sleep 1\n" +
-                        "done\n" +
-                        couchbaseCli("cluster-init") +
-                        (isPreV3() ? getCouchbaseHostnameAndPort() : getCouchbaseHostnameAndCredentials()) +
-                        " " + clusterPrefix + "username=" + getUsername() +
-                        " " + clusterPrefix + "password=" + getPassword() +
-                        " " + clusterPrefix + "port=" + getWebPort() +
-                        " " + clusterPrefix + "ramsize=" + getClusterInitRamSize())
-                .execute();
-    }
-
-    @Override
-    public boolean isRunning() {
-        //TODO add a better way to check if couchbase server is running
-        return (newScript(CHECK_RUNNING)
-                .body.append(format("curl -u %s:%s http://localhost:%s/pools/nodes", getUsername(), getPassword(), getWebPort()))
-                .execute() == 0);
-    }
-
-    @Override
-    public void stop() {
-        newScript(STOPPING)
-                .body.append(sudo("/etc/init.d/couchbase-server stop"))
-                .execute();
-    }
-
-    @Override
-    public String getVersion() {
-        return entity.getConfig(CouchbaseNode.SUGGESTED_VERSION);
-    }
-
-    @Override
-    public String getOsTag() {
-        return newDownloadLinkSegmentComputer().getOsTag();
-    }
-
-    protected DownloadLinkSegmentComputer newDownloadLinkSegmentComputer() {
-        return new DownloadLinkSegmentComputer(getLocation().getOsDetails(), !isPreV3(), Strings.toString(getEntity()));
-    }
-
-    public static class DownloadLinkSegmentComputer {
-        // links are:
-        // http://packages.couchbase.com/releases/2.2.0/couchbase-server-community_2.2.0_x86_64.rpm
-        // http://packages.couchbase.com/releases/2.2.0/couchbase-server-community_2.2.0_x86_64.deb
-        // ^^^ preV3 is _ everywhere
-        // http://packages.couchbase.com/releases/3.0.0/couchbase-server-community_3.0.0-ubuntu12.04_amd64.deb
-        // ^^^ most V3 is _${version}-
-        // http://packages.couchbase.com/releases/3.0.0/couchbase-server-community-3.0.0-centos6.x86_64.rpm
-        // ^^^ but RHEL is -${version}-
-
-        @Nullable
-        private final OsDetails os;
-        @Nonnull
-        private final boolean isV3OrLater;
-        @Nonnull
-        private final String context;
-        @Nonnull
-        private final String osName;
-        @Nonnull
-        private final boolean isRpm;
-        @Nonnull
-        private final boolean is64bit;
-
-        public DownloadLinkSegmentComputer(@Nullable OsDetails os, boolean isV3OrLater, @Nonnull String context) {
-            this.os = os;
-            this.isV3OrLater = isV3OrLater;
-            this.context = context;
-            if (os == null) {
-                // guess centos as RPM is sensible default
-                log.warn("No details known for OS of " + context + "; assuming 64-bit RPM distribution of Couchbase");
-                osName = "centos";
-                isRpm = true;
-                is64bit = true;
-                return;
-            }
-            osName = os.getName().toLowerCase();
-            isRpm = !(osName.contains("deb") || osName.contains("ubuntu"));
-            is64bit = os.is64bit();
-        }
-
-        /**
-         * separator after the version number used to be _ but is - in 3.0 and later
-         */
-        public String getPreVersionSeparator() {
-            if (!isV3OrLater) return "_";
-            if (isRpm) return "-";
-            return "_";
-        }
-
-        public String getOsTag() {
-            // Couchbase only provides packages for certain platforms; on others we guess and try
-            String family;
-            if (osName.contains("debian")) family = "debian7_";
-            else if (osName.contains("ubuntu")) family = "ubuntu12.04_";
-            else if (osName.contains("centos") || osName.contains("rhel") || (osName.contains("red") && osName.contains("hat")))
-                family = "centos6.";
-            else {
-                log.warn("Unrecognised OS " + os + " of " + context + "; assuming RPM distribution of Couchbase");
-                family = "centos6.";
-            }
-
-            if (!is64bit && !isV3OrLater) {
-                // NB: 32-bit binaries aren't (yet?) available for v3.0
-                log.warn("32-bit binaries for Couchbase might not be available when deploying " + context);
-            }
-            String arch = !is64bit ? "x86" : !isRpm && isV3OrLater ? "amd64" : "x86_64";
-            String fileExtension = isRpm ? ".rpm" : ".deb";
-
-            if (isV3OrLater)
-                return family + arch + fileExtension;
-            else
-                return arch + fileExtension;
-        }
-
-        public String getOsTagWithPrefix() {
-            return (!isV3OrLater ? "_" : "-") + getOsTag();
-        }
-    }
-
-    @Override
-    public String getDownloadLinkOsTagWithPrefix() {
-        return newDownloadLinkSegmentComputer().getOsTagWithPrefix();
-    }
-
-    @Override
-    public String getDownloadLinkPreVersionSeparator() {
-        return newDownloadLinkSegmentComputer().getPreVersionSeparator();
-    }
-
-    private boolean isPreV3() {
-        return NaturalOrderComparator.INSTANCE.compare(getEntity().getConfig(CouchbaseNode.SUGGESTED_VERSION), "3.0") < 0;
-    }
-
-    @Override
-    public String getCommunityOrEnterprise() {
-        Boolean isEnterprise = getEntity().getConfig(CouchbaseNode.USE_ENTERPRISE);
-        return isEnterprise ? "enterprise" : "community";
-    }
-
-    private String getUsername() {
-        return entity.getConfig(CouchbaseNode.COUCHBASE_ADMIN_USERNAME);
-    }
-
-    private String getPassword() {
-        return entity.getConfig(CouchbaseNode.COUCHBASE_ADMIN_PASSWORD);
-    }
-
-    private String getWebPort() {
-        return "" + entity.getAttribute(CouchbaseNode.COUCHBASE_WEB_ADMIN_PORT);
-    }
-
-    private String getCouchbaseHostnameAndCredentials() {
-        return format("-c %s:%s -u %s -p %s", getSubnetHostname(), getWebPort(), getUsername(), getPassword());
-    }
-
-    private String getCouchbaseHostnameAndPort() {
-        return format("-c %s:%s", getSubnetHostname(), getWebPort());
-    }
-
-    private String getClusterInitRamSize() {
-        return entity.getConfig(CouchbaseNode.COUCHBASE_CLUSTER_INIT_RAM_SIZE).toString();
-    }
-
-    @Override
-    public void rebalance() {
-        entity.setAttribute(CouchbaseNode.REBALANCE_STATUS, "explicitly started");
-        newScript("rebalance")
-                .body.append(
-                couchbaseCli("rebalance") + getCouchbaseHostnameAndCredentials())
-                .failOnNonZeroResultCode()
-                .execute();
-
-        // wait until the re-balance is started
-        // (if it's quick, this might miss it, but it will only block for 30s if so)
-        Repeater.create()
-                .backoff(Repeater.DEFAULT_REAL_QUICK_PERIOD, 2, Duration.millis(500))
-                .limitTimeTo(Duration.THIRTY_SECONDS)
-                .until(new Callable<Boolean>() {
-                           @Override
-                           public Boolean call() throws Exception {
-                               for (HostAndPort nodeHostAndPort : getNodesHostAndPort()) {
-                                   if (isNodeRebalancing(nodeHostAndPort.toString())) {
-                                       return true;
-                                   }
-                               }
-                               return false;
-                           }
-                       }
-                ).run();
-
-        entity.setAttribute(CouchbaseNode.REBALANCE_STATUS, "waiting for completion");
-        // Wait until the Couchbase node finishes the re-balancing
-        Task<Boolean> reBalance = TaskBuilder.<Boolean>builder()
-                .name("Waiting until node is rebalancing")
-                .body(new Callable<Boolean>() {
-                    @Override
-                    public Boolean call() throws Exception {
-                        return Repeater.create()
-                                .backoff(Duration.ONE_SECOND, 1.2, Duration.TEN_SECONDS)
-                                .limitTimeTo(Duration.FIVE_MINUTES)
-                                .until(new Callable<Boolean>() {
-                                    @Override
-                                    public Boolean call() throws Exception {
-                                        for (HostAndPort nodeHostAndPort : getNodesHostAndPort()) {
-                                            if (isNodeRebalancing(nodeHostAndPort.toString())) {
-                                                return false;
-                                            }
-                                        }
-                                        return true;
-                                    }
-                                })
-                                .run();
-                        }
-                })
-                .build();
-        Boolean completed = DynamicTasks.queueIfPossible(reBalance)
-                .orSubmitAndBlock()
-                .andWaitForSuccess();
-        if (completed) {
-            entity.setAttribute(CouchbaseNode.REBALANCE_STATUS, "completed");
-            ServiceStateLogic.ServiceNotUpLogic.clearNotUpIndicator(getEntity(), "rebalancing");
-            log.info("Rebalanced cluster via primary node {}", getEntity());
-        } else {
-            entity.setAttribute(CouchbaseNode.REBALANCE_STATUS, "timed out");
-            ServiceStateLogic.ServiceNotUpLogic.updateNotUpIndicator(getEntity(), "rebalancing", "rebalance did not complete within time limit");
-            log.warn("Timeout rebalancing cluster via primary node {}", getEntity());
-        }
-    }
-
-    private Iterable<HostAndPort> getNodesHostAndPort() {
-        Group group = Iterables.getFirst(getEntity().getGroups(), null);
-        if (group == null) return Lists.newArrayList();
-        return Iterables.transform(group.getAttribute(CouchbaseCluster.COUCHBASE_CLUSTER_UP_NODES),
-                new Function<Entity, HostAndPort>() {
-                    @Override
-                    public HostAndPort apply(Entity input) {
-                        return BrooklynAccessUtils.getBrooklynAccessibleAddress(input, input.getAttribute(CouchbaseNode.COUCHBASE_WEB_ADMIN_PORT));
-                    }
-                });
-    }
-
-    private boolean isNodeRebalancing(String nodeHostAndPort) {
-        HttpToolResponse response = getApiResponse("http://" + nodeHostAndPort + "/pools/default/rebalanceProgress");
-        if (response.getResponseCode() != 200) {
-            throw new IllegalStateException("failed retrieving rebalance status: " + response);
-        }
-        return !"none".equals(HttpValueFunctions.jsonContents("status", String.class).apply(response));
-    }
-
-    private HttpToolResponse getApiResponse(String uri) {
-        return HttpTool.httpGet(HttpTool.httpClientBuilder()
-                        // the uri is required by the HttpClientBuilder in order to set the AuthScope of the credentials
-                        .uri(uri)
-                        .credentials(new UsernamePasswordCredentials(getUsername(), getPassword()))
-                        .build(),
-                URI.create(uri),
-                ImmutableMap.<String, String>of());
-    }
-
-    @Override
-    public void serverAdd(String serverToAdd, String username, String password) {
-        newScript("serverAdd").body.append(couchbaseCli("server-add")
-                + getCouchbaseHostnameAndCredentials() +
-                " --server-add=" + BashStringEscapes.wrapBash(serverToAdd) +
-                " --server-add-username=" + BashStringEscapes.wrapBash(username) +
-                " --server-add-password=" + BashStringEscapes.wrapBash(password))
-                .failOnNonZeroResultCode()
-                .execute();
-    }
-
-    @Override
-    public void serverAddAndRebalance(String serverToAdd, String username, String password) {
-        newScript("serverAddAndRebalance").body.append(couchbaseCli("rebalance")
-                + getCouchbaseHostnameAndCredentials() +
-                " --server-add=" + BashStringEscapes.wrapBash(serverToAdd) +
-                " --server-add-username=" + BashStringEscapes.wrapBash(username) +
-                " --server-add-password=" + BashStringEscapes.wrapBash(password))
-                .failOnNonZeroResultCode()
-                .execute();
-        entity.setAttribute(CouchbaseNode.REBALANCE_STATUS, "triggered as part of server-add");
-    }
-
-    @Override
-    public void bucketCreate(String bucketName, String bucketType, Integer bucketPort, Integer bucketRamSize, Integer bucketReplica) {
-        log.info("Adding bucket: {} to cluster {} primary node: {}", new Object[]{bucketName, CouchbaseClusterImpl.getClusterOrNode(getEntity()), getEntity()});
-
-        newScript("bucketCreate").body.append(couchbaseCli("bucket-create")
-                + getCouchbaseHostnameAndCredentials() +
-                " --bucket=" + BashStringEscapes.wrapBash(bucketName) +
-                " --bucket-type=" + BashStringEscapes.wrapBash(bucketType) +
-                " --bucket-port=" + bucketPort +
-                " --bucket-ramsize=" + bucketRamSize +
-                " --bucket-replica=" + bucketReplica)
-                .failOnNonZeroResultCode()
-                .execute();
-    }
-
-    @Override
-    public void addReplicationRule(Entity toCluster, String fromBucket, String toBucket) {
-        DynamicTasks.queue(DependentConfiguration.attributeWhenReady(toCluster, Attributes.SERVICE_UP)).getUnchecked();
-
-        String destName = CouchbaseClusterImpl.getClusterName(toCluster);
-
-        log.info("Setting up XDCR for " + fromBucket + " from " + CouchbaseClusterImpl.getClusterName(getEntity()) + " (via " + getEntity() + ") "
-                + "to " + destName + " (" + toCluster + ")");
-
-        Entity destPrimaryNode = toCluster.getAttribute(CouchbaseCluster.COUCHBASE_PRIMARY_NODE);
-        String destHostname = destPrimaryNode.getAttribute(Attributes.HOSTNAME);
-        String destUsername = toCluster.getConfig(CouchbaseNode.COUCHBASE_ADMIN_USERNAME);
-        String destPassword = toCluster.getConfig(CouchbaseNode.COUCHBASE_ADMIN_PASSWORD);
-
-        // on the REST API there is mention of a 'type' 'continuous' but I don't see other references to this
-
-        // PROTOCOL   Select REST protocol or memcached for replication. xmem indicates memcached while capi indicates REST protocol.
-        // looks like xmem is the default; leave off for now
-//        String replMode = "xmem";
-
-        DynamicTasks.queue(TaskTags.markInessential(SshEffectorTasks.ssh(
-                couchbaseCli("xdcr-setup") +
-                        getCouchbaseHostnameAndCredentials() +
-                        " --create" +
-                        " --xdcr-cluster-name=" + BashStringEscapes.wrapBash(destName) +
-                        " --xdcr-hostname=" + BashStringEscapes.wrapBash(destHostname) +
-                        " --xdcr-username=" + BashStringEscapes.wrapBash(destUsername) +
-                        " --xdcr-password=" + BashStringEscapes.wrapBash(destPassword)
-        ).summary("create xdcr destination " + destName).newTask()));
-
-        // would be nice to auto-create bucket, but we'll need to know the parameters; the port in particular is tedious
-//        ((CouchbaseNode)destPrimaryNode).bucketCreate(toBucket, "couchbase", null, 0, 0);
-
-        DynamicTasks.queue(SshEffectorTasks.ssh(
-                couchbaseCli("xdcr-replicate") +
-                        getCouchbaseHostnameAndCredentials() +
-                        " --create" +
-                        " --xdcr-cluster-name=" + BashStringEscapes.wrapBash(destName) +
-                        " --xdcr-from-bucket=" + BashStringEscapes.wrapBash(fromBucket) +
-                        " --xdcr-to-bucket=" + BashStringEscapes.wrapBash(toBucket)
-//            + " --xdcr-replication-mode="+replMode
-        ).summary("configure replication for " + fromBucket + " to " + destName + ":" + toBucket).newTask());
-    }
-}

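Aside: isNodeRebalancing above amounts to one authenticated GET against /pools/default/rebalanceProgress. A minimal sketch with plain JDK HTTP, assuming placeholder host, port and credentials; the substring check is a simplification of the JSON parsing the driver does via HttpValueFunctions:

    import java.io.InputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;
    import java.util.Base64;
    import java.util.Scanner;

    public class RebalanceStatusSketch {
        public static void main(String[] args) throws Exception {
            String hostAndPort = "localhost:8091"; // placeholder admin host:port
            String auth = Base64.getEncoder().encodeToString(
                    "Administrator:Password".getBytes(StandardCharsets.UTF_8)); // placeholder credentials
            URL url = new URL("http://" + hostAndPort + "/pools/default/rebalanceProgress");
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestProperty("Authorization", "Basic " + auth);
            if (conn.getResponseCode() != 200) {
                throw new IllegalStateException("failed retrieving rebalance status: " + conn.getResponseCode());
            }
            try (InputStream in = conn.getInputStream();
                 Scanner s = new Scanner(in, "UTF-8").useDelimiter("\\A")) {
                String body = s.hasNext() ? s.next() : "";
                // crude check; a rebalance is in flight whenever "status" is not "none"
                boolean rebalancing = !body.contains("\"status\":\"none\"");
                System.out.println(rebalancing ? "rebalance in progress" : "no rebalance running");
            }
        }
    }
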
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseSyncGateway.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseSyncGateway.java b/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseSyncGateway.java
deleted file mode 100644
index c0740ee..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseSyncGateway.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchbase;
-
-import brooklyn.config.ConfigKey;
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.ConfigKeys;
-import brooklyn.entity.basic.SoftwareProcess;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
-import brooklyn.event.basic.PortAttributeSensorAndConfigKey;
-import brooklyn.event.basic.Sensors;
-import brooklyn.util.flags.SetFromFlag;
-
-@ImplementedBy(CouchbaseSyncGatewayImpl.class)
-public interface CouchbaseSyncGateway extends SoftwareProcess {
-
-    @SetFromFlag("version")
-    ConfigKey<String> SUGGESTED_VERSION = ConfigKeys.newConfigKeyWithDefault(SoftwareProcess.SUGGESTED_VERSION,
-            "1.0-beta3.1");
-
-    @SetFromFlag("downloadUrl")
-    BasicAttributeSensorAndConfigKey<String> DOWNLOAD_URL = new BasicAttributeSensorAndConfigKey<String>(
-            SoftwareProcess.DOWNLOAD_URL, "http://packages.couchbase.com/releases/couchbase-sync-gateway/1.0-beta/couchbase-sync-gateway-community_${version}_${driver.osTag}");
-    
-    @SetFromFlag("couchbaseServer")
-    ConfigKey<Entity> COUCHBASE_SERVER = ConfigKeys.newConfigKey(Entity.class, "couchbaseSyncGateway.couchbaseNode", 
-            "Couchbase server node or cluster the sync gateway connects to");
-
-    @SetFromFlag("serverPool")
-    ConfigKey<String> COUCHBASE_SERVER_POOL = ConfigKeys.newStringConfigKey("couchbaseSyncGateway.serverPool", 
-            "Couchbase Server pool name in which to find buckets", "default");
-    
-    @SetFromFlag("couchbaseServerBucket")
-    ConfigKey<String> COUCHBASE_SERVER_BUCKET = ConfigKeys.newStringConfigKey("couchbaseSyncGateway.serverBucket", 
-            "Name of the Couchbase bucket to use", "sync_gateway");
-
-    @SetFromFlag("pretty")
-    ConfigKey<Boolean> PRETTY = ConfigKeys.newBooleanConfigKey("couchbaseSyncGateway.pretty", 
-            "Pretty-print JSON responses. This is useful for debugging, but reduces performance.", false);
-
-    @SetFromFlag("verbose")
-    ConfigKey<Boolean> VERBOSE = ConfigKeys.newBooleanConfigKey("couchbaseSyncGateway.verbose", 
-            "Logs more information about requests.", false);
-
-    AttributeSensor<String> COUCHBASE_SERVER_WEB_URL = Sensors.newStringSensor("couchbaseSyncGateway.serverWebUrl", 
-            "The Url and web port of the couchbase server to connect to");
-    
-    AttributeSensor<String> MANAGEMENT_URL = Sensors.newStringSensor("coucbaseSyncGateway.managementUrl", 
-            "Management URL for Couchbase Sycn Gateway");
-
-    PortAttributeSensorAndConfigKey SYNC_REST_API_PORT = new PortAttributeSensorAndConfigKey("couchbaseSyncGateway.syncRestPort", 
-            "Port the Sync REST API listens on", "4984");
-    
-    PortAttributeSensorAndConfigKey ADMIN_REST_API_PORT = new PortAttributeSensorAndConfigKey("couchbaseSyncGateway.adminRestPort", 
-            "Port the Admin REST API listens on", "4985");
-
-}
\ No newline at end of file

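Aside: a hypothetical sketch of how the config keys above get wired up, assuming an already-managed application and Couchbase cluster entity (TestApplication and the names here are illustrative, not part of this commit):

    package brooklyn.entity.nosql.couchbase;

    import brooklyn.entity.Entity;
    import brooklyn.entity.proxying.EntitySpec;
    import brooklyn.test.entity.TestApplication;

    /** Attach a sync gateway to an existing Couchbase cluster entity. */
    public class SyncGatewayWiringSketch {
        static CouchbaseSyncGateway wire(TestApplication app, Entity couchbaseCluster) {
            return app.createAndManageChild(EntitySpec.create(CouchbaseSyncGateway.class)
                    .configure(CouchbaseSyncGateway.COUCHBASE_SERVER, couchbaseCluster)
                    .configure(CouchbaseSyncGateway.COUCHBASE_SERVER_BUCKET, "sync_gateway"));
        }
    }
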
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayDriver.java
deleted file mode 100644
index 148ec0b..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayDriver.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchbase;
-
-import brooklyn.entity.basic.SoftwareProcessDriver;
-
-public interface CouchbaseSyncGatewayDriver extends SoftwareProcessDriver {
-
-    public String getOsTag();
-    
-}
\ No newline at end of file



[04/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/solr/SolrServerIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/solr/SolrServerIntegrationTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/solr/SolrServerIntegrationTest.java
deleted file mode 100644
index 4c7e08c..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/solr/SolrServerIntegrationTest.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.solr;
-
-import static org.testng.Assert.assertEquals;
-import static org.testng.Assert.assertTrue;
-
-import org.apache.solr.common.SolrDocument;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.test.EntityTestUtils;
-import brooklyn.util.collections.MutableMap;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Iterables;
-
-/**
- * Solr integration tests.
- *
- * Test the operation of the {@link SolrServer} class.
- */
-public class SolrServerIntegrationTest extends AbstractSolrServerTest {
-
-    /**
-     * Test that a node starts and sets SERVICE_UP correctly.
-     */
-    @Test(groups = "Integration")
-    public void canStartupAndShutdown() {
-        solr = app.createAndManageChild(EntitySpec.create(SolrServer.class));
-        app.start(ImmutableList.of(testLocation));
-
-        EntityTestUtils.assertAttributeEqualsEventually(solr, Startable.SERVICE_UP, true);
-        Entities.dumpInfo(app);
-
-        solr.stop();
-
-        EntityTestUtils.assertAttributeEqualsEventually(solr, Startable.SERVICE_UP, false);
-    }
-
-    /**
-     * Test that a core can be created and used with SolrJ client.
-     */
-    @Test(groups = "Integration")
-    public void testConnection() throws Exception {
-        solr = app.createAndManageChild(EntitySpec.create(SolrServer.class)
-                .configure(SolrServer.SOLR_CORE_CONFIG, ImmutableMap.of("example", "classpath://solr/example.tgz")));
-        app.start(ImmutableList.of(testLocation));
-
-        EntityTestUtils.assertAttributeEqualsEventually(solr, Startable.SERVICE_UP, true);
-
-        SolrJSupport client = new SolrJSupport(solr, "example");
-
-        Iterable<SolrDocument> results = client.getDocuments();
-        assertTrue(Iterables.isEmpty(results));
-
-        client.addDocument(MutableMap.<String, Object>of("id", "1", "description", "first"));
-        client.addDocument(MutableMap.<String, Object>of("id", "2", "description", "second"));
-        client.addDocument(MutableMap.<String, Object>of("id", "3", "description", "third"));
-        client.commit();
-
-        results = client.getDocuments();
-        assertEquals(Iterables.size(results), 3);
-    }
-}

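Aside: the SolrJSupport helper exercised above is not shown in this diff; against a plain Solr 4.x core, the same add/commit/query cycle looks roughly like this with SolrJ directly (URL and core name are placeholders):

    import org.apache.solr.client.solrj.SolrQuery;
    import org.apache.solr.client.solrj.impl.HttpSolrServer;
    import org.apache.solr.client.solrj.response.QueryResponse;
    import org.apache.solr.common.SolrInputDocument;

    public class SolrJSketch {
        public static void main(String[] args) throws Exception {
            // assumes a Solr 4.x server with an "example" core on localhost
            HttpSolrServer server = new HttpSolrServer("http://localhost:8983/solr/example");

            SolrInputDocument doc = new SolrInputDocument();
            doc.addField("id", "1");
            doc.addField("description", "first");
            server.add(doc);
            server.commit();

            QueryResponse rsp = server.query(new SolrQuery("*:*"));
            System.out.println("documents: " + rsp.getResults().getNumFound());
            server.shutdown();
        }
    }
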
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/solr/SolrServerLiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/solr/SolrServerLiveTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/solr/SolrServerLiveTest.java
deleted file mode 100644
index 82fb107..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/solr/SolrServerLiveTest.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.solr;
-
-import static org.testng.Assert.assertEquals;
-import static org.testng.Assert.assertTrue;
-
-import java.util.Map;
-
-import org.apache.solr.common.SolrDocument;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.test.EntityTestUtils;
-import brooklyn.util.collections.MutableMap;
-import brooklyn.util.text.Strings;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Iterables;
-
-/**
- * Solr live tests.
- *
- * Test the operation of the {@link SolrServer} class using the jclouds {@code rackspace-cloudservers-uk}
- * and {@code aws-ec2} providers, with different OS images. The tests use the {@link SolrJSupport} class
- * to exercise the node, and will need to have {@code brooklyn.jclouds.provider.identity} and {@code .credential}
- * set, usually in the {@code .brooklyn/brooklyn.properties} file.
- */
-public class SolrServerLiveTest extends AbstractSolrServerTest {
-
-    private static final Logger log = LoggerFactory.getLogger(SolrServerLiveTest.class);
-
-    @DataProvider(name = "virtualMachineData")
-    public Object[][] provideVirtualMachineData() {
-        return new Object[][] { // ImageId, Provider, Region, Description (for logging)
-            new Object[] { "eu-west-1/ami-0307d674", "aws-ec2", "eu-west-1", "Ubuntu Server 14.04 LTS (HVM), SSD Volume Type" },
-            new Object[] { "LON/f9b690bf-88eb-43c2-99cf-391f2558732e", "rackspace-cloudservers-uk", "", "Ubuntu 12.04 LTS (Precise Pangolin)" }, 
-            new Object[] { "LON/a84b1592-6817-42da-a57c-3c13f3cfc1da", "rackspace-cloudservers-uk", "", "CentOS 6.5 (PVHVM)" }, 
-        };
-    }
-
-    @Test(groups = "Live", dataProvider = "virtualMachineData")
-    protected void testOperatingSystemProvider(String imageId, String provider, String region, String description) throws Exception {
-        log.info("Testing Solr on {}{} using {} ({})", new Object[] { provider, Strings.isNonEmpty(region) ? ":" + region : "", description, imageId });
-
-        Map<String, String> properties = MutableMap.of("imageId", imageId);
-        testLocation = app.getManagementContext().getLocationRegistry()
-                .resolve(provider + (Strings.isNonEmpty(region) ? ":" + region : ""), properties);
-        solr = app.createAndManageChild(EntitySpec.create(SolrServer.class)
-                .configure(SolrServer.SOLR_CORE_CONFIG, ImmutableMap.of("example", "classpath://solr/example.tgz")));
-        app.start(ImmutableList.of(testLocation));
-
-        EntityTestUtils.assertAttributeEqualsEventually(solr, Startable.SERVICE_UP, true);
-
-        SolrJSupport client = new SolrJSupport(solr, "example");
-
-        Iterable<SolrDocument> results = client.getDocuments();
-        assertTrue(Iterables.isEmpty(results));
-
-        client.addDocument(MutableMap.<String, Object>of("id", "1", "description", "first"));
-        client.addDocument(MutableMap.<String, Object>of("id", "2", "description", "second"));
-        client.addDocument(MutableMap.<String, Object>of("id", "3", "description", "third"));
-        client.commit();
-
-        results = client.getDocuments();
-        assertEquals(Iterables.size(results), 3);
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/AbstractCassandraNodeTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/AbstractCassandraNodeTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/AbstractCassandraNodeTest.java
new file mode 100644
index 0000000..ab158bd
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/AbstractCassandraNodeTest.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraNode;
+import org.testng.annotations.BeforeMethod;
+
+import brooklyn.entity.BrooklynAppLiveTestSupport;
+import brooklyn.location.Location;
+
+/**
+ * Cassandra test framework for integration and live tests.
+ */
+public class AbstractCassandraNodeTest extends BrooklynAppLiveTestSupport {
+
+    protected Location testLocation;
+    protected CassandraNode cassandra;
+
+    @BeforeMethod(alwaysRun = true)
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        testLocation = app.newLocalhostProvisioningLocation();
+    }
+
+}

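Aside: a concrete test built on this base class follows the same shape as the Solr tests above; a hypothetical minimal sketch, not part of this commit:

    package org.apache.brooklyn.entity.nosql.cassandra;

    import org.testng.annotations.Test;

    import brooklyn.entity.proxying.EntitySpec;
    import brooklyn.entity.trait.Startable;
    import brooklyn.test.EntityTestUtils;

    import com.google.common.collect.ImmutableList;

    /** Smoke test: start a node on the localhost location, check SERVICE_UP, stop it. */
    public class CassandraNodeSmokeTestSketch extends AbstractCassandraNodeTest {

        @Test(groups = "Integration")
        public void canStartupAndShutdown() {
            cassandra = app.createAndManageChild(EntitySpec.create(CassandraNode.class));
            app.start(ImmutableList.of(testLocation));

            EntityTestUtils.assertAttributeEqualsEventually(cassandra, Startable.SERVICE_UP, true);

            cassandra.stop();

            EntityTestUtils.assertAttributeEqualsEventually(cassandra, Startable.SERVICE_UP, false);
        }
    }
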
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/AstyanaxSupport.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/AstyanaxSupport.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/AstyanaxSupport.java
new file mode 100644
index 0000000..b7587d7
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/AstyanaxSupport.java
@@ -0,0 +1,331 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertNull;
+import static org.testng.Assert.assertTrue;
+
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.Assert;
+
+import brooklyn.entity.basic.Attributes;
+import brooklyn.util.exceptions.Exceptions;
+import brooklyn.util.text.Identifiers;
+import brooklyn.util.time.Duration;
+import brooklyn.util.time.Time;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.netflix.astyanax.AstyanaxContext;
+import com.netflix.astyanax.Cluster;
+import com.netflix.astyanax.Keyspace;
+import com.netflix.astyanax.MutationBatch;
+import com.netflix.astyanax.connectionpool.NodeDiscoveryType;
+import com.netflix.astyanax.connectionpool.OperationResult;
+import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
+import com.netflix.astyanax.connectionpool.exceptions.SchemaDisagreementException;
+import com.netflix.astyanax.connectionpool.impl.ConnectionPoolConfigurationImpl;
+import com.netflix.astyanax.connectionpool.impl.CountingConnectionPoolMonitor;
+import com.netflix.astyanax.impl.AstyanaxConfigurationImpl;
+import com.netflix.astyanax.model.Column;
+import com.netflix.astyanax.model.ColumnFamily;
+import com.netflix.astyanax.model.ColumnList;
+import com.netflix.astyanax.serializers.StringSerializer;
+import com.netflix.astyanax.thrift.ThriftFamilyFactory;
+
+/**
+ * Cassandra testing using Astyanax API.
+ */
+public class AstyanaxSupport {
+    private static final Logger log = LoggerFactory.getLogger(AstyanaxSupport.class);
+
+    public final String clusterName;
+    public final String hostname;
+    public final int thriftPort;
+    
+    public AstyanaxSupport(CassandraNode node) {
+        this(node.getClusterName(), node.getAttribute(Attributes.HOSTNAME), node.getThriftPort());
+    }
+    
+    public AstyanaxSupport(String clusterName, String hostname, int thriftPort) {
+        this.clusterName = clusterName;
+        this.hostname = hostname;
+        this.thriftPort = thriftPort;
+    }
+    
+    public AstyanaxContext<Keyspace> newAstyanaxContextForKeyspace(String keyspace) {
+        AstyanaxContext<Keyspace> context = new AstyanaxContext.Builder()
+                .forCluster(clusterName)
+                .forKeyspace(keyspace)
+                .withAstyanaxConfiguration(new AstyanaxConfigurationImpl()
+                        .setDiscoveryType(NodeDiscoveryType.NONE))
+                .withConnectionPoolConfiguration(new ConnectionPoolConfigurationImpl("BrooklynPool")
+                        .setPort(thriftPort)
+                        .setMaxConnsPerHost(1)
+                        .setConnectTimeout(5000) // 5s
+                        .setSeeds(String.format("%s:%d", hostname, thriftPort)))
+                .withConnectionPoolMonitor(new CountingConnectionPoolMonitor())
+                .buildKeyspace(ThriftFamilyFactory.getInstance());
+
+        context.start();
+        return context;
+    }
+    
+    public AstyanaxContext<Cluster> newAstyanaxContextForCluster() {
+        AstyanaxContext<Cluster> context = new AstyanaxContext.Builder()
+                .forCluster(clusterName)
+                .withAstyanaxConfiguration(new AstyanaxConfigurationImpl()
+                        .setDiscoveryType(NodeDiscoveryType.NONE))
+                .withConnectionPoolConfiguration(new ConnectionPoolConfigurationImpl("BrooklynPool")
+                        .setPort(thriftPort)
+                        .setMaxConnsPerHost(1)
+                        .setConnectTimeout(5000) // 5s
+                        .setSeeds(String.format("%s:%d", hostname, thriftPort)))
+                .withConnectionPoolMonitor(new CountingConnectionPoolMonitor())
+                .buildCluster(ThriftFamilyFactory.getInstance());
+
+        context.start();
+        return context;
+    }
+    
+    public static class AstyanaxSample extends AstyanaxSupport {
+        
+        public static class Builder {
+            protected CassandraNode node;
+            protected String clusterName;
+            protected String hostname;
+            protected Integer thriftPort;
+            protected String columnFamilyName = Identifiers.makeRandomId(8);
+            
+            public Builder node(CassandraNode val) {
+                this.node = val;
+                clusterName = node.getClusterName();
+                hostname = node.getAttribute(Attributes.HOSTNAME);
+                thriftPort = node.getThriftPort();
+                return this;
+            }
+            public Builder host(String clusterName, String hostname, int thriftPort) {
+                this.clusterName = clusterName;
+                this.hostname = hostname;
+                this.thriftPort = thriftPort;
+                return this;
+            }
+            public Builder columnFamilyName(String val) {
+                this.columnFamilyName = val;
+                return this;
+            }
+            public AstyanaxSample build() {
+                return new AstyanaxSample(this);
+            }
+        }
+        
+        public static Builder builder() {
+            return new Builder();
+        }
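+        // Usage sketch (assuming a managed CassandraNode "node"):
+        //     AstyanaxSample sample = AstyanaxSample.builder().node(node).build();
+        //     sample.astyanaxTest();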
+        
+        public final String columnFamilyName;
+        public final ColumnFamily<String, String> sampleColumnFamily;
+
+        public AstyanaxSample(CassandraNode node) {
+            this(builder().node(node));
+        }
+
+        public AstyanaxSample(String clusterName, String hostname, int thriftPort) {
+            this(builder().host(clusterName, hostname, thriftPort));
+        }
+
+        protected AstyanaxSample(Builder builder) {
+            super(builder.clusterName, builder.hostname, builder.thriftPort);
+            columnFamilyName = checkNotNull(builder.columnFamilyName, "columnFamilyName");
+            sampleColumnFamily = new ColumnFamily<String, String>(
+                    columnFamilyName, // Column Family Name
+                    StringSerializer.get(), // Key Serializer
+                    StringSerializer.get()); // Column Serializer
+        }
+
+        /**
+         * Exercise the {@link CassandraNode} using the Astyanax API.
+         */
+        public void astyanaxTest() throws Exception {
+            String keyspaceName = "BrooklynTests_"+Identifiers.makeRandomId(8);
+            writeData(keyspaceName);
+            readData(keyspaceName);
+        }
+
+        /**
+         * Write to a {@link CassandraNode} using the Astyanax API.
+         * @throws ConnectionException 
+         */
+        public void writeData(String keyspaceName) throws ConnectionException {
+            // Create context
+            AstyanaxContext<Keyspace> context = newAstyanaxContextForKeyspace(keyspaceName);
+            try {
+                Keyspace keyspace = context.getEntity();
+                try {
+                    checkNull(keyspace.describeKeyspace().getColumnFamily(columnFamilyName), "key space for column family "+columnFamilyName);
+                } catch (Exception ek) {
+                    // (Re-)create the keyspace if needed (including when the column family already
+                    // exists, e.g. due to a timeout on a previous attempt)
+                    log.debug("repairing Cassandra error by re-creating keyspace "+keyspace+": "+ek);
+                    try {
+                        log.debug("dropping Cassandra keyspace "+keyspace);
+                        keyspace.dropKeyspace();
+                    } catch (Exception e) {
+                        /* Ignore */ 
+                        log.debug("Cassandra keyspace "+keyspace+" could not be dropped (probably did not exist): "+e);
+                    }
+                    try {
+                        keyspace.createKeyspace(ImmutableMap.<String, Object>builder()
+                                .put("strategy_options", ImmutableMap.<String, Object>of("replication_factor", "1"))
+                                .put("strategy_class", "SimpleStrategy")
+                                .build());
+                    } catch (SchemaDisagreementException e) {
+                        // discussion (but not terribly helpful) at http://stackoverflow.com/questions/6770894/schemadisagreementexception
+                        // let's just try again after a delay
+                        // (seems to have no effect; trying to fix by starting first node before others)
+                        log.warn("error creating Cassandra keyspace "+keyspace+" (retrying): "+e);
+                        Time.sleep(Duration.FIVE_SECONDS);
+                        keyspace.createKeyspace(ImmutableMap.<String, Object>builder()
+                                .put("strategy_options", ImmutableMap.<String, Object>of("replication_factor", "1"))
+                                .put("strategy_class", "SimpleStrategy")
+                                .build());
+                    }
+                }
+                
+                assertNull(keyspace.describeKeyspace().getColumnFamily("Rabbits"), "key space for arbitrary column family Rabbits");
+                assertNull(keyspace.describeKeyspace().getColumnFamily(columnFamilyName), "key space for column family "+columnFamilyName);
+
+                // Create column family
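+                // (a null options map means Cassandra's defaults are used)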
+                keyspace.createColumnFamily(sampleColumnFamily, null);
+
+                // Insert rows
+                MutationBatch m = keyspace.prepareMutationBatch();
+                m.withRow(sampleColumnFamily, "one")
+                        .putColumn("name", "Alice", null)
+                        .putColumn("company", "Cloudsoft Corp", null);
+                m.withRow(sampleColumnFamily, "two")
+                        .putColumn("name", "Bob", null)
+                        .putColumn("company", "Cloudsoft Corp", null)
+                        .putColumn("pet", "Cat", null);
+
+                OperationResult<Void> insert = m.execute();
+                assertEquals(insert.getHost().getHostName(), hostname);
+                assertTrue(insert.getLatency() > 0L);
+            } finally {
+                context.shutdown();
+            }
+        }
+
+        /**
+         * Read from a {@link CassandraNode} using the Astyanax API.
+         * @throws ConnectionException 
+         */
+        public void readData(String keyspaceName) throws ConnectionException {
+            // Create context
+            AstyanaxContext<Keyspace> context = newAstyanaxContextForKeyspace(keyspaceName);
+            try {
+                Keyspace keyspace = context.getEntity();
+
+                // Query data
+                OperationResult<ColumnList<String>> query = keyspace.prepareQuery(sampleColumnFamily)
+                        .getKey("one")
+                        .execute();
+                assertEquals(query.getHost().getHostName(), hostname);
+                assertTrue(query.getLatency() > 0L);
+
+                ColumnList<String> columns = query.getResult();
+                assertEquals(columns.size(), 2);
+
+                // Lookup columns in response by name
+                String name = columns.getColumnByName("name").getStringValue();
+                assertEquals(name, "Alice");
+
+                // Iterate through the columns
+                for (Column<String> c : columns) {
+                    assertTrue(ImmutableList.of("name", "company").contains(c.getName()));
+                }
+            } finally {
+                context.shutdown();
+            }
+        }
+        
+
+        /**
+         * Returns the keyspace name to which the data has been written. If an attempt fails, subsequent
+         * retries use a different keyspace name. This is because the failure could be a response timeout
+         * after the keyspace really was created, in which case a retry with the same name would fail
+         * (because we assert that the keyspace did not already exist).
+         */
+        public String writeData(String keyspacePrefix, int numRetries) throws ConnectionException {
+            int retryCount = 0;
+            while (true) {
+                try {
+                    String keyspaceName = keyspacePrefix + (retryCount == 0 ? "" : "_"+retryCount);
+                    writeData(keyspaceName);
+                    return keyspaceName;
+                } catch (Exception e) {
+                    log.warn("Error writing data - attempt "+(retryCount+1)+" of "+(numRetries+1)+": "+e, e);
+                    if (++retryCount > numRetries)
+                        throw Exceptions.propagate(e);
+                }
+            }
+        }
+
+        /**
+         * Repeatedly tries to read data from the given keyspace name. Asserts that the data is the
+         * same as would be written by calling {@code writeData(keyspaceName)}.
+         */
+        public void readData(String keyspaceName, int numRetries) throws ConnectionException {
+            int retryCount = 0;
+            while (true) {
+                try {
+                    readData(keyspaceName);
+                    return;
+                } catch (Exception e) {
+                    log.warn("Error reading data - attempt "+(retryCount+1)+" of "+(numRetries+1)+": "+e, e);
+                    if (++retryCount > numRetries)
+                        throw Exceptions.propagate(e);
+                }
+            }
+        }
+
+        /**
+         * Like {@link Assert#assertNull(Object, String)}, except throws IllegalStateException instead
+         */
+        private void checkNull(Object obj, String msg) {
+            if (obj != null) {
+                throw new IllegalStateException("Not null: "+msg+"; obj="+obj);
+            }
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
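+        // Ad-hoc manual smoke test against a running node; the EC2 hostname below is illustrative.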
+        AstyanaxSample support = new AstyanaxSample("ignored", "ec2-79-125-32-2.eu-west-1.compute.amazonaws.com", 9160);
+        AstyanaxContext<Cluster> context = support.newAstyanaxContextForCluster();
+        try {
+            System.out.println(context.getEntity().describeSchemaVersions());
+        } finally {
+            context.shutdown();
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenterIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenterIntegrationTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenterIntegrationTest.java
new file mode 100644
index 0000000..ddd6243
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenterIntegrationTest.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertTrue;
+
+import java.math.BigInteger;
+
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraDatacenter;
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraNode;
+import org.apache.brooklyn.entity.nosql.cassandra.TokenGenerators.PosNeg63TokenGenerator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.BrooklynAppLiveTestSupport;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.location.Location;
+import brooklyn.test.Asserts;
+import brooklyn.test.EntityTestUtils;
+import brooklyn.util.collections.MutableMap;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+
+/**
+ * An integration test of the {@link CassandraDatacenter} entity.
+ *
+ * Tests that a one-node cluster can be started on localhost and data can be written/read, using the Astyanax API.
+ * 
+ * NOTE: If these tests fail with "Timeout waiting for SERVICE_UP" and "java.lang.IllegalStateException: Unable to contact any seeds!" 
+ * or "java.lang.RuntimeException: Unable to gossip with any seeds" appears in the log, it may be that the broadcast_address 
+ * (set to InetAddress.getLocalHost().getHostName()) is not resolving to the value specified in listen_address 
+ * (InetAddress.getLocalHost().getHostAddress()). You can work around this issue by ensuring that your machine has only one 
+ * address, e.g. by disabling wireless if you are also using a wired connection.
+ */
+public class CassandraDatacenterIntegrationTest extends BrooklynAppLiveTestSupport {
+
+    private static final Logger log = LoggerFactory.getLogger(CassandraDatacenterIntegrationTest.class);
+
+    protected Location testLocation;
+    protected CassandraDatacenter cluster;
+
+    @BeforeMethod(alwaysRun = true)
+    @Override
+    public void setUp() throws Exception {
+        CassandraNodeIntegrationTest.assertCassandraPortsAvailableEventually();
+        super.setUp();
+        testLocation = app.newLocalhostProvisioningLocation();
+    }
+
+    @AfterMethod(alwaysRun=true)
+    @Override
+    public void tearDown() throws Exception {
+        super.tearDown();
+        CassandraNodeIntegrationTest.assertCassandraPortsAvailableEventually();
+    }
+    
+
+    @Test(groups = "Integration")
+    public void testStartAndShutdownClusterSizeOne() throws Exception {
+        EntitySpec<CassandraDatacenter> spec = EntitySpec.create(CassandraDatacenter.class)
+                .configure("initialSize", 1)
+                .configure("tokenShift", 42);
+        runStartAndShutdownClusterSizeOne(spec, true);
+    }
+    
+    /**
+     * Cassandra v2 needs Java >= 1.7. If you have Java 6 as the default locally, then you can use
+     * something like {@code .configure("shell.env", MutableMap.of("JAVA_HOME", "/Library/Java/JavaVirtualMachines/jdk1.7.0_51.jdk/Contents/Home"))}
+     */
+    @Test(groups = "Integration")
+    public void testStartAndShutdownClusterSizeOneCassandraVersion2() throws Exception {
+        String version = "2.0.9";
+        
+        EntitySpec<CassandraDatacenter> spec = EntitySpec.create(CassandraDatacenter.class)
+                .configure(CassandraNode.SUGGESTED_VERSION, version)
+                .configure("initialSize", 1);
+        runStartAndShutdownClusterSizeOne(spec, false);
+    }
+    
+    /**
+     * Test that a single-node cluster starts up and allows access via the Astyanax API.
+     * Only one node, because Cassandra can only run one node per VM!
+     */
+    protected void runStartAndShutdownClusterSizeOne(EntitySpec<CassandraDatacenter> datacenterSpec, final boolean assertToken) throws Exception {
+        cluster = app.createAndManageChild(datacenterSpec);
+        assertEquals(cluster.getCurrentSize().intValue(), 0);
+
+        app.start(ImmutableList.of(testLocation));
+        Entities.dumpInfo(app);
+        
+        final CassandraNode node = (CassandraNode) Iterables.get(cluster.getMembers(), 0);
+        String nodeAddr = checkNotNull(node.getAttribute(CassandraNode.HOSTNAME), "hostname") + ":" + checkNotNull(node.getAttribute(CassandraNode.THRIFT_PORT), "thriftPort");
+        
+        EntityTestUtils.assertAttributeEqualsEventually(cluster, CassandraDatacenter.GROUP_SIZE, 1);
+        EntityTestUtils.assertAttributeEqualsEventually(cluster, CassandraDatacenter.CASSANDRA_CLUSTER_NODES, ImmutableList.of(nodeAddr));
+
+        EntityTestUtils.assertAttributeEqualsEventually(node, Startable.SERVICE_UP, true);
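+        // When assertToken is set, the spec configured tokenShift=42, so the node's token should be
+        // the generator's first token shifted by 42.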
+        if (assertToken) {
+            PosNeg63TokenGenerator tg = new PosNeg63TokenGenerator();
+            tg.growingCluster(1);
+            EntityTestUtils.assertAttributeEqualsEventually(node, CassandraNode.TOKEN, tg.newToken().add(BigInteger.valueOf(42)));
+        }
+
+        // may take some time to be consistent (with new thrift_latency checks on the node,
+        // contactability should not be an issue, but consistency still might be)
+        Asserts.succeedsEventually(MutableMap.of("timeout", 120*1000), new Runnable() {
+            public void run() {
+                boolean open = CassandraDatacenterLiveTest.isSocketOpen(node);
+                Boolean consistent = open ? CassandraDatacenterLiveTest.areVersionsConsistent(node) : null;
+                Integer numPeers = node.getAttribute(CassandraNode.PEERS);
+                Integer liveNodeCount = node.getAttribute(CassandraNode.LIVE_NODE_COUNT);
+                String msg = "consistency: "
+                        + (!open ? "unreachable" : consistent==null ? "error" : consistent)+"; "
+                        + "peer group size: "+numPeers + "; live node count: " + liveNodeCount;
+                assertTrue(open, msg);
+                assertEquals(consistent, Boolean.TRUE, msg);
+                if (assertToken) {
+                    assertEquals(numPeers, (Integer)1, msg);
+                } else {
+                    assertTrue(numPeers != null && numPeers >= 1, msg);
+                }
+                assertEquals(liveNodeCount, (Integer)1, msg);
+            }});
+        
+        CassandraDatacenterLiveTest.checkConnectionRepeatedly(2, 5, ImmutableList.of(node));
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenterLiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenterLiveTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenterLiveTest.java
new file mode 100644
index 0000000..d29bc1a
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenterLiveTest.java
@@ -0,0 +1,310 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertNotNull;
+import static org.testng.Assert.assertTrue;
+
+import java.math.BigInteger;
+import java.net.Socket;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraDatacenter;
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraNode;
+import org.apache.brooklyn.entity.nosql.cassandra.AstyanaxSupport.AstyanaxSample;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.Assert;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.BrooklynAppLiveTestSupport;
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.Attributes;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.location.Location;
+import brooklyn.test.Asserts;
+import brooklyn.test.EntityTestUtils;
+import brooklyn.util.collections.MutableMap;
+import brooklyn.util.text.Identifiers;
+import brooklyn.util.time.Duration;
+import brooklyn.util.time.Time;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import com.netflix.astyanax.AstyanaxContext;
+import com.netflix.astyanax.Cluster;
+import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
+
+/**
+ * A live test of the {@link CassandraDatacenter} entity.
+ *
+ * Tests that a two-node cluster can be started on Amazon EC2 and data written on one {@link CassandraNode}
+ * can be read from another, using the Astyanax API.
+ */
+public class CassandraDatacenterLiveTest extends BrooklynAppLiveTestSupport {
+
+    private static final Logger log = LoggerFactory.getLogger(CassandraDatacenterLiveTest.class);
+    
+    private String provider = 
+            "aws-ec2:eu-west-1";
+//            "rackspace-cloudservers-uk";
+//            "named:hpcloud-compute-at";
+//            "localhost";
+//            "jcloudsByon:(provider=\"aws-ec2\",region=\"us-east-1\",user=\"aled\",hosts=\"i-6f374743,i-35324219,i-1135453d\")";
+
+    protected Location testLocation;
+    protected CassandraDatacenter cluster;
+
+    @BeforeMethod(alwaysRun = true)
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        testLocation = mgmt.getLocationRegistry().resolve(provider);
+    }
+
+    @AfterMethod(alwaysRun=true)
+    @Override
+    public void tearDown() throws Exception {
+        super.tearDown();
+    }
+    
+    @Test(groups = "Live")
+    public void testDatacenter() throws Exception {
+        EntitySpec<CassandraDatacenter> spec = EntitySpec.create(CassandraDatacenter.class)
+                .configure("initialSize", 2)
+                .configure("clusterName", "CassandraClusterLiveTest");
+        runCluster(spec, false);
+    }
+    
+    @Test(groups = "Live")
+    public void testDatacenterWithVnodes() throws Exception {
+        EntitySpec<CassandraDatacenter> spec = EntitySpec.create(CassandraDatacenter.class)
+                .configure("initialSize", 2)
+                .configure(CassandraDatacenter.USE_VNODES, true)
+                .configure("clusterName", "CassandraClusterLiveTest");
+        runCluster(spec, true);
+    }
+    
+    /*
+     * TODO on some distros (e.g. CentOS?), the machine comes pre-installed with Java 6. Installing Java 7 
+     * didn't seem to be enough; I also had to set JAVA_HOME:
+     *     .configure("shell.env", MutableMap.of("JAVA_HOME", "/etc/alternatives/java_sdk_1.7.0"))
+     * However, that would break other deployments, such as on Ubuntu where JAVA_HOME would be different.
+     */
+    @Test(groups = "Live")
+    public void testDatacenterWithVnodesVersion2() throws Exception {
+        EntitySpec<CassandraDatacenter> spec = EntitySpec.create(CassandraDatacenter.class)
+                .configure("initialSize", 2)
+                .configure(CassandraNode.SUGGESTED_VERSION, "2.0.9")
+                .configure(CassandraDatacenter.USE_VNODES, true)
+                .configure("clusterName", "CassandraClusterLiveTest");
+        runCluster(spec, true);
+    }
+
+    @Test(groups = {"Live", "Acceptance"}, invocationCount=10)
+    public void testManyTimes() throws Exception {
+        testDatacenter();
+    }
+
+    /**
+     * Test a Cassandra Datacenter:
+     * <ol>
+     *   <li>Create a two-node datacenter
+     *   <li>Confirm access is allowed via the Astyanax API through both nodes
+     *   <li>Confirm the cluster can be resized
+     * </ol>
+     */
+    protected void runCluster(EntitySpec<CassandraDatacenter> datacenterSpec, boolean usesVnodes) throws Exception {
+        cluster = app.createAndManageChild(datacenterSpec);
+        assertEquals(cluster.getCurrentSize().intValue(), 0);
+
+        app.start(ImmutableList.of(testLocation));
+
+        // Check cluster is up and healthy
+        EntityTestUtils.assertAttributeEqualsEventually(cluster, CassandraDatacenter.GROUP_SIZE, 2);
+        Entities.dumpInfo(app);
+        List<CassandraNode> members = castToCassandraNodes(cluster.getMembers());
+        assertNodesConsistent(members);
+
+        if (usesVnodes) {
+            assertVnodeTokensConsistent(members);
+        } else {
+            assertSingleTokenConsistent(members);
+        }
+        
+        // Can connect via Astyanax
+        checkConnectionRepeatedly(2, 5, members);
+
+        // Resize
+        cluster.resize(3);
+        assertEquals(cluster.getMembers().size(), 3, "members="+cluster.getMembers());
+        if (usesVnodes) {
+            assertVnodeTokensConsistent(castToCassandraNodes(cluster.getMembers()));
+        } else {
+            assertSingleTokenConsistent(castToCassandraNodes(cluster.getMembers()));
+        }
+        checkConnectionRepeatedly(2, 5, cluster.getMembers());
+    }
+
+    protected static List<CassandraNode> castToCassandraNodes(Collection<? extends Entity> rawnodes) {
+        final List<CassandraNode> nodes = Lists.newArrayList();
+        for (Entity node : rawnodes) {
+            nodes.add((CassandraNode) node);
+        }
+        return nodes;
+    }
+
+    protected static void assertNodesConsistent(final List<CassandraNode> nodes) {
+        final Integer expectedLiveNodeCount = nodes.size();
+        // may take some time to be consistent (with new thrift_latency checks on the node,
+        // contactability should not be an issue, but consistency still might be)
+        Asserts.succeedsEventually(MutableMap.of("timeout", Duration.TWO_MINUTES), new Runnable() {
+            public void run() {
+                for (Entity n : nodes) {
+                    CassandraNode node = (CassandraNode) n;
+                    EntityTestUtils.assertAttributeEquals(node, Startable.SERVICE_UP, true);
+                    String errmsg = "node="+node+"; hostname="+node.getAttribute(Attributes.HOSTNAME)+"; port="+node.getThriftPort();
+                    assertTrue(isSocketOpen(node), errmsg);
+                    assertTrue(areVersionsConsistent(node), errmsg);
+                    EntityTestUtils.assertAttributeEquals(node, CassandraNode.LIVE_NODE_COUNT, expectedLiveNodeCount);
+                }
+            }});
+    }
+    
+    protected static void assertSingleTokenConsistent(final List<CassandraNode> nodes) {
+        final int numNodes = nodes.size();
+        Asserts.succeedsEventually(MutableMap.of("timeout", Duration.TWO_MINUTES), new Runnable() {
+            public void run() {
+                Set<BigInteger> alltokens = Sets.newLinkedHashSet();
+                for (Entity node : nodes) {
+                    EntityTestUtils.assertAttributeEquals(node, Startable.SERVICE_UP, true);
+                    EntityTestUtils.assertConfigEquals(node, CassandraNode.NUM_TOKENS_PER_NODE, 1);
+                    EntityTestUtils.assertAttributeEquals(node, CassandraNode.PEERS, numNodes);
+                    BigInteger token = node.getAttribute(CassandraNode.TOKEN);
+                    Set<BigInteger> tokens = node.getAttribute(CassandraNode.TOKENS);
+                    assertNotNull(token);
+                    assertEquals(tokens, ImmutableSet.of(token));
+                    alltokens.addAll(tokens);
+                }
+                assertEquals(alltokens.size(), numNodes);
+            }});
+    }
+
+    protected static void assertVnodeTokensConsistent(final List<CassandraNode> nodes) {
+        final int numNodes = nodes.size();
+        final int tokensPerNode = Iterables.get(nodes, 0).getNumTokensPerNode();
+        
+        Asserts.succeedsEventually(MutableMap.of("timeout", Duration.TWO_MINUTES), new Runnable() {
+            public void run() {
+                Set<BigInteger> alltokens = Sets.newLinkedHashSet();
+                for (Entity node : nodes) {
+                    EntityTestUtils.assertAttributeEquals(node, Startable.SERVICE_UP, true);
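+                    // PEERS counts ring entries rather than hosts, so with vnodes each node
+                    // contributes tokensPerNode entries.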
+                    EntityTestUtils.assertAttributeEquals(node, CassandraNode.PEERS, tokensPerNode*numNodes);
+                    EntityTestUtils.assertConfigEquals(node, CassandraNode.NUM_TOKENS_PER_NODE, 256);
+                    BigInteger token = node.getAttribute(CassandraNode.TOKEN);
+                    Set<BigInteger> tokens = node.getAttribute(CassandraNode.TOKENS);
+                    assertNotNull(token);
+                    assertEquals(tokens.size(), tokensPerNode, "tokens="+tokens);
+                    alltokens.addAll(tokens);
+                }
+                assertEquals(alltokens.size(), tokensPerNode*numNodes);
+            }});
+    }
+
+    protected static void checkConnectionRepeatedly(int totalAttemptsAllowed, int numRetriesPerAttempt, Iterable<? extends Entity> nodes) throws Exception {
+        int attemptNum = 0;
+        while (true) {
+            try {
+                checkConnection(numRetriesPerAttempt, nodes);
+                return;
+            } catch (Exception e) {
+                attemptNum++;
+                if (attemptNum >= totalAttemptsAllowed) {
+                    log.warn("Cassandra not usable, "+attemptNum+" attempts; failing: "+e, e);
+                    throw e;
+                }
+                log.warn("Cassandra not usable (attempt "+attemptNum+" of "+totalAttemptsAllowed+"), trying again after delay: "+e, e);
+                Time.sleep(Duration.TEN_SECONDS);
+            }
+        }
+    }
+
+    protected static void checkConnection(int numRetries, Iterable<? extends Entity> nodes) throws ConnectionException {
+        CassandraNode first = (CassandraNode) Iterables.get(nodes, 0);
+        
+        // have been seeing intermittent SchemaDisagreementException errors on AWS, probably due to Astyanax / how we are using it
+        // (confirmed that clocks are in sync)
+        String uniqueName = Identifiers.makeRandomId(8);
+        AstyanaxSample astyanaxFirst = AstyanaxSample.builder().node(first).columnFamilyName(uniqueName).build();
+        Map<String, List<String>> versions;
+        AstyanaxContext<Cluster> context = astyanaxFirst.newAstyanaxContextForCluster();
+        try {
+            versions = context.getEntity().describeSchemaVersions();
+        } finally {
+            context.shutdown();
+        }
+            
+        log.info("Cassandra schema versions are: "+versions);
+        if (versions.size() > 1) {
+            Assert.fail("Inconsistent versions on Cassandra start: "+versions);
+        }
+        String keyspacePrefix = "BrooklynTests_"+Identifiers.makeRandomId(8);
+
+        String keyspaceName = astyanaxFirst.writeData(keyspacePrefix, numRetries);
+
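+        // Data written via the first node should be readable from every node in the cluster.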
+        for (Entity node : nodes) {
+            AstyanaxSample astyanaxSecond = AstyanaxSample.builder().node((CassandraNode)node).columnFamilyName(uniqueName).build();
+            astyanaxSecond.readData(keyspaceName, numRetries);
+        }
+    }
+
+    protected static Boolean areVersionsConsistent(CassandraNode node) {
+        AstyanaxContext<Cluster> context = null;
+        try {
+            context = new AstyanaxSample(node).newAstyanaxContextForCluster();
+            Map<String, List<String>> v = context.getEntity().describeSchemaVersions();
+            return v.size() == 1;
+        } catch (Exception e) {
+            return null;
+        } finally {
+            if (context != null) context.shutdown();
+        }
+    }
+
+    protected static boolean isSocketOpen(CassandraNode node) {
+        try {
+            Socket s = new Socket(node.getAttribute(Attributes.HOSTNAME), node.getThriftPort());
+            s.close();
+            return true;
+        } catch (Exception e) {
+            return false;
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenterRebindIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenterRebindIntegrationTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenterRebindIntegrationTest.java
new file mode 100644
index 0000000..4c2a248
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenterRebindIntegrationTest.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import static org.testng.Assert.assertNotNull;
+
+import java.math.BigInteger;
+import java.util.Set;
+
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraDatacenter;
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.rebind.RebindOptions;
+import brooklyn.entity.rebind.RebindTestFixtureWithApp;
+import brooklyn.entity.trait.Startable;
+import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
+import brooklyn.test.EntityTestUtils;
+
+import com.google.common.base.Predicates;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+
+/**
+ * Test that Brooklyn can rebind (persist and restore state) to a {@link CassandraDatacenter}.
+ */
+public class CassandraDatacenterRebindIntegrationTest extends RebindTestFixtureWithApp {
+    private static final Logger LOG = LoggerFactory.getLogger(CassandraDatacenterRebindIntegrationTest.class);
+
+    private LocalhostMachineProvisioningLocation localhostProvisioningLocation;
+    
+    @BeforeMethod(alwaysRun=true)
+    public void setUp() throws Exception {
+        CassandraNodeIntegrationTest.assertCassandraPortsAvailableEventually();
+        super.setUp();
+        localhostProvisioningLocation = origApp.newLocalhostProvisioningLocation();
+    }
+
+    @AfterMethod(alwaysRun=true)
+    @Override
+    public void tearDown() throws Exception {
+        super.tearDown();
+        CassandraNodeIntegrationTest.assertCassandraPortsAvailableEventually();
+    }
+    
+    /**
+     * Test that Brooklyn can rebind to a single-node datacenter.
+     */
+    @Test(groups = "Integration")
+    public void testRebindDatacenterOfSizeOne() throws Exception {
+        CassandraDatacenter origDatacenter = origApp.createAndManageChild(EntitySpec.create(CassandraDatacenter.class)
+                .configure("initialSize", 1));
+
+        origApp.start(ImmutableList.of(localhostProvisioningLocation));
+        CassandraNode origNode = (CassandraNode) Iterables.get(origDatacenter.getMembers(), 0);
+
+        EntityTestUtils.assertAttributeEqualsEventually(origDatacenter, CassandraDatacenter.GROUP_SIZE, 1);
+        CassandraDatacenterLiveTest.assertNodesConsistent(ImmutableList.of(origNode));
+        CassandraDatacenterLiveTest.assertSingleTokenConsistent(ImmutableList.of(origNode));
+        CassandraDatacenterLiveTest.checkConnectionRepeatedly(2, 5, ImmutableList.of(origNode));
+        BigInteger origToken = origNode.getAttribute(CassandraNode.TOKEN);
+        Set<BigInteger> origTokens = origNode.getAttribute(CassandraNode.TOKENS);
+        assertNotNull(origToken);
+        
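+        // Rebind into a fresh management context, then check the node keeps its identity (its token(s)).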
+        newApp = rebind(RebindOptions.create().terminateOrigManagementContext(true));
+        final CassandraDatacenter newDatacenter = (CassandraDatacenter) Iterables.find(newApp.getChildren(), Predicates.instanceOf(CassandraDatacenter.class));
+        final CassandraNode newNode = (CassandraNode) Iterables.find(newDatacenter.getMembers(), Predicates.instanceOf(CassandraNode.class));
+        
+        EntityTestUtils.assertAttributeEqualsEventually(newDatacenter, CassandraDatacenter.GROUP_SIZE, 1);
+        EntityTestUtils.assertAttributeEqualsEventually(newNode, Startable.SERVICE_UP, true);
+        EntityTestUtils.assertAttributeEqualsEventually(newNode, CassandraNode.TOKEN, origToken);
+        EntityTestUtils.assertAttributeEqualsEventually(newNode, CassandraNode.TOKENS, origTokens);
+        CassandraDatacenterLiveTest.assertNodesConsistent(ImmutableList.of(newNode));
+        CassandraDatacenterLiveTest.assertSingleTokenConsistent(ImmutableList.of(newNode));
+        CassandraDatacenterLiveTest.checkConnectionRepeatedly(2, 5, ImmutableList.of(newNode));
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenterTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenterTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenterTest.java
new file mode 100644
index 0000000..3a1d202
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenterTest.java
@@ -0,0 +1,235 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import static org.testng.Assert.assertEquals;
+
+import java.math.BigInteger;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraDatacenter;
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.Assert;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.BrooklynAppUnitTestSupport;
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.Attributes;
+import brooklyn.entity.basic.EmptySoftwareProcess;
+import brooklyn.entity.basic.EmptySoftwareProcessSshDriver;
+import brooklyn.entity.basic.EntityInternal;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.location.LocationSpec;
+import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
+import brooklyn.test.EntityTestUtils;
+import brooklyn.util.ResourceUtils;
+import brooklyn.util.javalang.JavaClassNames;
+import brooklyn.util.text.TemplateProcessor;
+import brooklyn.util.time.Duration;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Sets;
+
+public class CassandraDatacenterTest extends BrooklynAppUnitTestSupport {
+
+    private static final Logger log = LoggerFactory.getLogger(CassandraDatacenterTest.class);
+    
+    private LocalhostMachineProvisioningLocation loc;
+    private CassandraDatacenter cluster;
+    
+    @BeforeMethod(alwaysRun=true)
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        loc = mgmt.getLocationManager().createLocation(LocationSpec.create(LocalhostMachineProvisioningLocation.class));
+    }
+    
+    @Test
+    public void testPopulatesInitialSeeds() throws Exception {
+        cluster = app.createAndManageChild(EntitySpec.create(CassandraDatacenter.class)
+                .configure(CassandraDatacenter.INITIAL_SIZE, 2)
+                .configure(CassandraDatacenter.TOKEN_SHIFT, BigInteger.ZERO)
+                .configure(CassandraDatacenter.DELAY_BEFORE_ADVERTISING_CLUSTER, Duration.ZERO)
+                .configure(CassandraDatacenter.MEMBER_SPEC, EntitySpec.create(EmptySoftwareProcess.class)));
+
+        app.start(ImmutableList.of(loc));
+        EmptySoftwareProcess e1 = (EmptySoftwareProcess) Iterables.get(cluster.getMembers(), 0);
+        EmptySoftwareProcess e2 = (EmptySoftwareProcess) Iterables.get(cluster.getMembers(), 1);
+        
+        EntityTestUtils.assertAttributeEqualsEventually(cluster, CassandraDatacenter.CURRENT_SEEDS, ImmutableSet.<Entity>of(e1, e2));
+    }
+    
+    @Test(groups="Integration") // because takes approx 2 seconds
+    public void testUpdatesSeedsOnFailuresAndAdditions() throws Exception {
+        doTestUpdatesSeedsOnFailuresAndAdditions(true, false);
+    }
+    
+    protected void doTestUpdatesSeedsOnFailuresAndAdditions(boolean fast, boolean checkSeedsConstantOnRejoining) throws Exception {
+        cluster = app.createAndManageChild(EntitySpec.create(CassandraDatacenter.class)
+                .configure(CassandraDatacenter.INITIAL_SIZE, 2)
+                .configure(CassandraDatacenter.TOKEN_SHIFT, BigInteger.ZERO)
+                .configure(CassandraDatacenter.DELAY_BEFORE_ADVERTISING_CLUSTER, Duration.ZERO)
+                .configure(CassandraDatacenter.MEMBER_SPEC, EntitySpec.create(EmptySoftwareProcess.class)));
+
+        app.start(ImmutableList.of(loc));
+        EmptySoftwareProcess e1 = (EmptySoftwareProcess) Iterables.get(cluster.getMembers(), 0);
+        EmptySoftwareProcess e2 = (EmptySoftwareProcess) Iterables.get(cluster.getMembers(), 1);
+        EntityTestUtils.assertAttributeEqualsEventually(cluster, CassandraDatacenter.CURRENT_SEEDS, ImmutableSet.<Entity>of(e1, e2));
+        log.debug("Test "+JavaClassNames.niceClassAndMethod()+", cluster "+cluster+" has "+cluster.getMembers()+"; e1="+e1+" e2="+e2);
+        
+        // calling the driver's stop for this entity will cause SERVICE_UP to become false, and stay false
+        // (and that's all it does, incidentally); if we just set the attribute it would be flipped back to true by the serviceUp sensor feed
+        ((EmptySoftwareProcess)e1).getDriver().stop();
+        // not necessary, but speeds things up:
+        if (fast)
+            ((EntityInternal)e1).setAttribute(Attributes.SERVICE_UP, false);
+        
+        EntityTestUtils.assertAttributeEqualsEventually(cluster, CassandraDatacenter.CURRENT_SEEDS, ImmutableSet.<Entity>of(e2));
+
+        cluster.resize(3);
+        EmptySoftwareProcess e3 = (EmptySoftwareProcess) Iterables.getOnlyElement(Sets.difference(ImmutableSet.copyOf(cluster.getMembers()), ImmutableSet.of(e1,e2)));
+        log.debug("Test "+JavaClassNames.niceClassAndMethod()+", cluster "+cluster+" has "+cluster.getMembers()+"; e3="+e3);
+        try {
+            EntityTestUtils.assertAttributeEqualsEventually(cluster, CassandraDatacenter.CURRENT_SEEDS, ImmutableSet.<Entity>of(e2, e3));
+        } finally {
+            log.debug("Test "+JavaClassNames.niceClassAndMethod()+", cluster "+cluster+" has "+cluster.getMembers()+"; seeds "+cluster.getAttribute(CassandraDatacenter.CURRENT_SEEDS));
+        }
+        
+        if (checkSeedsConstantOnRejoining) {
+            // cluster should not revert to e1+e2 simply because e1 has come back; but e1 should rejoin the group
+            // (not that important, and waits for 1s, so only checked when requested)
+            ((EmptySoftwareProcessSshDriver)(((EmptySoftwareProcess)e1).getDriver())).launch();
+            if (fast)
+                ((EntityInternal)e1).setAttribute(Attributes.SERVICE_UP, true);
+            EntityTestUtils.assertAttributeEqualsEventually(e1, CassandraNode.SERVICE_UP, true);
+            EntityTestUtils.assertAttributeEqualsContinually(cluster, CassandraDatacenter.CURRENT_SEEDS, ImmutableSet.<Entity>of(e2, e3));
+        }
+    }
+    
+    @Test
+    public void testPopulatesInitialTokens() throws Exception {
+        cluster = app.createAndManageChild(EntitySpec.create(CassandraDatacenter.class)
+                .configure(CassandraDatacenter.INITIAL_SIZE, 2)
+                .configure(CassandraDatacenter.TOKEN_SHIFT, BigInteger.ZERO)
+                .configure(CassandraDatacenter.DELAY_BEFORE_ADVERTISING_CLUSTER, Duration.ZERO)
+                .configure(CassandraDatacenter.MEMBER_SPEC, EntitySpec.create(EmptySoftwareProcess.class)));
+
+        app.start(ImmutableList.of(loc));
+
+        Set<BigInteger> tokens = Sets.newLinkedHashSet();
+        Set<BigInteger> tokens2 = Sets.newLinkedHashSet();
+        for (Entity member : cluster.getMembers()) {
+            BigInteger memberToken = member.getConfig(CassandraNode.TOKEN);
+            Set<BigInteger > memberTokens = member.getConfig(CassandraNode.TOKENS);
+            if (memberToken != null) tokens.add(memberToken);
+            if (memberTokens != null) tokens2.addAll(memberTokens);
+        }
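+        // Two nodes split the signed 64-bit token range evenly: -2^63 and 0.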
+        assertEquals(tokens, ImmutableSet.of(new BigInteger("-9223372036854775808"), BigInteger.ZERO));
+        assertEquals(tokens2, ImmutableSet.of());
+    }
+    
+    @Test
+    public void testDoesNotPopulateInitialTokens() throws Exception {
+        cluster = app.createAndManageChild(EntitySpec.create(CassandraDatacenter.class)
+                .configure(CassandraDatacenter.INITIAL_SIZE, 2)
+                .configure(CassandraDatacenter.USE_VNODES, true)
+                .configure(CassandraDatacenter.DELAY_BEFORE_ADVERTISING_CLUSTER, Duration.ZERO)
+                .configure(CassandraDatacenter.MEMBER_SPEC, EntitySpec.create(EmptySoftwareProcess.class)));
+
+        app.start(ImmutableList.of(loc));
+
+        Set<BigInteger> tokens = Sets.newLinkedHashSet();
+        Set<BigInteger> tokens2 = Sets.newLinkedHashSet();
+        for (Entity member : cluster.getMembers()) {
+            BigInteger memberToken = member.getConfig(CassandraNode.TOKEN);
+            Set<BigInteger > memberTokens = member.getConfig(CassandraNode.TOKENS);
+            if (memberToken != null) tokens.add(memberToken);
+            if (memberTokens != null) tokens2.addAll(memberTokens);
+        }
+        assertEquals(tokens, ImmutableSet.of());
+        assertEquals(tokens2, ImmutableSet.of());
+    }
+    
+    public static class MockInputForTemplate {
+        public BigInteger getToken() { return new BigInteger("-9223372036854775808"); }
+        public String getTokensAsString() { return "" + getToken(); }
+        public int getNumTokensPerNode() { return 1; }
+        public String getSeeds() { return ""; }
+        public int getGossipPort() { return 1234; }
+        public int getSslGossipPort() { return 1234; }
+        public int getThriftPort() { return 1234; }
+        public int getNativeTransportPort() { return 1234; }
+        public String getClusterName() { return "Mock"; }
+        public String getEndpointSnitchName() { return ""; }
+        public String getListenAddress() { return "0"; }
+        public String getBroadcastAddress() { return "0"; }
+        public String getRpcAddress() { return "0"; }
+        public String getRunDir() { return "/tmp/mock"; }
+    }
+    
+    @Test
+    public void testBigIntegerFormattedCorrectly() {
+        Map<String, Object> substitutions = ImmutableMap.<String, Object>builder()
+                .put("entity", new MockInputForTemplate())
+                .put("driver", new MockInputForTemplate())
+                .build();
+
+        String templatedUrl = CassandraNode.CASSANDRA_CONFIG_TEMPLATE_URL.getDefaultValue();
+        String url = TemplateProcessor.processTemplateContents(templatedUrl, ImmutableMap.of("entity", ImmutableMap.of("majorMinorVersion", "1.2")));
+        String templateContents = new ResourceUtils(this).getResourceAsString(url);
+        String processedTemplate = TemplateProcessor.processTemplateContents(templateContents, substitutions);
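+        // Guard against locale-sensitive digit grouping: finding "775,808" would mean the min-long
+        // token -9223372036854775808 had been rendered with thousands separators.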
+        Assert.assertEquals(processedTemplate.indexOf("775,808"), -1);
+        Assert.assertTrue(processedTemplate.indexOf("-9223372036854775808") > 0);
+    }
+    
+    @Test(groups="Integration") // because takes approx 30 seconds
+    public void testUpdatesSeedsFastishManyTimes() throws Exception {
+        final int COUNT = 20;
+        for (int i=0; i<COUNT; i++) {
+            log.info("Test "+JavaClassNames.niceClassAndMethod()+", iteration "+(i+1)+" of "+COUNT);
+            try {
+                doTestUpdatesSeedsOnFailuresAndAdditions(true, true);
+                tearDown();
+                setUp();
+            } catch (Exception e) {
+                log.warn("Error in "+JavaClassNames.niceClassAndMethod()+", iteration "+(i+1)+" of "+COUNT, e);
+                throw e;
+            }
+        }
+    }
+    
+    @Test(groups="Integration") // because takes approx 5 seconds
+    public void testUpdateSeedsSlowAndRejoining() throws Exception {
+        final int COUNT = 1;
+        for (int i=0; i<COUNT; i++) {
+            log.info("Test "+JavaClassNames.niceClassAndMethod()+", iteration "+(i+1)+" of "+COUNT);
+            doTestUpdatesSeedsOnFailuresAndAdditions(false, true);
+            tearDown();
+            setUp();
+        }
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraFabricTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraFabricTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraFabricTest.java
new file mode 100644
index 0000000..cbf55ed
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraFabricTest.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import static org.testng.Assert.assertEquals;
+
+import java.util.Collection;
+import java.util.Set;
+
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraDatacenter;
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraFabric;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.BrooklynAppUnitTestSupport;
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.AbstractEntity;
+import brooklyn.entity.basic.Attributes;
+import brooklyn.entity.basic.EmptySoftwareProcess;
+import brooklyn.entity.basic.EntityInternal;
+import brooklyn.entity.basic.EntityLocal;
+import brooklyn.entity.basic.Lifecycle;
+import brooklyn.entity.basic.ServiceStateLogic;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.entity.trait.Startable;
+import brooklyn.location.Location;
+import brooklyn.location.LocationSpec;
+import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
+import brooklyn.test.EntityTestUtils;
+import brooklyn.util.time.Duration;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Sets;
+
+public class CassandraFabricTest extends BrooklynAppUnitTestSupport {
+
+    private static final Logger log = LoggerFactory.getLogger(CassandraFabricTest.class);
+    
+    private LocalhostMachineProvisioningLocation loc1;
+    private LocalhostMachineProvisioningLocation loc2;
+    private CassandraFabric fabric;
+    
+    @BeforeMethod(alwaysRun=true)
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        loc1 = mgmt.getLocationManager().createLocation(LocationSpec.create(LocalhostMachineProvisioningLocation.class));
+        loc2 = mgmt.getLocationManager().createLocation(LocationSpec.create(LocalhostMachineProvisioningLocation.class));
+    }
+    
+    @Test
+    public void testPopulatesInitialSeeds() throws Exception {
+        fabric = app.createAndManageChild(EntitySpec.create(CassandraFabric.class)
+                .configure(CassandraFabric.INITIAL_QUORUM_SIZE, 2)
+                .configure(CassandraDatacenter.DELAY_BEFORE_ADVERTISING_CLUSTER, Duration.ZERO)
+                .configure(CassandraFabric.MEMBER_SPEC, EntitySpec.create(CassandraDatacenter.class)
+                        .configure(CassandraDatacenter.INITIAL_SIZE, 2)
+                        .configure(CassandraDatacenter.MEMBER_SPEC, EntitySpec.create(EmptySoftwareProcess.class))));
+
+        app.start(ImmutableList.of(loc1, loc2));
+        CassandraDatacenter d1 = (CassandraDatacenter) Iterables.get(fabric.getMembers(), 0);
+        CassandraDatacenter d2 = (CassandraDatacenter) Iterables.get(fabric.getMembers(), 1);
+
+        final EmptySoftwareProcess d1a = (EmptySoftwareProcess) Iterables.get(d1.getMembers(), 0);
+        final EmptySoftwareProcess d1b = (EmptySoftwareProcess) Iterables.get(d1.getMembers(), 1);
+
+        final EmptySoftwareProcess d2a = (EmptySoftwareProcess) Iterables.get(d2.getMembers(), 0);
+        final EmptySoftwareProcess d2b = (EmptySoftwareProcess) Iterables.get(d2.getMembers(), 1);
+
+        Predicate<Set<Entity>> predicate = new Predicate<Set<Entity>>() {
+            @Override public boolean apply(Set<Entity> input) {
+                return input != null && input.size() >= 2 &&
+                        Sets.intersection(input, ImmutableSet.of(d1a, d1b)).size() == 1 &&
+                        Sets.intersection(input, ImmutableSet.of(d2a, d2b)).size() == 1;
+            }
+        };
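+        // i.e. the seed set must contain exactly one node from each datacenter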
+        EntityTestUtils.assertAttributeEventually(fabric, CassandraFabric.CURRENT_SEEDS, predicate);
+        EntityTestUtils.assertAttributeEventually(d1, CassandraDatacenter.CURRENT_SEEDS, predicate);
+        EntityTestUtils.assertAttributeEventually(d2, CassandraDatacenter.CURRENT_SEEDS, predicate);
+        
+        Set<Entity> seeds = fabric.getAttribute(CassandraFabric.CURRENT_SEEDS);
+        assertEquals(d1.getAttribute(CassandraDatacenter.CURRENT_SEEDS), seeds);
+        assertEquals(d2.getAttribute(CassandraDatacenter.CURRENT_SEEDS), seeds);
+        log.info("Seeds="+seeds);
+    }
+
+    @Test
+    public void testPopulatesInitialSeedsWhenNodesOfOneClusterComeUpBeforeTheOtherCluster() throws Exception {
+        fabric = app.createAndManageChild(EntitySpec.create(CassandraFabric.class)
+                .configure(CassandraFabric.INITIAL_QUORUM_SIZE, 2)
+                .configure(CassandraDatacenter.DELAY_BEFORE_ADVERTISING_CLUSTER, Duration.ZERO)
+                .configure(CassandraFabric.MEMBER_SPEC, EntitySpec.create(CassandraDatacenter.class)
+                        .configure(CassandraDatacenter.INITIAL_SIZE, 2)
+                        .configure(CassandraDatacenter.MEMBER_SPEC, EntitySpec.create(DummyCassandraNode.class))));
+
+        Thread t = new Thread() {
+            public void run() {
+                app.start(ImmutableList.of(loc1, loc2));
+            }
+        };
+        t.start();
+        try {
+            EntityTestUtils.assertGroupSizeEqualsEventually(fabric, 2);
+            CassandraDatacenter d1 = (CassandraDatacenter) Iterables.get(fabric.getMembers(), 0);
+            CassandraDatacenter d2 = (CassandraDatacenter) Iterables.get(fabric.getMembers(), 1);
+    
+            EntityTestUtils.assertGroupSizeEqualsEventually(d1, 2);
+            final DummyCassandraNode d1a = (DummyCassandraNode) Iterables.get(d1.getMembers(), 0);
+            final DummyCassandraNode d1b = (DummyCassandraNode) Iterables.get(d1.getMembers(), 1);
+    
+            EntityTestUtils.assertGroupSizeEqualsEventually(d2, 2);
+            final DummyCassandraNode d2a = (DummyCassandraNode) Iterables.get(d2.getMembers(), 0);
+            final DummyCassandraNode d2b = (DummyCassandraNode) Iterables.get(d2.getMembers(), 1);
+
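+            // Publish the first datacenter's hostnames before the second's, to
+            // exercise seed selection when clusters become available at different times.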
+            d1a.setAttribute(Attributes.HOSTNAME, "d1a");
+            d1b.setAttribute(Attributes.HOSTNAME, "d1b");
+            
+            Thread.sleep(1000);
+            d2a.setAttribute(Attributes.HOSTNAME, "d2a");
+            d2b.setAttribute(Attributes.HOSTNAME, "d2b");
+            
+            Predicate<Set<Entity>> predicate = new Predicate<Set<Entity>>() {
+                @Override public boolean apply(Set<Entity> input) {
+                    return input != null && input.size() >= 2 &&
+                            Sets.intersection(input, ImmutableSet.of(d1a, d1b)).size() == 1 &&
+                            Sets.intersection(input, ImmutableSet.of(d2a, d2b)).size() == 1;
+                }
+            };
+            EntityTestUtils.assertAttributeEventually(fabric, CassandraFabric.CURRENT_SEEDS, predicate);
+            EntityTestUtils.assertAttributeEventually(d1, CassandraDatacenter.CURRENT_SEEDS, predicate);
+            EntityTestUtils.assertAttributeEventually(d2, CassandraDatacenter.CURRENT_SEEDS, predicate);
+            
+            Set<Entity> seeds = fabric.getAttribute(CassandraFabric.CURRENT_SEEDS);
+            assertEquals(d1.getAttribute(CassandraDatacenter.CURRENT_SEEDS), seeds);
+            assertEquals(d2.getAttribute(CassandraDatacenter.CURRENT_SEEDS), seeds);
+            log.info("Seeds="+seeds);
+        } finally {
+            log.info("Failed seeds; fabric="+fabric.getAttribute(CassandraFabric.CURRENT_SEEDS));
+            t.interrupt();
+        }
+    }
+    
+    
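+    // Stub node whose start/stop merely record the expected lifecycle state;
+    // the test drives attribute changes (e.g. HOSTNAME) by hand.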
+    @ImplementedBy(DummyCassandraNodeImpl.class)
+    public interface DummyCassandraNode extends Entity, Startable, EntityLocal, EntityInternal {
+    }
+    
+    public static class DummyCassandraNodeImpl extends AbstractEntity implements DummyCassandraNode {
+
+        @Override
+        public void start(Collection<? extends Location> locations) {
+            ServiceStateLogic.setExpectedState(this, Lifecycle.STARTING);
+        }
+
+        @Override
+        public void stop() {
+            ServiceStateLogic.setExpectedState(this, Lifecycle.STOPPING);
+        }
+
+        @Override
+        public void restart() {
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeEc2LiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeEc2LiveTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeEc2LiveTest.java
new file mode 100644
index 0000000..495843f
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeEc2LiveTest.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraNode;
+import org.apache.brooklyn.entity.nosql.cassandra.AstyanaxSupport.AstyanaxSample;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.entity.AbstractEc2LiveTest;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.location.Location;
+import brooklyn.test.EntityTestUtils;
+
+import com.google.common.collect.ImmutableList;
+
+public class CassandraNodeEc2LiveTest extends AbstractEc2LiveTest {
+
+    private static final Logger log = LoggerFactory.getLogger(CassandraNodeEc2LiveTest.class);
+
+    @Override
+    protected void doTest(Location loc) throws Exception {
+        log.info("Testing Cassandra on {}", loc);
+
+        CassandraNode cassandra = app.createAndManageChild(EntitySpec.create(CassandraNode.class)
+                .configure("thriftPort", "9876+")
+                .configure("clusterName", "TestCluster"));
+        app.start(ImmutableList.of(loc));
+
+        EntityTestUtils.assertAttributeEqualsEventually(cassandra, CassandraNode.SERVICE_UP, true);
+
+        AstyanaxSample astyanax = new AstyanaxSample(cassandra);
+        astyanax.astyanaxTest();
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeIntegrationTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeIntegrationTest.java
new file mode 100644
index 0000000..b5a657f
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeIntegrationTest.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertNotNull;
+import static org.testng.Assert.assertTrue;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraNode;
+import org.apache.brooklyn.entity.nosql.cassandra.AstyanaxSupport.AstyanaxSample;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.event.basic.PortAttributeSensorAndConfigKey;
+import brooklyn.test.Asserts;
+import brooklyn.test.EntityTestUtils;
+import brooklyn.test.NetworkingTestUtils;
+import brooklyn.util.math.MathPredicates;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Maps;
+
+/**
+ * Cassandra integration tests.
+ *
+ * Test the operation of the {@link CassandraNode} class.
+ */
+public class CassandraNodeIntegrationTest extends AbstractCassandraNodeTest {
+
+    private static final Logger LOG = LoggerFactory.getLogger(CassandraNodeIntegrationTest.class);
+
+    public static void assertCassandraPortsAvailableEventually() {
+        Map<String, Integer> ports = getCassandraDefaultPorts();
+        NetworkingTestUtils.assertPortsAvailableEventually(ports);
+        LOG.info("Confirmed Cassandra ports are available: "+ports);
+    }
+    
+    public static Map<String, Integer> getCassandraDefaultPorts() {
+        List<PortAttributeSensorAndConfigKey> ports = ImmutableList.of(
+                CassandraNode.GOSSIP_PORT, 
+                CassandraNode.SSL_GOSSIP_PORT, 
+                CassandraNode.THRIFT_PORT, 
+                CassandraNode.NATIVE_TRANSPORT_PORT, 
+                CassandraNode.RMI_REGISTRY_PORT);
+        Map<String, Integer> result = Maps.newLinkedHashMap();
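+        // Each config key's default is a port range; take its first (preferred) port.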
+        for (PortAttributeSensorAndConfigKey key : ports) {
+            result.put(key.getName(), key.getConfigKey().getDefaultValue().iterator().next());
+        }
+        return result;
+    }
+
+    @BeforeMethod(alwaysRun = true)
+    @Override
+    public void setUp() throws Exception {
+        assertCassandraPortsAvailableEventually();
+        super.setUp();
+    }
+    
+    @AfterMethod(alwaysRun=true)
+    @Override
+    public void tearDown() throws Exception {
+        super.tearDown();
+        assertCassandraPortsAvailableEventually();
+    }
+    
+    /**
+     * Test that a node starts and sets SERVICE_UP correctly.
+     */
+    @Test(groups = "Integration")
+    public void canStartupAndShutdown() {
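+        // Values like "11099+" are port ranges: that port, or the next free one above it.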
+        cassandra = app.createAndManageChild(EntitySpec.create(CassandraNode.class)
+                .configure("jmxPort", "11099+")
+                .configure("rmiRegistryPort", "19001+"));
+        app.start(ImmutableList.of(testLocation));
+
+        EntityTestUtils.assertAttributeEqualsEventually(cassandra, Startable.SERVICE_UP, true);
+        Entities.dumpInfo(app);
+
+        cassandra.stop();
+
+        EntityTestUtils.assertAttributeEqualsEventually(cassandra, Startable.SERVICE_UP, false);
+    }
+
+    /**
+     * Test that a keyspace and column family can be created and used with Astyanax client.
+     */
+    @Test(groups = "Integration")
+    public void testConnection() throws Exception {
+        cassandra = app.createAndManageChild(EntitySpec.create(CassandraNode.class)
+                .configure("jmxPort", "11099+")
+                .configure("rmiRegistryPort", "19001+")
+                .configure("thriftPort", "9876+"));
+        app.start(ImmutableList.of(testLocation));
+
+        EntityTestUtils.assertAttributeEqualsEventually(cassandra, Startable.SERVICE_UP, true);
+
+        AstyanaxSample astyanax = new AstyanaxSample(cassandra);
+        astyanax.astyanaxTest();
+    }
+    
+    /**
+     * Cassandra v2 needs Java >= 1.7. If Java 6 is the default locally, you can use
+     * something like {@code .configure("shell.env", MutableMap.of("JAVA_HOME", "/Library/Java/JavaVirtualMachines/jdk1.7.0_51.jdk/Contents/Home"))}
+     */
+    @Test(groups = "Integration")
+    public void testCassandraVersion2() throws Exception {
+        // TODO In v2.0.10, the bin/cassandra script changed to add an additional check for JMX connectivity.
+        // This causes the cassandra script to hang for us (presumably due to the CLASSPATH/JVM_OPTS
+        // we pass in for the JMX agent).
+        // See:
+        //  - https://issues.apache.org/jira/browse/CASSANDRA-7254
+        //  - https://github.com/apache/cassandra/blame/trunk/bin/cassandra#L211-216
+        
+        String version = "2.0.9";
+        String majorMinorVersion = "2.0";
+        
+        cassandra = app.createAndManageChild(EntitySpec.create(CassandraNode.class)
+                .configure(CassandraNode.SUGGESTED_VERSION, version)
+                .configure(CassandraNode.NUM_TOKENS_PER_NODE, 256)
+                .configure("jmxPort", "11099+")
+                .configure("rmiRegistryPort", "19001+"));
+        app.start(ImmutableList.of(testLocation));
+
+        EntityTestUtils.assertAttributeEqualsEventually(cassandra, Startable.SERVICE_UP, true);
+        Entities.dumpInfo(app);
+
+        AstyanaxSample astyanax = new AstyanaxSample(cassandra);
+        astyanax.astyanaxTest();
+
+        assertEquals(cassandra.getMajorMinorVersion(), majorMinorVersion);
+        
+        Asserts.succeedsEventually(new Runnable() {
+            @Override public void run() {
+                assertNotNull(cassandra.getAttribute(CassandraNode.TOKEN));
+                assertNotNull(cassandra.getAttribute(CassandraNode.TOKENS));
+                assertEquals(cassandra.getAttribute(CassandraNode.TOKENS).size(), 256, "tokens="+cassandra.getAttribute(CassandraNode.TOKENS));
+                
+                assertEquals(cassandra.getAttribute(CassandraNode.PEERS), (Integer)256);
+                assertEquals(cassandra.getAttribute(CassandraNode.LIVE_NODE_COUNT), (Integer)1);
+        
+                assertTrue(cassandra.getAttribute(CassandraNode.SERVICE_UP_JMX));
+                assertNotNull(cassandra.getAttribute(CassandraNode.THRIFT_PORT_LATENCY));
+        
+                assertNotNull(cassandra.getAttribute(CassandraNode.READ_PENDING));
+                assertNotNull(cassandra.getAttribute(CassandraNode.READ_ACTIVE));
+                EntityTestUtils.assertAttribute(cassandra, CassandraNode.READ_COMPLETED, MathPredicates.greaterThanOrEqual(1));
+                assertNotNull(cassandra.getAttribute(CassandraNode.WRITE_PENDING));
+                assertNotNull(cassandra.getAttribute(CassandraNode.WRITE_ACTIVE));
+                EntityTestUtils.assertAttribute(cassandra, CassandraNode.WRITE_COMPLETED, MathPredicates.greaterThanOrEqual(1));
+                
+                assertNotNull(cassandra.getAttribute(CassandraNode.READS_PER_SECOND_LAST));
+                assertNotNull(cassandra.getAttribute(CassandraNode.WRITES_PER_SECOND_LAST));
+        
+                assertNotNull(cassandra.getAttribute(CassandraNode.THRIFT_PORT_LATENCY_IN_WINDOW));
+                assertNotNull(cassandra.getAttribute(CassandraNode.READS_PER_SECOND_IN_WINDOW));
+                assertNotNull(cassandra.getAttribute(CassandraNode.WRITES_PER_SECOND_IN_WINDOW));
+                
+                // an example MXBean
+                EntityTestUtils.assertAttribute(cassandra, CassandraNode.MAX_HEAP_MEMORY, MathPredicates.greaterThanOrEqual(1));
+            }});
+
+        cassandra.stop();
+
+        EntityTestUtils.assertAttributeEqualsEventually(cassandra, Startable.SERVICE_UP, false);
+    }
+}
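
For reference, a minimal sketch (not part of this commit) of running the same
Astyanax smoke test programmatically; it assumes a managed app and a provisioned
location "loc", as in the tests above:

    CassandraNode cassandra = app.createAndManageChild(EntitySpec.create(CassandraNode.class)
            .configure("thriftPort", "9876+"));   // 9876, or the next free port above it
    app.start(ImmutableList.of(loc));
    // wait for the node to come up, then write and read back sample data
    EntityTestUtils.assertAttributeEqualsEventually(cassandra, CassandraNode.SERVICE_UP, true);
    new AstyanaxSample(cassandra).astyanaxTest();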



[18/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakClusterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakClusterImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakClusterImpl.java
deleted file mode 100644
index 7b256c0..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakClusterImpl.java
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.riak;
-
-import static brooklyn.util.JavaGroovyEquivalents.groovyTruth;
-
-import java.net.URI;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.enricher.Enrichers;
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.Attributes;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.basic.EntityInternal;
-import brooklyn.entity.basic.EntityPredicates;
-import brooklyn.entity.basic.Lifecycle;
-import brooklyn.entity.basic.ServiceStateLogic;
-import brooklyn.entity.basic.ServiceStateLogic.ServiceNotUpLogic;
-import brooklyn.entity.group.AbstractMembershipTrackingPolicy;
-import brooklyn.entity.group.DynamicClusterImpl;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.DependentConfiguration;
-import brooklyn.policy.EnricherSpec;
-import brooklyn.policy.PolicySpec;
-import brooklyn.util.task.Tasks;
-import brooklyn.util.time.Duration;
-import brooklyn.util.time.Time;
-
-import com.google.common.base.Function;
-import com.google.common.base.Joiner;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicates;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
-public class RiakClusterImpl extends DynamicClusterImpl implements RiakCluster {
-
-    private static final Logger log = LoggerFactory.getLogger(RiakClusterImpl.class);
-
-    private transient Object mutex = new Object[0];
-
-    public void init() {
-        super.init();
-        log.info("Initializing the riak cluster...");
-        setAttribute(IS_CLUSTER_INIT, false);
-    }
-
-    @Override
-    protected void doStart() {
-        super.doStart();
-        connectSensors();
-
-        try {
-            Duration delay = getConfig(DELAY_BEFORE_ADVERTISING_CLUSTER);
-            Tasks.setBlockingDetails("Sleeping for "+delay+" before advertising cluster available");
-            Time.sleep(delay);
-        } finally {
-            Tasks.resetBlockingDetails();
-        }
-
-        //FIXME: add a quorum to tolerate failed nodes before setting on fire.
-        @SuppressWarnings("unchecked")
-        Optional<Entity> anyNode = Iterables.tryFind(getMembers(), Predicates.and(
-                Predicates.instanceOf(RiakNode.class),
-                EntityPredicates.attributeEqualTo(RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER, true),
-                EntityPredicates.attributeEqualTo(RiakNode.SERVICE_UP, true)));
-        if (anyNode.isPresent()) {
-            setAttribute(IS_CLUSTER_INIT, true);
-        } else {
-            log.warn("No Riak Nodes are found on the cluster: {}. Initialization Failed", getId());
-            ServiceStateLogic.setExpectedState(this, Lifecycle.ON_FIRE);
-        }
-    }
-
-    protected EntitySpec<?> getMemberSpec() {
-        EntitySpec<?> result = config().get(MEMBER_SPEC);
-        if (result!=null) return result;
-        return EntitySpec.create(RiakNode.class);
-    }
-
-    protected void connectSensors() {
-        addPolicy(PolicySpec.create(MemberTrackingPolicy.class)
-                .displayName("Controller targets tracker")
-                .configure("sensorsToTrack", ImmutableSet.of(RiakNode.SERVICE_UP))
-                .configure("group", this));
-
-        EnricherSpec<?> first = Enrichers.builder()
-                 .aggregating(Attributes.MAIN_URI)
-                 .publishing(Attributes.MAIN_URI)
-                 .computing(new Function<Collection<URI>,URI>() {
-                    @Override
-                    public URI apply(Collection<URI> input) {
-                        return input.iterator().next();
-                    } })
-                 .fromMembers()
-                 .build();
-        addEnricher(first);
-        
-        Map<? extends AttributeSensor<? extends Number>, ? extends AttributeSensor<? extends Number>> enricherSetup = 
-            ImmutableMap.<AttributeSensor<? extends Number>, AttributeSensor<? extends Number>>builder()
-                .put(RiakNode.NODE_PUTS, RiakCluster.NODE_PUTS_1MIN_PER_NODE)
-                .put(RiakNode.NODE_GETS, RiakCluster.NODE_GETS_1MIN_PER_NODE)
-                .put(RiakNode.NODE_OPS, RiakCluster.NODE_OPS_1MIN_PER_NODE)
-            .build();
-        // construct sum and average over cluster
-        for (AttributeSensor<? extends Number> nodeSensor : enricherSetup.keySet()) {
-            addSummingMemberEnricher(nodeSensor);
-            addAveragingMemberEnricher(nodeSensor, enricherSetup.get(nodeSensor));
-        }
-    }
-
-    private void addAveragingMemberEnricher(AttributeSensor<? extends Number> fromSensor, AttributeSensor<? extends Number> toSensor) {
-        addEnricher(Enrichers.builder()
-            .aggregating(fromSensor)
-            .publishing(toSensor)
-            .fromMembers()
-            .computingAverage()
-            .build()
-        );
-    }
-
-    private void addSummingMemberEnricher(AttributeSensor<? extends Number> source) {
-        addEnricher(Enrichers.builder()
-            .aggregating(source)
-            .publishing(source)
-            .fromMembers()
-            .computingSum()
-            .build()
-        );
-    }
-
-    protected void onServerPoolMemberChanged(final Entity member) {
-        synchronized (mutex) {
-            log.trace("For {}, considering membership of {} which is in locations {}", new Object[]{ this, member, member.getLocations() });
-
-            Map<Entity, String> nodes = getAttribute(RIAK_CLUSTER_NODES);
-            if (belongsInServerPool(member)) {
-                // TODO can we discover the nodes by asking the riak cluster, rather than assuming what we add will be in there?
-                // TODO and can we do join as part of node starting?
-
-                if (nodes == null) {
-                    nodes = Maps.newLinkedHashMap();
-                }
-                String riakName = getRiakName(member);
-                Preconditions.checkNotNull(riakName);
-
-                // Flag the first member to join as the initial node of the riak cluster.
-                Boolean firstNode = getAttribute(IS_FIRST_NODE_SET);
-                if (!Boolean.TRUE.equals(firstNode)) {
-                    setAttribute(IS_FIRST_NODE_SET, Boolean.TRUE);
-
-                    nodes.put(member, riakName);
-                    setAttribute(RIAK_CLUSTER_NODES, nodes);
-
-                    ((EntityInternal) member).setAttribute(RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER, Boolean.TRUE);
-
-                    log.info("Added initial Riak node {}: {}; {} to new cluster", new Object[] { this, member, getRiakName(member) });
-                } else {
-                    // TODO: be wary of erroneous nodes that are still flagged 'in cluster'
-                    // add the new node to be part of the riak cluster.
-                    Optional<Entity> anyNodeInCluster = Iterables.tryFind(nodes.keySet(), Predicates.and(
-                            Predicates.instanceOf(RiakNode.class),
-                            EntityPredicates.attributeEqualTo(RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER, true)));
-                    if (anyNodeInCluster.isPresent()) {
-                        if (!nodes.containsKey(member) && member.getAttribute(RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER) == null) {
-                            String anyNodeName = anyNodeInCluster.get().getAttribute(RiakNode.RIAK_NODE_NAME);
-                            Entities.invokeEffectorWithArgs(this, member, RiakNode.JOIN_RIAK_CLUSTER, anyNodeName).blockUntilEnded();
-                            nodes.put(member, riakName);
-                            setAttribute(RIAK_CLUSTER_NODES, nodes);
-                            log.info("Added Riak node {}: {}; {} to cluster", new Object[] { this, member, getRiakName(member) });
-                        }
-                    } else {
-                        log.error("isFirstNodeSet, but no cluster members found to add {}", member.getId());
-                    }
-                }
-            } else {
-                if (nodes != null && nodes.containsKey(member)) {
-                    DependentConfiguration.attributeWhenReady(member, RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER, Predicates.equalTo(false)).blockUntilEnded(Duration.TWO_MINUTES);
-                    @SuppressWarnings("unchecked")
-                    Optional<Entity> anyNodeInCluster = Iterables.tryFind(nodes.keySet(), Predicates.and(
-                            Predicates.instanceOf(RiakNode.class),
-                            EntityPredicates.attributeEqualTo(RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER, true),
-                            Predicates.not(Predicates.equalTo(member))));
-                    if (anyNodeInCluster.isPresent()) {
-                        Entities.invokeEffectorWithArgs(this, anyNodeInCluster.get(), RiakNode.REMOVE_FROM_CLUSTER, getRiakName(member)).blockUntilEnded();
-                    }
-                    nodes.remove(member);
-                    setAttribute(RIAK_CLUSTER_NODES, nodes);
-                    log.info("Removed Riak node {}: {}; {} from cluster", new Object[]{ this, member, getRiakName(member) });
-                }
-            }
-
-            ServiceNotUpLogic.updateNotUpIndicatorRequiringNonEmptyMap(this, RIAK_CLUSTER_NODES);
-
-            calculateClusterAddresses();
-        }
-    }
-
-    private void calculateClusterAddresses() {
-        List<String> addresses = Lists.newArrayList();
-        List<String> addressesPbPort = Lists.newArrayList();
-        for (Entity entity : this.getMembers()) {
-            if (entity instanceof RiakNode && entity.getAttribute(Attributes.SERVICE_UP)) {
-                RiakNode riakNode = (RiakNode) entity;
-                addresses.add(riakNode.getAttribute(Attributes.SUBNET_HOSTNAME) + ":" + riakNode.getAttribute(RiakNode.RIAK_WEB_PORT));
-                addressesPbPort.add(riakNode.getAttribute(Attributes.SUBNET_HOSTNAME) + ":" + riakNode.getAttribute(RiakNode.RIAK_PB_PORT));
-            }
-        }
-        setAttribute(RiakCluster.NODE_LIST, Joiner.on(",").join(addresses));
-        setAttribute(RiakCluster.NODE_LIST_PB_PORT, Joiner.on(",").join(addressesPbPort));
-    }
-
-    protected boolean belongsInServerPool(Entity member) {
-        if (!groovyTruth(member.getAttribute(Startable.SERVICE_UP))) {
-            log.trace("Members of {}, checking {}, eliminating because not up", this, member);
-            return false;
-        }
-        if (!getMembers().contains(member)) {
-            log.trace("Members of {}, checking {}, eliminating because not member", this, member);
-            return false;
-        }
-        log.trace("Members of {}, checking {}, approving", this, member);
-
-        return true;
-    }
-
-    private String getRiakName(Entity node) {
-        return node.getAttribute(RiakNode.RIAK_NODE_NAME);
-    }
-
-    public static class MemberTrackingPolicy extends AbstractMembershipTrackingPolicy {
-        @Override
-        protected void onEntityEvent(EventType type, Entity entity) {
-            ((RiakClusterImpl) super.entity).onServerPoolMemberChanged(entity);
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakNode.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakNode.java b/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakNode.java
deleted file mode 100644
index fa2bbbb..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakNode.java
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.riak;
-
-import java.net.URI;
-import java.util.List;
-
-import org.apache.brooklyn.catalog.Catalog;
-import brooklyn.config.ConfigKey;
-import brooklyn.entity.annotation.Effector;
-import brooklyn.entity.annotation.EffectorParam;
-import brooklyn.entity.basic.Attributes;
-import brooklyn.entity.basic.ConfigKeys;
-import brooklyn.entity.basic.MethodEffector;
-import brooklyn.entity.basic.SoftwareProcess;
-import brooklyn.entity.java.UsesJava;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.AttributeSensorAndConfigKey;
-import brooklyn.event.basic.PortAttributeSensorAndConfigKey;
-import brooklyn.event.basic.Sensors;
-import brooklyn.util.flags.SetFromFlag;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.reflect.TypeToken;
-
-@Catalog(name="Riak Node", description="Riak is a distributed NoSQL key-value data store that offers "
-        + "extremely high availability, fault tolerance, operational simplicity and scalability.")
-@ImplementedBy(RiakNodeImpl.class)
-public interface RiakNode extends SoftwareProcess, UsesJava {
-
-    @SetFromFlag("version")
-    ConfigKey<String> SUGGESTED_VERSION = ConfigKeys.newConfigKeyWithDefault(SoftwareProcess.SUGGESTED_VERSION,
-            "Version to install (Default 2.0.5)", "2.0.5");
-
-    @SetFromFlag("optimizeNetworking")
-    ConfigKey<Boolean> OPTIMIZE_HOST_NETWORKING = ConfigKeys.newBooleanConfigKey("riak.networking.optimize", "Optimize host networking when running in a VM", Boolean.TRUE);
-
-    // vm.args and app.config are used for pre-version 2.0.0. Later versions use the (simplified) riak.conf
-    // see https://github.com/joedevivo/ricon/blob/master/cuttlefish.md
-    @SetFromFlag("vmArgsTemplateUrl")
-    ConfigKey<String> RIAK_VM_ARGS_TEMPLATE_URL = ConfigKeys.newStringConfigKey(
-            "riak.vmArgs.templateUrl", "Template file (in freemarker format) for the vm.args config file",
-            "classpath://brooklyn/entity/nosql/riak/vm.args");
-    @SetFromFlag("appConfigTemplateUrl")
-    ConfigKey<String> RIAK_APP_CONFIG_TEMPLATE_URL = ConfigKeys.newStringConfigKey(
-            "riak.appConfig.templateUrl", "Template file (in freemarker format) for the app.config config file",
-            "classpath://brooklyn/entity/nosql/riak/app.config");
-    @SetFromFlag("appConfigTemplateUrlLinux")
-    ConfigKey<String> RIAK_CONF_TEMPLATE_URL_LINUX = ConfigKeys.newStringConfigKey(
-            "riak.riakConf.templateUrl.linux", "Template file (in freemarker format) for the app.config config file",
-            "classpath://brooklyn/entity/nosql/riak/riak.conf");
-    @SetFromFlag("appConfigTemplateUrlMac")
-    ConfigKey<String> RIAK_CONF_TEMPLATE_URL_MAC = ConfigKeys.newStringConfigKey(
-            "riak.riakConf.templateUrl.mac", "Template file (in freemarker format) for the app.config config file",
-            "classpath://brooklyn/entity/nosql/riak/riak-mac.conf");
-
-    ConfigKey<String> RIAK_CONF_ADDITIONAL_CONTENT = ConfigKeys.newStringConfigKey(
-            "riak.riakConf.additionalContent", "Template file (in freemarker format) for setting up additional settings in the riak.conf file", "");
-    
-    // maxOpenFiles' default value (65536) is based on Basho's recommendation - http://docs.basho.com/riak/latest/ops/tuning/open-files-limit/
-    @SetFromFlag("maxOpenFiles")
-    ConfigKey<Integer> RIAK_MAX_OPEN_FILES = ConfigKeys.newIntegerConfigKey(
-            "riak.max.open.files", "Number of the open files required by Riak", 65536);
-    
-    @SetFromFlag("downloadUrlRhelCentos")
-    AttributeSensorAndConfigKey<String, String> DOWNLOAD_URL_RHEL_CENTOS = ConfigKeys.newTemplateSensorAndConfigKey("download.url.rhelcentos",
-            "URL pattern for downloading the linux RPM installer (will substitute things like ${version} automatically)",
-            "http://s3.amazonaws.com/downloads.basho.com/riak/${entity.majorVersion}/${entity.fullVersion}/rhel/" +
-                    "${entity.osMajorVersion}/riak-${entity.fullVersion}-1.el${entity.osMajorVersion}.x86_64.rpm");
-
-    @SetFromFlag("downloadUrlUbuntu")
-    AttributeSensorAndConfigKey<String, String> DOWNLOAD_URL_UBUNTU = ConfigKeys.newTemplateSensorAndConfigKey("download.url.ubuntu",
-            "URL pattern for downloading the linux Ubuntu installer (will substitute things like ${version} automatically)",
-            "http://s3.amazonaws.com/downloads.basho.com/riak/${entity.majorVersion}/${entity.fullVersion}/ubuntu/" +
-                    "$OS_RELEASE/riak_${entity.fullVersion}-1_amd64.deb");
-
-    @SetFromFlag("downloadUrlDebian")
-    AttributeSensorAndConfigKey<String, String> DOWNLOAD_URL_DEBIAN = ConfigKeys.newTemplateSensorAndConfigKey("download.url.debian",
-            "URL pattern for downloading the linux Debian installer (will substitute things like ${version} automatically)",
-            "http://s3.amazonaws.com/downloads.basho.com/riak/${entity.majorVersion}/${entity.fullVersion}/debian/" +
-                    "$OS_RELEASE/riak_${entity.fullVersion}-1_amd64.deb");
-
-    @SetFromFlag("downloadUrlMac")
-    AttributeSensorAndConfigKey<String, String> DOWNLOAD_URL_MAC = ConfigKeys.newTemplateSensorAndConfigKey("download.url.mac",
-            "URL pattern for downloading the MAC binaries tarball (will substitute things like ${version} automatically)",
-            "http://s3.amazonaws.com/downloads.basho.com/riak/${entity.majorVersion}/${entity.fullVersion}/osx/10.8/riak-${entity.fullVersion}-OSX-x86_64.tar.gz");
-
-    // NB these two are needed for clients to access
-    @SetFromFlag("riakWebPort")
-    PortAttributeSensorAndConfigKey RIAK_WEB_PORT = new PortAttributeSensorAndConfigKey("riak.webPort", "Riak Web Port", "8098+");
-
-    @SetFromFlag("riakPbPort")
-    PortAttributeSensorAndConfigKey RIAK_PB_PORT = new PortAttributeSensorAndConfigKey("riak.pbPort", "Riak Protocol Buffers Port", "8087+");
-
-    AttributeSensor<Boolean> RIAK_PACKAGE_INSTALL = Sensors.newBooleanSensor(
-            "riak.install.package", "Flag to indicate whether Riak was installed using an OS package");
-    AttributeSensor<Boolean> RIAK_ON_PATH = Sensors.newBooleanSensor(
-            "riak.install.onPath", "Flag to indicate whether Riak is available on the PATH");
-
-    AttributeSensor<Boolean> RIAK_NODE_HAS_JOINED_CLUSTER = Sensors.newBooleanSensor(
-            "riak.node.riakNodeHasJoinedCluster", "Flag to indicate whether the Riak node has joined a cluster member");
-
-    AttributeSensor<String> RIAK_NODE_NAME = Sensors.newStringSensor("riak.node", "Returns the riak node name as defined in vm.args");
-
-    // These are needed for nodes to talk to each other, but not by clients (so ideally set up in the security group for internal access)
-    PortAttributeSensorAndConfigKey HANDOFF_LISTENER_PORT = new PortAttributeSensorAndConfigKey("handoffListenerPort", "Handoff Listener Port", "8099+");
-    PortAttributeSensorAndConfigKey EPMD_LISTENER_PORT = new PortAttributeSensorAndConfigKey("epmdListenerPort", "Erlang Port Mapper Daemon Listener Port", "4369");
-    PortAttributeSensorAndConfigKey ERLANG_PORT_RANGE_START = new PortAttributeSensorAndConfigKey("erlangPortRangeStart", "Erlang Port Range Start", "6000+");
-    PortAttributeSensorAndConfigKey ERLANG_PORT_RANGE_END = new PortAttributeSensorAndConfigKey("erlangPortRangeEnd", "Erlang Port Range End", "7999+");
-
-    @SetFromFlag("searchEnabled")
-    ConfigKey<Boolean> SEARCH_ENABLED = ConfigKeys.newBooleanConfigKey("riak.search", "Deploy Solr and configure Riak to use it", false);
-
-    /**
-     * http://docs.basho.com/riak/latest/dev/using/search/
-     * Solr is powered by Riak's Yokozuna engine and is accessed through the Riak web port,
-     * so SEARCH_SOLR_PORT shouldn't be exposed.
-     */
-    ConfigKey<Integer> SEARCH_SOLR_PORT = ConfigKeys.newIntegerConfigKey("search.solr.port", "Solr port", 8983);
-    ConfigKey<Integer> SEARCH_SOLR_JMX_PORT = ConfigKeys.newIntegerConfigKey("search.solr.jmx_port", "Solr JMX port", 8985);
-
-    AttributeSensor<Integer> NODE_GETS = Sensors.newIntegerSensor("riak.node.gets", "Gets in the last minute");
-    AttributeSensor<Integer> NODE_GETS_TOTAL = Sensors.newIntegerSensor("riak.node.gets.total", "Total gets since node started");
-    AttributeSensor<Integer> NODE_PUTS = Sensors.newIntegerSensor("riak.node.puts", "Puts in the last minute");
-    AttributeSensor<Integer> NODE_PUTS_TOTAL = Sensors.newIntegerSensor("riak.node.puts.total", "Total puts since node started");
-    AttributeSensor<Integer> VNODE_GETS = Sensors.newIntegerSensor("riak.vnode.gets");
-    AttributeSensor<Integer> VNODE_GETS_TOTAL = Sensors.newIntegerSensor("riak.vnode.gets.total");
-
-    // Sensors for Riak node counters (within a 1-minute window, or over the lifetime of the node).
-    //http://docs.basho.com/riak/latest/ops/running/stats-and-monitoring/#Statistics-from-Riak
-    AttributeSensor<Integer> VNODE_PUTS = Sensors.newIntegerSensor("riak.vnode.puts");
-    AttributeSensor<Integer> VNODE_PUTS_TOTAL = Sensors.newIntegerSensor("riak.vnode.puts.total");
-    AttributeSensor<Integer> READ_REPAIRS_TOTAL = Sensors.newIntegerSensor("riak.read.repairs.total");
-    AttributeSensor<Integer> COORD_REDIRS_TOTAL = Sensors.newIntegerSensor("riak.coord.redirs.total");
-    //Additional Riak node counters
-    AttributeSensor<Integer> MEMORY_PROCESSES_USED = Sensors.newIntegerSensor("riak.memory.processes.used");
-    AttributeSensor<Integer> SYS_PROCESS_COUNT = Sensors.newIntegerSensor("riak.sys.process.count");
-    AttributeSensor<Integer> PBC_CONNECTS = Sensors.newIntegerSensor("riak.pbc.connects");
-    AttributeSensor<Integer> PBC_ACTIVE = Sensors.newIntegerSensor("riak.pbc.active");
-    @SuppressWarnings("serial")
-    AttributeSensor<List<String>> RING_MEMBERS = Sensors.newSensor(new TypeToken<List<String>>() {},
-            "ring.members", "all the riak nodes in the ring");
-    
-    AttributeSensor<Integer> NODE_OPS = Sensors.newIntegerSensor("riak.node.ops", "Sum of node gets and puts in the last minute");
-    AttributeSensor<Integer> NODE_OPS_TOTAL = Sensors.newIntegerSensor("riak.node.ops.total", "Sum of node gets and puts since the node started");
-
-    MethodEffector<Void> JOIN_RIAK_CLUSTER = new MethodEffector<Void>(RiakNode.class, "joinCluster");
-    MethodEffector<Void> LEAVE_RIAK_CLUSTER = new MethodEffector<Void>(RiakNode.class, "leaveCluster");
-    MethodEffector<Void> REMOVE_FROM_CLUSTER = new MethodEffector<Void>(RiakNode.class, "removeNode");
-
-    AttributeSensor<Integer> RIAK_NODE_GET_FSM_TIME_MEAN = Sensors.newIntegerSensor("riak.node_get_fsm_time_mean", "Time between reception of client read request and subsequent response to client");
-    AttributeSensor<Integer> RIAK_NODE_PUT_FSM_TIME_MEAN = Sensors.newIntegerSensor("riak.node_put_fsm_time_mean", "Time between reception of client write request and subsequent response to client");
-    AttributeSensor<Integer> RIAK_OBJECT_COUNTER_MERGE_TIME_MEAN = Sensors.newIntegerSensor("riak.object_counter_merge_time_mean", "Time it takes to perform an Update Counter operation");
-    AttributeSensor<Integer> RIAK_OBJECT_SET_MERGE_TIME_MEAN = Sensors.newIntegerSensor("riak.object_set_merge_time_mean", "Time it takes to perform an Update Set operation");
-    AttributeSensor<Integer> RIAK_OBJECT_MAP_MERGE_TIME_MEAN = Sensors.newIntegerSensor("riak.object_map_merge_time_mean", "Time it takes to perform an Update Map operation");
-    AttributeSensor<Integer> RIAK_CONSISTENT_GET_TIME_MEAN = Sensors.newIntegerSensor("riak.consistent_get_time_mean", "Strongly consistent read latency");
-    AttributeSensor<Integer> RIAK_CONSISTENT_PUT_TIME_MEAN = Sensors.newIntegerSensor("riak.consistent_put_time_mean", "Strongly consistent write latency");
-
-    List<AttributeSensor<Integer>> ONE_MINUTE_SENSORS = ImmutableList.of(RIAK_NODE_GET_FSM_TIME_MEAN, RIAK_NODE_PUT_FSM_TIME_MEAN,
-            RIAK_OBJECT_COUNTER_MERGE_TIME_MEAN, RIAK_OBJECT_SET_MERGE_TIME_MEAN, RIAK_OBJECT_MAP_MERGE_TIME_MEAN,
-            RIAK_CONSISTENT_GET_TIME_MEAN, RIAK_CONSISTENT_PUT_TIME_MEAN);
-
-    AttributeSensor<URI> RIAK_CONSOLE_URI = Attributes.MAIN_URI;
-
-    // accessors, for use from template file
-    Integer getRiakWebPort();
-
-    Integer getRiakPbPort();
-
-    Integer getHandoffListenerPort();
-
-    Integer getEpmdListenerPort();
-
-    Integer getErlangPortRangeStart();
-
-    Integer getErlangPortRangeEnd();
-
-    Boolean isSearchEnabled();
-
-    Integer getSearchSolrPort();
-
-    Integer getSearchSolrJmxPort();
-
-    String getFullVersion();
-
-    String getMajorVersion();
-
-    String getOsMajorVersion();
-
-    // TODO add commitCluster() effector and add effectors joinCluster, leaveCluster, removeNode, recoverFailedNode which do not execute commitCluster()
-    // the commit where the commitCluster effector was available is adbf2dc1cb5df98b1e52d3ab35fa6bb4983b722f
-
-    @Effector(description = "Join the Riak cluster on the given node")
-    void joinCluster(@EffectorParam(name = "nodeName") String nodeName);
-
-    @Effector(description = "Leave the Riak cluster")
-    void leaveCluster();
-
-    @Effector(description = "Remove the given node from the Riak cluster")
-    void removeNode(@EffectorParam(name = "nodeName") String nodeName);
-
-    @Effector(description = "Recover and join the Riak cluster on the given node")
-    void recoverFailedNode(@EffectorParam(name = "nodeName") String nodeName);
-
-    @Effector(description = "Create or modify a bucket type before activation")
-    void bucketTypeCreate(@EffectorParam(name = "bucketTypeName") String bucketTypeName,
-                          @EffectorParam(name = "bucketTypeProperties") String bucketTypeProperties);
-
-    @Effector(description = "List all currently available bucket types and their activation status")
-    List<String> bucketTypeList();
-
-    @Effector(description = "Display the status and properties of a specific bucket type")
-    List<String> bucketTypeStatus(@EffectorParam(name = "bucketTypeName") String bucketTypeName);
-
-    @Effector(description = "Update a bucket type after activation")
-    void bucketTypeUpdate(@EffectorParam(name = "bucketTypeName") String bucketTypeName,
-                          @EffectorParam(name = "bucketTypeProperties") String bucketTypeProperties);
-
-    @Effector(description = "Activate a bucket type")
-    void bucketTypeActivate(@EffectorParam(name = "bucketTypeName") String bucketTypeName);
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakNodeDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakNodeDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakNodeDriver.java
deleted file mode 100644
index 5fca3cc..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakNodeDriver.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.riak;
-
-import brooklyn.entity.basic.SoftwareProcessDriver;
-
-import java.util.List;
-
-public interface RiakNodeDriver extends SoftwareProcessDriver {
-
-    String getRiakEtcDir();
-
-    void joinCluster(String nodeName);
-
-    void leaveCluster();
-
-    void removeNode(String nodeName);
-
-    void recoverFailedNode(String nodeName);
-
-    String getOsMajorVersion();
-
-    void bucketTypeCreate(String bucketTypeName, String bucketTypeProperties);
-
-    List<String> bucketTypeList();
-
-    List<String> bucketTypeStatus(String bucketTypeName);
-
-    void bucketTypeUpdate(String bucketTypeName, String bucketTypeProperties);
-
-    void bucketTypeActivate(String bucketTypeName);
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakNodeImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakNodeImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakNodeImpl.java
deleted file mode 100644
index 7dda317..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakNodeImpl.java
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.riak;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
-import javax.annotation.Nullable;
-
-import brooklyn.enricher.Enrichers;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.basic.SoftwareProcessImpl;
-import brooklyn.entity.webapp.WebAppServiceMethods;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.AttributeSensorAndConfigKey;
-import brooklyn.event.feed.http.HttpFeed;
-import brooklyn.event.feed.http.HttpPollConfig;
-import brooklyn.event.feed.http.HttpValueFunctions;
-import brooklyn.location.MachineProvisioningLocation;
-import brooklyn.location.access.BrooklynAccessUtils;
-import brooklyn.location.cloud.CloudLocationConfig;
-import brooklyn.util.collections.MutableSet;
-import brooklyn.util.config.ConfigBag;
-import brooklyn.util.guava.Functionals;
-import brooklyn.util.time.Duration;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Function;
-import com.google.common.base.Functions;
-import com.google.common.collect.ContiguousSet;
-import com.google.common.collect.DiscreteDomain;
-import com.google.common.collect.Range;
-import com.google.common.net.HostAndPort;
-
-public class RiakNodeImpl extends SoftwareProcessImpl implements RiakNode {
-
-    private volatile HttpFeed httpFeed;
-
-    @Override
-    public RiakNodeDriver getDriver() {
-        return (RiakNodeDriver) super.getDriver();
-    }
-
-    @Override
-    public Class<RiakNodeDriver> getDriverInterface() {
-        return RiakNodeDriver.class;
-    }
-
-    @Override
-    public void init() {
-        super.init();
-        // fail fast if config files not avail
-        Entities.getRequiredUrlConfig(this, RIAK_VM_ARGS_TEMPLATE_URL);
-        Entities.getRequiredUrlConfig(this, RIAK_APP_CONFIG_TEMPLATE_URL);
-        
-        Integer defaultMaxOpenFiles = RIAK_MAX_OPEN_FILES.getDefaultValue();
-        Integer maxOpenFiles = getConfig(RiakNode.RIAK_MAX_OPEN_FILES);
-        Preconditions.checkArgument(maxOpenFiles >= defaultMaxOpenFiles, "Specified number of open files (%s) is less than the required minimum (%s)",
-                maxOpenFiles, defaultMaxOpenFiles);
-    }
-
-    @SuppressWarnings("rawtypes")
-    public boolean isPackageDownloadUrlProvided() {
-        AttributeSensorAndConfigKey[] downloadProperties = { DOWNLOAD_URL_RHEL_CENTOS, DOWNLOAD_URL_UBUNTU, DOWNLOAD_URL_DEBIAN };
-        for (AttributeSensorAndConfigKey property : downloadProperties) {
-            if (!((ConfigurationSupportInternal) config()).getRaw(property).isAbsent()) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-    @Override
-    protected Map<String, Object> obtainProvisioningFlags(@SuppressWarnings("rawtypes") MachineProvisioningLocation location) {
-        ConfigBag result = ConfigBag.newInstance(super.obtainProvisioningFlags(location));
-        result.configure(CloudLocationConfig.OS_64_BIT, true);
-        return result.getAllConfig();
-    }
-
-    @Override
-    protected Collection<Integer> getRequiredOpenPorts() {
-        // TODO this creates a huge list of inbound ports; much better to define on a security group using range syntax!
-        int erlangRangeStart = getConfig(ERLANG_PORT_RANGE_START).iterator().next();
-        int erlangRangeEnd = getConfig(ERLANG_PORT_RANGE_END).iterator().next();
-
-        Set<Integer> ports = MutableSet.copyOf(super.getRequiredOpenPorts());
-        Set<Integer> erlangPorts = ContiguousSet.create(Range.open(erlangRangeStart, erlangRangeEnd), DiscreteDomain.integers());
-        ports.addAll(erlangPorts);
-
-        return ports;
-    }
-
-    @Override
-    public void connectSensors() {
-        super.connectSensors();
-        connectServiceUpIsRunning();
-        HostAndPort accessible = BrooklynAccessUtils.getBrooklynAccessibleAddress(this, getRiakWebPort());
-
-        HttpFeed.Builder httpFeedBuilder = HttpFeed.builder()
-                .entity(this)
-                .period(500, TimeUnit.MILLISECONDS)
-                .baseUri(String.format("http://%s/stats", accessible.toString()))
-                .poll(new HttpPollConfig<Integer>(NODE_GETS)
-                        .onSuccess(HttpValueFunctions.jsonContents("node_gets", Integer.class))
-                        .onFailureOrException(Functions.constant(-1)))
-                .poll(new HttpPollConfig<Integer>(NODE_GETS_TOTAL)
-                        .onSuccess(HttpValueFunctions.jsonContents("node_gets_total", Integer.class))
-                        .onFailureOrException(Functions.constant(-1)))
-                .poll(new HttpPollConfig<Integer>(NODE_PUTS)
-                        .onSuccess(HttpValueFunctions.jsonContents("node_puts", Integer.class))
-                        .onFailureOrException(Functions.constant(-1)))
-                .poll(new HttpPollConfig<Integer>(NODE_PUTS_TOTAL)
-                        .onSuccess(HttpValueFunctions.jsonContents("node_puts_total", Integer.class))
-                        .onFailureOrException(Functions.constant(-1)))
-                .poll(new HttpPollConfig<Integer>(VNODE_GETS)
-                        .onSuccess(HttpValueFunctions.jsonContents("vnode_gets", Integer.class))
-                        .onFailureOrException(Functions.constant(-1)))
-                .poll(new HttpPollConfig<Integer>(VNODE_GETS_TOTAL)
-                        .onSuccess(HttpValueFunctions.jsonContents("vnode_gets_total", Integer.class))
-                        .onFailureOrException(Functions.constant(-1)))
-                .poll(new HttpPollConfig<Integer>(VNODE_PUTS)
-                        .onSuccess(HttpValueFunctions.jsonContents("vnode_puts", Integer.class))
-                        .onFailureOrException(Functions.constant(-1)))
-                .poll(new HttpPollConfig<Integer>(VNODE_PUTS_TOTAL)
-                        .onSuccess(HttpValueFunctions.jsonContents("vnode_puts_total", Integer.class))
-                        .onFailureOrException(Functions.constant(-1)))
-                .poll(new HttpPollConfig<Integer>(READ_REPAIRS_TOTAL)
-                        .onSuccess(HttpValueFunctions.jsonContents("read_repairs_total", Integer.class))
-                        .onFailureOrException(Functions.constant(-1)))
-                .poll(new HttpPollConfig<Integer>(COORD_REDIRS_TOTAL)
-                        .onSuccess(HttpValueFunctions.jsonContents("coord_redirs_total", Integer.class))
-                        .onFailureOrException(Functions.constant(-1)))
-                .poll(new HttpPollConfig<Integer>(MEMORY_PROCESSES_USED)
-                        .onSuccess(HttpValueFunctions.jsonContents("memory_processes_used", Integer.class))
-                        .onFailureOrException(Functions.constant(-1)))
-                .poll(new HttpPollConfig<Integer>(SYS_PROCESS_COUNT)
-                        .onSuccess(HttpValueFunctions.jsonContents("sys_process_count", Integer.class))
-                        .onFailureOrException(Functions.constant(-1)))
-                .poll(new HttpPollConfig<Integer>(PBC_CONNECTS)
-                        .onSuccess(HttpValueFunctions.jsonContents("pbc_connects", Integer.class))
-                        .onFailureOrException(Functions.constant(-1)))
-                .poll(new HttpPollConfig<Integer>(PBC_ACTIVE)
-                        .onSuccess(HttpValueFunctions.jsonContents("pbc_active", Integer.class))
-                        .onFailureOrException(Functions.constant(-1)))
-                .poll(new HttpPollConfig<List<String>>(RING_MEMBERS)
-                        .onSuccess(Functionals.chain(
-                                HttpValueFunctions.jsonContents("ring_members", String[].class),
-                                new Function<String[], List<String>>() {
-                                    @Nullable
-                                    @Override
-                                    public List<String> apply(@Nullable String[] strings) {
-                                        return Arrays.asList(strings);
-                                    }
-                                }
-                        ))
-                        .onFailureOrException(Functions.constant(Arrays.asList(new String[0]))));
-
-        for (AttributeSensor<Integer> sensor : ONE_MINUTE_SENSORS) {
-            httpFeedBuilder.poll(new HttpPollConfig<Integer>(sensor)
-                    .period(Duration.ONE_MINUTE)
-                    .onSuccess(HttpValueFunctions.jsonContents(sensor.getName().substring(5), Integer.class))
-                    .onFailureOrException(Functions.constant(-1)));
-        }
-
-        httpFeed = httpFeedBuilder.build();
-
-        addEnricher(Enrichers.builder().combining(NODE_GETS, NODE_PUTS).computingSum().publishing(NODE_OPS).build());
-        addEnricher(Enrichers.builder().combining(NODE_GETS_TOTAL, NODE_PUTS_TOTAL).computingSum().publishing(NODE_OPS_TOTAL).build());
-        WebAppServiceMethods.connectWebAppServerPolicies(this);
-    }
-
-    @Override
-    public void disconnectSensors() {
-        super.disconnectSensors();
-        if (httpFeed != null) {
-            httpFeed.stop();
-        }
-        disconnectServiceUpIsRunning();
-    }
-
-    @Override
-    public void joinCluster(String nodeName) {
-        getDriver().joinCluster(nodeName);
-    }
-
-    @Override
-    public void leaveCluster() {
-        getDriver().leaveCluster();
-    }
-
-    @Override
-    public void removeNode(String nodeName) {
-        getDriver().removeNode(nodeName);
-    }
-
-    @Override
-    public void bucketTypeCreate(String bucketTypeName, String bucketTypeProperties) {
-        getDriver().bucketTypeCreate(bucketTypeName, bucketTypeProperties);
-    }
-
-    @Override
-    public List<String> bucketTypeList() {
-        return getDriver().bucketTypeList();
-    }
-
-    @Override
-    public List<String> bucketTypeStatus(String bucketTypeName) {
-        return getDriver().bucketTypeStatus(bucketTypeName);
-    }
-
-    @Override
-    public void bucketTypeUpdate(String bucketTypeName, String bucketTypeProperties) {
-        getDriver().bucketTypeUpdate(bucketTypeName, bucketTypeProperties);
-    }
-
-    @Override
-    public void bucketTypeActivate(String bucketTypeName) {
-        getDriver().bucketTypeActivate(bucketTypeName);
-    }
-
-    @Override
-    public void recoverFailedNode(String nodeName) {
-        getDriver().recoverFailedNode(nodeName);
-    }
-
-    @Override
-    public Integer getRiakWebPort() {
-        return getAttribute(RiakNode.RIAK_WEB_PORT);
-    }
-
-    @Override
-    public Integer getRiakPbPort() {
-        return getAttribute(RiakNode.RIAK_PB_PORT);
-    }
-
-    @Override
-    public Integer getHandoffListenerPort() {
-        return getAttribute(RiakNode.HANDOFF_LISTENER_PORT);
-    }
-
-    @Override
-    public Integer getEpmdListenerPort() {
-        return getAttribute(RiakNode.EPMD_LISTENER_PORT);
-    }
-
-    @Override
-    public Integer getErlangPortRangeStart() {
-        return getAttribute(RiakNode.ERLANG_PORT_RANGE_START);
-    }
-
-    @Override
-    public Integer getErlangPortRangeEnd() {
-        return getAttribute(RiakNode.ERLANG_PORT_RANGE_END);
-    }
-
-    @Override
-    public Boolean isSearchEnabled() {
-        return getConfig(RiakNode.SEARCH_ENABLED);
-    }
-
-    @Override
-    public Integer getSearchSolrPort() {
-        return getConfig(RiakNode.SEARCH_SOLR_PORT);
-    }
-
-    @Override
-    public Integer getSearchSolrJmxPort() {
-        return getConfig(RiakNode.SEARCH_SOLR_JMX_PORT);
-    }
-
-    @Override
-    public String getMajorVersion() {
-        return getFullVersion().substring(0, 3);
-    }
-
-    @Override
-    public String getFullVersion() {
-        return getConfig(RiakNode.SUGGESTED_VERSION);
-    }
-
-    @Override
-    public String getOsMajorVersion() {
-        return getDriver().getOsMajorVersion();
-    }
-
-}
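
A note on the one-minute poll loop above: sensor.getName().substring(5)
assumes every sensor in ONE_MINUTE_SENSORS is named with a five-character
prefix (presumably "riak.") followed by the exact field name in Riak's
/stats JSON. A minimal sketch of that convention, using a hypothetical
sensor name rather than one taken from this commit:

    import brooklyn.event.AttributeSensor;
    import brooklyn.event.basic.Sensors;

    // Hypothetical sensor following the assumed "riak." + <stats field>
    // convention; stripping the 5-char prefix yields the JSON key to poll.
    AttributeSensor<Integer> NODE_GETS_60S = Sensors.newIntegerSensor("riak.node_gets_60s");
    String statsField = NODE_GETS_60S.getName().substring(5);  // -> "node_gets_60s"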

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakNodeSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakNodeSshDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakNodeSshDriver.java
deleted file mode 100644
index 7ad15d7..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/riak/RiakNodeSshDriver.java
+++ /dev/null
@@ -1,614 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.riak;
-
-import static brooklyn.util.ssh.BashCommands.INSTALL_CURL;
-import static brooklyn.util.ssh.BashCommands.INSTALL_TAR;
-import static brooklyn.util.ssh.BashCommands.addSbinPathCommand;
-import static brooklyn.util.ssh.BashCommands.alternatives;
-import static brooklyn.util.ssh.BashCommands.chainGroup;
-import static brooklyn.util.ssh.BashCommands.commandToDownloadUrlAs;
-import static brooklyn.util.ssh.BashCommands.ifExecutableElse;
-import static brooklyn.util.ssh.BashCommands.ifNotExecutable;
-import static brooklyn.util.ssh.BashCommands.ok;
-import static brooklyn.util.ssh.BashCommands.sudo;
-import static brooklyn.util.text.StringEscapes.BashStringEscapes.escapeLiteralForDoubleQuotedBash;
-import static java.lang.String.format;
-
-import java.net.URI;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-
-import brooklyn.entity.java.JavaSoftwareProcessSshDriver;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.entity.basic.Attributes;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.basic.lifecycle.ScriptHelper;
-import brooklyn.entity.software.SshEffectorTasks;
-import brooklyn.location.OsDetails;
-import brooklyn.location.basic.SshMachineLocation;
-import brooklyn.util.collections.MutableMap;
-import brooklyn.util.net.Urls;
-import brooklyn.util.os.Os;
-import brooklyn.util.ssh.BashCommands;
-import brooklyn.util.task.DynamicTasks;
-import brooklyn.util.task.ssh.SshTasks;
-import brooklyn.util.text.Strings;
-
-import com.google.common.base.Joiner;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
-
-// TODO: Alter -env ERL_CRASH_DUMP path in vm.args
-public class RiakNodeSshDriver extends JavaSoftwareProcessSshDriver implements RiakNodeDriver {
-
-    private static final Logger LOG = LoggerFactory.getLogger(RiakNodeSshDriver.class);
-    private static final String sbinPath = "$PATH:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin";
-    private static final String INSTALLING_FALLBACK = INSTALLING + "_fallback";
-
-    public RiakNodeSshDriver(final RiakNodeImpl entity, final SshMachineLocation machine) {
-        super(entity, machine);
-    }
-
-    @Override
-    protected String getLogFileLocation() {
-        return "/var/log/riak/solr.log";
-    }
-
-    @Override
-    public RiakNodeImpl getEntity() {
-        return RiakNodeImpl.class.cast(super.getEntity());
-    }
-
-    @Override
-    public Map<String, String> getShellEnvironment() {
-        MutableMap<String, String> result = MutableMap.copyOf(super.getShellEnvironment());
-        // how to change epmd port, according to
-        // http://serverfault.com/questions/582787/how-to-change-listening-interface-of-rabbitmqs-epmd-port-4369
-        if (getEntity().getEpmdListenerPort() != null) {
-            result.put("ERL_EPMD_PORT", Integer.toString(getEntity().getEpmdListenerPort()));
-        }
-        result.put("WAIT_FOR_ERLANG", "60");
-        return result;
-    }
-
-    @Override
-    public void preInstall() {
-        resolver = Entities.newDownloader(this);
-        setExpandedInstallDir(Os.mergePaths(getInstallDir(), resolver.getUnpackedDirectoryName(format("riak-%s", getVersion()))));
-
-        // Set package install attribute
-        OsDetails osDetails = getMachine().getMachineDetails().getOsDetails();
-        if (osDetails.isLinux()) {
-            entity.setAttribute(RiakNode.RIAK_PACKAGE_INSTALL, true);
-        } else if (osDetails.isMac()) {
-            entity.setAttribute(RiakNode.RIAK_PACKAGE_INSTALL, false);
-        }
-    }
-
-    @Override
-    public void install() {
-        if (entity.getConfig(Attributes.DOWNLOAD_URL) != null) {
-            LOG.warn("Ignoring download.url {}, use download.url.rhelcentos or download.url.mac", entity.getConfig(Attributes.DOWNLOAD_URL));
-        }
-
-        OsDetails osDetails = getMachine().getMachineDetails().getOsDetails();
-        List<String> commands = Lists.newLinkedList();
-        if (osDetails.isLinux()) {
-            if (getEntity().isPackageDownloadUrlProvided()) {
-                commands.addAll(installLinuxFromPackageUrl());
-            } else {
-                commands.addAll(installFromPackageCloud());
-            }
-        } else if (osDetails.isMac()) {
-            commands.addAll(installMac());
-        } else if (osDetails.isWindows()) {
-            throw new UnsupportedOperationException("RiakNode not supported on Windows instances");
-        } else {
-            throw new IllegalStateException("Machine was not detected as linux, mac or windows! Installation does not know how to proceed with " +
-                    getMachine() + ". Details: " + getMachine().getMachineDetails().getOsDetails());
-        }
-
-        int result = newScript(INSTALLING)
-                .body.append(commands)
-                .failIfBodyEmpty()
-                .execute();
-
-        if (result != 0 && osDetails.isLinux()) {
-            result = newScript(INSTALLING_FALLBACK)
-                    .body.append(installLinuxFromPackageUrl())
-                    .execute();
-        }
-
-        if (result != 0) {
-            throw new IllegalStateException(String.format("Install failed with result %d", result));
-        }
-    }
-
-    private List<String> installLinuxFromPackageUrl() {
-        DynamicTasks.queueIfPossible(SshTasks.dontRequireTtyForSudo(getMachine(), SshTasks.OnFailingTask.WARN_OR_IF_DYNAMIC_FAIL_MARKING_INESSENTIAL)).orSubmitAndBlock();
-
-        String expandedInstallDir = getExpandedInstallDir();
-        String installBin = Urls.mergePaths(expandedInstallDir, "bin");
-        String saveAsYum = "riak.rpm";
-        String saveAsApt = "riak.deb";
-        OsDetails osDetails = getMachine().getOsDetails();
-
-        String downloadUrl;
-        String osReleaseCmd;
-        if ("debian".equalsIgnoreCase(osDetails.getName())) {
-            // TODO osDetails.getName() is returning "linux" instead of debian/ubuntu on AWS with the jenkins image,
-            //      when running as an integration test targeting localhost.
-            // TODO Debian support (default debian image fails with 'sudo: command not found')
-            downloadUrl = (String)entity.getAttribute(RiakNode.DOWNLOAD_URL_DEBIAN);
-            osReleaseCmd = osDetails.getVersion().substring(0, osDetails.getVersion().indexOf("."));
-        } else {
-            // assume Ubuntu
-            downloadUrl = (String)entity.getAttribute(RiakNode.DOWNLOAD_URL_UBUNTU);
-            osReleaseCmd = "`lsb_release -sc` && " +
-                    "export OS_RELEASE=`([[ \"lucid natty precise\" =~ (^| )\\$OS_RELEASE($| ) ]] && echo $OS_RELEASE || echo precise)`";
-        }
-        String apt = chainGroup(
-                //debian fix
-                "export PATH=" + sbinPath,
-                "which apt-get",
-                ok(sudo("apt-get -y --allow-unauthenticated install logrotate libpam0g-dev libssl0.9.8")),
-                "export OS_NAME=" + Strings.toLowerCase(osDetails.getName()),
-                "export OS_RELEASE=" + osReleaseCmd,
-                String.format("wget -O %s %s", saveAsApt, downloadUrl),
-                sudo(String.format("dpkg -i %s", saveAsApt)));
-        String yum = chainGroup(
-                "which yum",
-                ok(sudo("yum -y install openssl")),
-                String.format("wget -O %s %s", saveAsYum, entity.getAttribute(RiakNode.DOWNLOAD_URL_RHEL_CENTOS)),
-                sudo(String.format("yum localinstall -y %s", saveAsYum)));
-        return ImmutableList.<String>builder()
-                .add("mkdir -p " + installBin)
-                .add(INSTALL_CURL)
-                .add(alternatives(apt, yum))
-                .add("ln -s `which riak` " + Urls.mergePaths(installBin, "riak"))
-                .add("ln -s `which riak-admin` " + Urls.mergePaths(installBin, "riak-admin"))
-                .build();
-    }
-
-    private List<String> installFromPackageCloud() {
-        OsDetails osDetails = getMachine().getMachineDetails().getOsDetails();
-        return ImmutableList.<String>builder()
-                .add(osDetails.getName().toLowerCase().contains("debian") ? addSbinPathCommand() : "")
-                .add(ifNotExecutable("curl", INSTALL_CURL))
-                .addAll(ifExecutableElse("yum", installDebianBased(), installRpmBased()))
-                .build();
-    }
-
-    private ImmutableList<String> installDebianBased() {
-        return ImmutableList.<String>builder()
-                .add("curl https://packagecloud.io/install/repositories/basho/riak/script.deb.sh | " + BashCommands.sudo("bash"))
-                .add(BashCommands.sudo("apt-get install --assume-yes riak=" + getEntity().getFullVersion() + "-1"))
-                .build();
-    }
-
-    private ImmutableList<String> installRpmBased() {
-        return ImmutableList.<String>builder()
-                .add("curl https://packagecloud.io/install/repositories/basho/riak/script.rpm.sh | " + BashCommands.sudo("bash"))
-                .add(BashCommands.sudo("yum install -y riak-" + getEntity().getFullVersion() + "*"))
-                .build();
-    }
-
-    protected List<String> installMac() {
-        String saveAs = resolver.getFilename();
-        String url = entity.getAttribute(RiakNode.DOWNLOAD_URL_MAC);
-        return ImmutableList.<String>builder()
-                .add(INSTALL_TAR)
-                .add(INSTALL_CURL)
-                .add(commandToDownloadUrlAs(url, saveAs))
-                .add("tar xzvf " + saveAs)
-                .build();
-    }
-
-    @Override
-    public void customize() {
-        checkRiakOnPath();
-
-        //create entity's runDir
-        newScript(CUSTOMIZING).execute();
-
-        OsDetails osDetails = getMachine().getMachineDetails().getOsDetails();
-
-        List<String> commands = Lists.newLinkedList();
-        commands.add(sudo("mkdir -p " + getRiakEtcDir()));
-
-        if (isVersion1()) {
-            String vmArgsTemplate = processTemplate(entity.getConfig(RiakNode.RIAK_VM_ARGS_TEMPLATE_URL));
-            String saveAsVmArgs = Urls.mergePaths(getRunDir(), "vm.args");
-            DynamicTasks.queue(SshEffectorTasks.put(saveAsVmArgs).contents(vmArgsTemplate));
-            commands.add(sudo("mv " + saveAsVmArgs + " " + getRiakEtcDir()));
-
-            String appConfigTemplate = processTemplate(entity.getConfig(RiakNode.RIAK_APP_CONFIG_TEMPLATE_URL));
-            String saveAsAppConfig = Urls.mergePaths(getRunDir(), "app.config");
-            DynamicTasks.queue(SshEffectorTasks.put(saveAsAppConfig).contents(appConfigTemplate));
-            commands.add(sudo("mv " + saveAsAppConfig + " " + getRiakEtcDir()));
-        } else {
-            String templateUrl = osDetails.isMac() ? entity.getConfig(RiakNode.RIAK_CONF_TEMPLATE_URL_MAC) :
-                    entity.getConfig(RiakNode.RIAK_CONF_TEMPLATE_URL_LINUX);
-            String riakConfContent = processTemplate(templateUrl);
-            String saveAsRiakConf = Urls.mergePaths(getRunDir(), "riak.conf");
-
-            if(Strings.isNonBlank(entity.getConfig(RiakNode.RIAK_CONF_ADDITIONAL_CONTENT))) {
-                String additionalConfigContent = processTemplateContents(entity.getConfig(RiakNode.RIAK_CONF_ADDITIONAL_CONTENT));
-                riakConfContent += "\n## Brooklyn note: additional config\n";
-                riakConfContent += additionalConfigContent;
-            }
-
-            DynamicTasks.queue(SshEffectorTasks.put(saveAsRiakConf).contents(riakConfContent));
-            commands.add(sudo("mv " + saveAsRiakConf + " " + getRiakEtcDir()));
-        }
-
-        //increase the open file limit (the default minimum for riak is 4096)
-        //TODO: detect the actual limit then do the modification.
-        //TODO: modify ulimit for linux distros
-        //    commands.add(sudo("launchctl limit maxfiles 4096 32768"));
-        if (osDetails.isMac()) {
-            commands.add("ulimit -n 4096");
-        }
-
-        if (osDetails.isLinux() && isVersion1()) {
-            commands.add(sudo("chown -R riak:riak " + getRiakEtcDir()));
-        }
-
-        // TODO platform_*_dir
-        // TODO riak config log
-
-        ScriptHelper customizeScript = newScript(CUSTOMIZING)
-                .failOnNonZeroResultCode()
-                .body.append(commands);
-
-        if (!isRiakOnPath()) {
-            addRiakOnPath(customizeScript);
-        }
-        customizeScript.failOnNonZeroResultCode().execute();
-
-        if (osDetails.isLinux()) {
-            ImmutableMap<String, String> sysctl = ImmutableMap.<String, String>builder()
-                    .put("vm.swappiness", "0")
-                    .put("net.core.somaxconn", "40000")
-                    .put("net.ipv4.tcp_max_syn_backlog", "40000")
-                    .put("net.ipv4.tcp_sack",  "1")
-                    .put("net.ipv4.tcp_window_scaling",  "15")
-                    .put("net.ipv4.tcp_fin_timeout",     "1")
-                    .put("net.ipv4.tcp_keepalive_intvl", "30")
-                    .put("net.ipv4.tcp_tw_reuse",        "1")
-                    .put("net.ipv4.tcp_moderate_rcvbuf", "1")
-                    .build();
-
-            ScriptHelper optimize = newScript(CUSTOMIZING + "network")
-                .body.append(sudo("sysctl " + Joiner.on(' ').withKeyValueSeparator("=").join(sysctl)));
-
-            Optional<Boolean> enable = Optional.fromNullable(entity.getConfig(RiakNode.OPTIMIZE_HOST_NETWORKING));
-            if (!enable.isPresent()) optimize.inessential();
-            if (enable.or(true)) optimize.execute();
-        }
-
-        //set the riak node name
-        entity.setAttribute(RiakNode.RIAK_NODE_NAME, format("riak@%s", getSubnetHostname()));
-    }
-
-    @Override
-    public void launch() {
-        List<String> commands = Lists.newLinkedList();
-
-        if (isPackageInstall()) {
-            commands.add(addSbinPathCommand());
-            commands.add(sudo(format("sh -c \"ulimit -n %s && service riak start\"", maxOpenFiles())));
-        } else {
-            // NOTE: See instructions at http://superuser.com/questions/433746/is-there-a-fix-for-the-too-many-open-files-in-system-error-on-os-x-10-7-1
-            // for increasing the system limit for number of open files
-            commands.add("ulimit -n 65536 || true"); // `BashCommands.ok` will put this in parentheses, which will set ulimit -n in the subshell
-            commands.add(format("%s start >/dev/null 2>&1 < /dev/null &", getRiakCmd()));
-        }
-
-        ScriptHelper launchScript = newScript(LAUNCHING)
-                .body.append(commands);
-
-        if (!isRiakOnPath()) {
-            addRiakOnPath(launchScript);
-        }
-        launchScript.failOnNonZeroResultCode().execute();
-
-        String mainUri = String.format("http://%s:%s/admin", entity.getAttribute(Attributes.HOSTNAME), entity.getAttribute(RiakNode.RIAK_WEB_PORT));
-        entity.setAttribute(Attributes.MAIN_URI, URI.create(mainUri));
-    }
-
-    @Override
-    public void stop() {
-        leaveCluster();
-
-        String command = format("%s stop", getRiakCmd());
-        command = isPackageInstall() ? sudo(command) : command;
-
-        ScriptHelper stopScript = newScript(ImmutableMap.of(USE_PID_FILE, false), STOPPING)
-                .body.append(command);
-
-        if (!isRiakOnPath()) {
-            addRiakOnPath(stopScript);
-        }
-
-        int result = stopScript.failOnNonZeroResultCode().execute();
-        if (result != 0) {
-            newScript(ImmutableMap.of(USE_PID_FILE, false), STOPPING).execute();
-        }
-    }
-
-    @Override
-    public boolean isRunning() {
-        // Version 2.0.0 requires sudo for `riak ping`
-        ScriptHelper checkRunningScript = newScript(CHECK_RUNNING)
-                .body.append(sudo(format("%s ping", getRiakCmd())));
-
-        if (!isRiakOnPath()) {
-            addRiakOnPath(checkRunningScript);
-        }
-        return (checkRunningScript.execute() == 0);
-    }
-
-    public boolean isPackageInstall() {
-        return entity.getAttribute(RiakNode.RIAK_PACKAGE_INSTALL);
-    }
-
-    public boolean isRiakOnPath() {
-        return entity.getAttribute(RiakNode.RIAK_ON_PATH);
-    }
-
-    public String getRiakEtcDir() {
-        return isPackageInstall() ? "/etc/riak" : Urls.mergePaths(getExpandedInstallDir(), "etc");
-    }
-
-    protected String getRiakCmd() {
-        return isPackageInstall() ? "riak" : Urls.mergePaths(getExpandedInstallDir(), "bin/riak");
-    }
-
-    protected String getRiakAdminCmd() {
-        return isPackageInstall() ? "riak-admin" : Urls.mergePaths(getExpandedInstallDir(), "bin/riak-admin");
-    }
-
-    // TODO find a way to batch commit the changes, instead of committing for every operation.
-
-    @Override
-    public void joinCluster(String nodeName) {
-        if (getRiakName().equals(nodeName)) {
-            log.warn("Cannot join Riak node: {} to itself", nodeName);
-        } else {
-            if (!hasJoinedCluster()) {
-                ScriptHelper joinClusterScript = newScript("joinCluster")
-                        .body.append(sudo(format("%s cluster join %s", getRiakAdminCmd(), nodeName)))
-                        .body.append(sudo(format("%s cluster plan", getRiakAdminCmd())))
-                        .body.append(sudo(format("%s cluster commit", getRiakAdminCmd())))
-                        .failOnNonZeroResultCode();
-
-                if (!isRiakOnPath()) {
-                    addRiakOnPath(joinClusterScript);
-                }
-
-                joinClusterScript.execute();
-
-                entity.setAttribute(RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER, Boolean.TRUE);
-            } else {
-                log.warn("entity {}: is already in the riak cluster", entity.getId());
-            }
-        }
-    }
-
-    @Override
-    public void leaveCluster() {
-        if (hasJoinedCluster()) {
-            ScriptHelper leaveClusterScript = newScript("leaveCluster")
-                    .body.append(sudo(format("%s cluster leave", getRiakAdminCmd())))
-                    .body.append(sudo(format("%s cluster plan", getRiakAdminCmd())))
-                    .body.append(sudo(format("%s cluster commit", getRiakAdminCmd())))
-                    .failOnNonZeroResultCode();
-
-            if (!isRiakOnPath()) {
-                addRiakOnPath(leaveClusterScript);
-            }
-
-            leaveClusterScript.execute();
-
-            entity.setAttribute(RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER, Boolean.FALSE);
-        } else {
-            log.warn("entity {}: has already left the riak cluster", entity.getId());
-        }
-    }
-
-    @Override
-    public void removeNode(String nodeName) {
-        ScriptHelper removeNodeScript = newScript("removeNode")
-                .body.append(sudo(format("%s cluster force-remove %s", getRiakAdminCmd(), nodeName)))
-                .body.append(sudo(format("%s down %s", getRiakAdminCmd(), nodeName)))
-                .body.append(sudo(format("%s cluster plan", getRiakAdminCmd())))
-                .body.append(sudo(format("%s cluster commit", getRiakAdminCmd())))
-                .failOnNonZeroResultCode();
-
-        if (!isRiakOnPath()) {
-            addRiakOnPath(removeNodeScript);
-        }
-
-        removeNodeScript.execute();
-    }
-
-    @Override
-    public void bucketTypeCreate(String bucketTypeName, String bucketTypeProperties) {
-        ScriptHelper bucketTypeCreateScript = newScript("bucket-type_create " + bucketTypeName)
-                .body.append(sudo(format("%s bucket-type create %s %s",
-                        getRiakAdminCmd(),
-                        bucketTypeName,
-                        escapeLiteralForDoubleQuotedBash(bucketTypeProperties))));
-        if(!isRiakOnPath()) {
-            addRiakOnPath(bucketTypeCreateScript);
-        }
-        bucketTypeCreateScript.body.append(sudo(format("%s bucket-type activate %s", getRiakAdminCmd(), bucketTypeName)))
-                .failOnNonZeroResultCode();
-
-        bucketTypeCreateScript.execute();
-    }
-
-    @Override
-    public List<String> bucketTypeList() {
-        ScriptHelper bucketTypeListScript = newScript("bucket-types_list")
-                .body.append(sudo(format("%s bucket-type list", getRiakAdminCmd())))
-                .gatherOutput()
-                .noExtraOutput()
-                .failOnNonZeroResultCode();
-        if (!isRiakOnPath()) {
-            addRiakOnPath(bucketTypeListScript);
-        }
-        bucketTypeListScript.execute();
-        String stdout = bucketTypeListScript.getResultStdout();
-        return Arrays.asList(stdout.split("[\\r\\n]+"));
-    }
-
-    @Override
-    public List<String> bucketTypeStatus(String bucketTypeName) {
-        ScriptHelper bucketTypeStatusScript = newScript("bucket-type_status")
-                .body.append(sudo(format("%s bucket-type status %s", getRiakAdminCmd(), bucketTypeName)))
-                .gatherOutput()
-                .noExtraOutput()
-                .failOnNonZeroResultCode();
-        if (!isRiakOnPath()) {
-            addRiakOnPath(bucketTypeStatusScript);
-        }
-        bucketTypeStatusScript.execute();
-        String stdout = bucketTypeStatusScript.getResultStdout();
-        return Arrays.asList(stdout.split("[\\r\\n]+"));
-    }
-
-    @Override
-    public void bucketTypeUpdate(String bucketTypeName, String bucketTypeProperties) {
-        ScriptHelper bucketTypeStatusScript = newScript("bucket-type_update")
-                .body.append(sudo(format("%s bucket-type update %s %s",
-                        getRiakAdminCmd(),
-                        bucketTypeName,
-                        escapeLiteralForDoubleQuotedBash(bucketTypeProperties))))
-                .failOnNonZeroResultCode();
-        if (!isRiakOnPath()) {
-            addRiakOnPath(bucketTypeStatusScript);
-        }
-        bucketTypeStatusScript.execute();
-    }
-
-    @Override
-    public void bucketTypeActivate(String bucketTypeName) {
-        ScriptHelper bucketTypeStatusScript = newScript("bucket-type_activate")
-                .body.append(sudo(format("%s bucket-type activate %s", getRiakAdminCmd(), bucketTypeName)))
-                .failOnNonZeroResultCode();
-        if (!isRiakOnPath()) {
-            addRiakOnPath(bucketTypeStatusScript);
-        }
-        bucketTypeStatusScript.execute();
-    }
-
-    @Override
-    public void recoverFailedNode(String nodeName) {
-        //TODO find ways to detect a faulty/failed node
-        //the 'nodeName' argument passed in is any working node in the riak cluster
-        //following the instructions from: http://docs.basho.com/riak/latest/ops/running/recovery/failed-node/
-
-        if (hasJoinedCluster()) {
-            String failedNodeName = getRiakName();
-
-
-            String stopCommand = format("%s stop", getRiakCmd());
-            stopCommand = isPackageInstall() ? sudo(stopCommand) : stopCommand;
-
-            String startCommand = format("%s start > /dev/null 2>&1 < /dev/null &", getRiakCmd());
-            startCommand = isPackageInstall() ? sudo(startCommand) : startCommand;
-
-            ScriptHelper recoverNodeScript = newScript("recoverNode")
-                    .body.append(stopCommand)
-                    .body.append(format("%s down %s", getRiakAdminCmd(), failedNodeName))
-                    .body.append(sudo(format("rm -rf %s", getRingStateDir())))
-                    .body.append(startCommand)
-                    .body.append(sudo(format("%s cluster join %s", getRiakAdminCmd(), nodeName)))
-                    .body.append(sudo(format("%s cluster plan", getRiakAdminCmd())))
-                    .body.append(sudo(format("%s cluster commit", getRiakAdminCmd())))
-                    .failOnNonZeroResultCode();
-
-            if (!isRiakOnPath()) {
-                addRiakOnPath(recoverNodeScript);
-            }
-
-            recoverNodeScript.execute();
-
-        } else {
-            log.warn("entity {}: is not in the riak cluster", entity.getId());
-        }
-    }
-
-    @Override
-    public void setup() {
-        if(entity.getConfig(RiakNode.SEARCH_ENABLED)) {
-            // JavaSoftwareProcessSshDriver.setup() is called in order to install java
-            super.setup();
-        }
-    }
-
-    private Boolean hasJoinedCluster() {
-        return Boolean.TRUE.equals(entity.getAttribute(RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER));
-    }
-
-    protected void checkRiakOnPath() {
-        boolean riakOnPath = newScript("riakOnPath")
-                .body.append("which riak")
-                .execute() == 0;
-        entity.setAttribute(RiakNode.RIAK_ON_PATH, riakOnPath);
-    }
-
-    private String getRiakName() {
-        return entity.getAttribute(RiakNode.RIAK_NODE_NAME);
-    }
-
-    private String getRingStateDir() {
-        //TODO: check for non-package install.
-        return isPackageInstall() ? "/var/lib/riak/ring" : Urls.mergePaths(getExpandedInstallDir(), "lib/ring");
-    }
-
-    protected boolean isVersion1() {
-        return getVersion().startsWith("1.");
-    }
-
-    @Override
-    public String getOsMajorVersion() {
-        OsDetails osDetails = getMachine().getMachineDetails().getOsDetails();
-        String osVersion = osDetails.getVersion();
-        return osVersion.contains(".") ? osVersion.substring(0, osVersion.indexOf(".")) : osVersion;
-    }
-
-    private void addRiakOnPath(ScriptHelper scriptHelper) {
-        Map<String, String> newPathVariable = ImmutableMap.of("PATH", sbinPath);
-//        log.warn("riak command not found on PATH. Altering future commands' environment variables from {} to {}", getShellEnvironment(), newPathVariable);
-        scriptHelper.environmentVariablesReset(newPathVariable);
-    }
-
-    public Integer maxOpenFiles() {
-        return entity.getConfig(RiakNode.RIAK_MAX_OPEN_FILES);
-    }
-}
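
For readers unfamiliar with Riak's clustering CLI: joinCluster, leaveCluster
and removeNode above all stage a change and then run "cluster plan" followed
by "cluster commit". A sketch of what the join flow reduces to, assuming a
package install (so riak-admin is on the PATH) and a purely hypothetical
peer name:

    import static brooklyn.util.ssh.BashCommands.sudo;
    import static java.lang.String.format;

    import java.util.List;
    import com.google.common.collect.ImmutableList;

    String adminCmd = "riak-admin";     // what getRiakAdminCmd() returns for package installs
    String peer = "riak@10.0.0.1";      // hypothetical node name
    List<String> joinFlow = ImmutableList.of(
            sudo(format("%s cluster join %s", adminCmd, peer)),  // stage the join
            sudo(format("%s cluster plan", adminCmd)),           // review pending changes
            sudo(format("%s cluster commit", adminCmd)));        // apply them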

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/solr/SolrServer.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/solr/SolrServer.java b/software/nosql/src/main/java/brooklyn/entity/nosql/solr/SolrServer.java
deleted file mode 100644
index fc8d28e..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/solr/SolrServer.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.solr;
-
-import java.util.Map;
-
-import org.apache.brooklyn.catalog.Catalog;
-import brooklyn.config.ConfigKey;
-import brooklyn.entity.basic.BrooklynConfigKeys;
-import brooklyn.entity.basic.ConfigKeys;
-import brooklyn.entity.basic.SoftwareProcess;
-import brooklyn.entity.java.UsesJava;
-import brooklyn.entity.java.UsesJavaMXBeans;
-import brooklyn.entity.java.UsesJmx;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
-import brooklyn.event.basic.PortAttributeSensorAndConfigKey;
-import brooklyn.location.basic.PortRanges;
-import brooklyn.util.flags.SetFromFlag;
-import brooklyn.util.time.Duration;
-
-import com.google.common.collect.Maps;
-import com.google.common.reflect.TypeToken;
-
-/**
- * An {@link brooklyn.entity.Entity} that represents a Solr node.
- */
-@Catalog(name="Apache Solr Node", description="Solr is the popular, blazing fast open source enterprise search " +
-        "platform from the Apache Lucene project.", iconUrl="classpath:///solr-logo.jpeg")
-@ImplementedBy(SolrServerImpl.class)
-public interface SolrServer extends SoftwareProcess, UsesJava, UsesJmx, UsesJavaMXBeans {
-
-    @SetFromFlag("version")
-    ConfigKey<String> SUGGESTED_VERSION = ConfigKeys.newConfigKeyWithDefault(SoftwareProcess.SUGGESTED_VERSION, "4.7.2");
-
-    @SetFromFlag("downloadUrl")
-    BasicAttributeSensorAndConfigKey<String> DOWNLOAD_URL = new BasicAttributeSensorAndConfigKey<String>(
-            SoftwareProcess.DOWNLOAD_URL, "${driver.mirrorUrl}/${version}/solr-${version}.tgz");
-
-    /** download mirror, if desired */
-    @SetFromFlag("mirrorUrl")
-    ConfigKey<String> MIRROR_URL = ConfigKeys.newStringConfigKey("solr.install.mirror.url", "URL of mirror",
-            "http://mirrors.ukfast.co.uk/sites/ftp.apache.org/lucene/solr/");
-
-    @SetFromFlag("solrPort")
-    PortAttributeSensorAndConfigKey SOLR_PORT = new PortAttributeSensorAndConfigKey("solr.http.port", "Solr HTTP communications port",
-            PortRanges.fromString("8983+"));
-
-    @SetFromFlag("solrConfigTemplateUrl")
-    ConfigKey<String> SOLR_CONFIG_TEMPLATE_URL = ConfigKeys.newStringConfigKey(
-            "solr.config.templateUrl", "Template file (in freemarker format) for the solr.xml config file", 
-            "classpath://brooklyn/entity/nosql/solr/solr.xml");
-
-    @SetFromFlag("coreConfigMap")
-    ConfigKey<Map<String, String>> SOLR_CORE_CONFIG = ConfigKeys.newConfigKey(new TypeToken<Map<String, String>>() { },
-            "solr.core.config", "Map of core names to core configuration archive URL",
-            Maps.<String, String>newHashMap());
-
-    ConfigKey<Duration> START_TIMEOUT = ConfigKeys.newConfigKeyWithDefault(BrooklynConfigKeys.START_TIMEOUT, Duration.FIVE_MINUTES);
-
-    /* Accessors used from template */
-
-    Integer getSolrPort();
-
-}
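
The interface above is configuration-only; a minimal usage sketch (the spec
below is illustrative and not taken from this commit; the version and port
values simply restate the defaults declared above):

    import brooklyn.entity.proxying.EntitySpec;
    import brooklyn.location.basic.PortRanges;

    // Illustrative only: SUGGESTED_VERSION and SOLR_PORT are the keys
    // declared above, set here to the same values as their defaults.
    EntitySpec<SolrServer> spec = EntitySpec.create(SolrServer.class)
            .configure(SolrServer.SUGGESTED_VERSION, "4.7.2")
            .configure(SolrServer.SOLR_PORT, PortRanges.fromString("8983+"));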

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/solr/SolrServerDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/solr/SolrServerDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/solr/SolrServerDriver.java
deleted file mode 100644
index dd44499..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/solr/SolrServerDriver.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.solr;
-
-import brooklyn.entity.basic.SoftwareProcessDriver;
-import brooklyn.entity.java.JavaSoftwareProcessDriver;
-
-public interface SolrServerDriver extends JavaSoftwareProcessDriver {
-
-    Integer getSolrPort();
-
-    String getSolrConfigTemplateUrl();
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/solr/SolrServerImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/solr/SolrServerImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/solr/SolrServerImpl.java
deleted file mode 100644
index 3d32a93..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/solr/SolrServerImpl.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.solr;
-
-import brooklyn.entity.basic.Attributes;
-import brooklyn.entity.basic.SoftwareProcessImpl;
-import brooklyn.event.feed.http.HttpFeed;
-import brooklyn.event.feed.http.HttpPollConfig;
-import brooklyn.event.feed.http.HttpValueFunctions;
-import brooklyn.location.access.BrooklynAccessUtils;
-import com.google.common.base.Functions;
-import com.google.common.net.HostAndPort;
-
-import java.net.URI;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Implementation of {@link SolrServer}.
- */
-public class SolrServerImpl extends SoftwareProcessImpl implements SolrServer {
-
-    @Override
-    public Integer getSolrPort() {
-        return getAttribute(SolrServer.SOLR_PORT);
-    }
-
-    @Override
-    public Class<SolrServerDriver> getDriverInterface() {
-        return SolrServerDriver.class;
-    }
-
-    private volatile HttpFeed httpFeed;
-
-    @Override 
-    protected void connectSensors() {
-        super.connectSensors();
-
-        HostAndPort hp = BrooklynAccessUtils.getBrooklynAccessibleAddress(this, getSolrPort());
-
-        String solrUri = String.format("http://%s:%d/solr", hp.getHostText(), hp.getPort());
-        setAttribute(Attributes.MAIN_URI, URI.create(solrUri));
-
-        httpFeed = HttpFeed.builder()
-                .entity(this)
-                .period(500, TimeUnit.MILLISECONDS)
-                .baseUri(solrUri)
-                .poll(new HttpPollConfig<Boolean>(SERVICE_UP)
-                        .onSuccess(HttpValueFunctions.responseCodeEquals(200))
-                        .onFailureOrException(Functions.constant(false)))
-                .build();
-    }
-
-    @Override
-    public void disconnectSensors() {
-        super.disconnectSensors();
-
-        if (httpFeed != null) httpFeed.stop();
-    }
-}
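
The feed above drives SERVICE_UP purely from the response code of the root
Solr URL. If a finer-grained health probe were wanted, a second feed against
Solr's ping handler could follow the same pattern; note that the "/admin/ping"
path and the sensor below are assumptions for illustration, not part of
this commit:

    import brooklyn.event.AttributeSensor;
    import brooklyn.event.basic.Sensors;
    import brooklyn.event.feed.http.HttpFeed;
    import brooklyn.event.feed.http.HttpPollConfig;
    import brooklyn.event.feed.http.HttpValueFunctions;
    import com.google.common.base.Functions;

    // Hypothetical extra probe, built inside connectSensors() where solrUri
    // is in scope; the path and sensor name are illustrative assumptions.
    AttributeSensor<Boolean> PING_OK = Sensors.newBooleanSensor("solr.ping.ok");
    HttpFeed pingFeed = HttpFeed.builder()
            .entity(this)
            .baseUri(solrUri + "/admin/ping")
            .poll(new HttpPollConfig<Boolean>(PING_OK)
                    .onSuccess(HttpValueFunctions.responseCodeEquals(200))
                    .onFailureOrException(Functions.constant(false)))
            .build();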

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/solr/SolrServerSshDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/solr/SolrServerSshDriver.java b/software/nosql/src/main/java/brooklyn/entity/nosql/solr/SolrServerSshDriver.java
deleted file mode 100644
index 2174b36..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/solr/SolrServerSshDriver.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.solr;
-
-import static java.lang.String.format;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import brooklyn.entity.java.JavaSoftwareProcessSshDriver;
-import brooklyn.entity.java.UsesJmx;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.entity.basic.AbstractSoftwareProcessSshDriver;
-import brooklyn.entity.basic.Entities;
-import brooklyn.location.Location;
-import brooklyn.location.basic.SshMachineLocation;
-import brooklyn.util.collections.MutableMap;
-import brooklyn.util.file.ArchiveUtils;
-import brooklyn.util.net.Networking;
-import brooklyn.util.net.Urls;
-import brooklyn.util.os.Os;
-import brooklyn.util.ssh.BashCommands;
-import brooklyn.util.stream.Streams;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Sets;
-
-/**
- * Start a {@link SolrServer} in a {@link Location} accessible over ssh.
- */
-public class SolrServerSshDriver extends JavaSoftwareProcessSshDriver implements SolrServerDriver {
-
-    private static final Logger log = LoggerFactory.getLogger(SolrServerSshDriver.class);
-
-    public SolrServerSshDriver(SolrServerImpl entity, SshMachineLocation machine) {
-        super(entity, machine);
-    }
-
-    @Override
-    public Integer getSolrPort() { return entity.getAttribute(SolrServer.SOLR_PORT); }
-
-    @Override
-    public String getSolrConfigTemplateUrl() { return entity.getConfig(SolrServer.SOLR_CONFIG_TEMPLATE_URL); }
-
-    public String getMirrorUrl() { return entity.getConfig(SolrServer.MIRROR_URL); }
-
-    public String getPidFile() { return Os.mergePaths(getRunDir(), "solr.pid"); }
-
-    @Override
-    public void preInstall() {
-        resolver = Entities.newDownloader(this);
-        setExpandedInstallDir(Os.mergePaths(getInstallDir(), resolver.getUnpackedDirectoryName(format("solr-%s", getVersion()))));
-    }
-
-    @Override
-    public void install() {
-        List<String> urls = resolver.getTargets();
-        String saveAs = resolver.getFilename();
-
-        List<String> commands = ImmutableList.<String>builder()
-                .addAll(BashCommands.commandsToDownloadUrlsAs(urls, saveAs))
-                .add(BashCommands.INSTALL_TAR)
-                .add("tar xzfv " + saveAs)
-                .build();
-
-        newScript(INSTALLING)
-                .failOnNonZeroResultCode()
-                .body.append(commands)
-                .execute();
-    }
-
-    public Set<Integer> getPortsUsed() {
-        Set<Integer> result = Sets.newLinkedHashSet(super.getPortsUsed());
-        result.addAll(getPortMap().values());
-        return result;
-    }
-
-    private Map<String, Integer> getPortMap() {
-        return ImmutableMap.<String, Integer>builder()
-                .put("solrPort", getSolrPort())
-                .put("jmxPort", entity.getAttribute(UsesJmx.JMX_PORT))
-                .put("rmiPort", entity.getAttribute(UsesJmx.RMI_REGISTRY_PORT))
-                .build();
-    }
-
-    @Override
-    public void customize() {
-        log.debug("Customizing {}", entity);
-        Networking.checkPortsValid(getPortMap());
-
-        ImmutableList.Builder<String> commands = new ImmutableList.Builder<String>()
-                .add("mkdir contrib")
-                .add("mkdir solr")
-                .add(String.format("cp -R %s/example/{etc,contexts,lib,logs,resources,webapps} .", getExpandedInstallDir()))
-                .add(String.format("cp %s/example/start.jar .", getExpandedInstallDir()))
-                .add(String.format("cp %s/dist/*.jar lib/", getExpandedInstallDir()))
-                .add(String.format("cp %s/contrib/*/lib/*.jar contrib/", getExpandedInstallDir()));
-
-        newScript(CUSTOMIZING)
-                .body.append(commands.build())
-                .execute();
-
-        // Copy the solr.xml configuration file across
-        String configFileContents = processTemplate(getSolrConfigTemplateUrl());
-        String destinationConfigFile = String.format("%s/solr/solr.xml", getRunDir());
-        getMachine().copyTo(Streams.newInputStreamWithContents(configFileContents), destinationConfigFile);
-
-        // Copy the core definitions across
-        Map<String, String> coreConfig = entity.getConfig(SolrServer.SOLR_CORE_CONFIG);
-        for (String core : coreConfig.keySet()) {
-            String url = coreConfig.get(core);
-            String solr = Urls.mergePaths(getRunDir(), "solr");
-            ArchiveUtils.deploy(url, getMachine(), solr);
-        }
-    }
-
-    @Override
-    public void launch() {
-        newScript(MutableMap.of(USE_PID_FILE, getPidFile()), LAUNCHING)
-                .body.append("nohup java $JAVA_OPTS -jar start.jar > ./logs/console.log 2>&1 &")
-                .execute();
-    }
-
-    @Override
-    public boolean isRunning() {
-        return newScript(MutableMap.of(USE_PID_FILE, getPidFile()), CHECK_RUNNING).execute() == 0;
-    }
-
-    @Override
-    public void stop() {
-        newScript(MutableMap.of(USE_PID_FILE, getPidFile()), STOPPING).execute();
-    }
-
-    @Override
-    protected String getLogFileLocation() {
-        return Urls.mergePaths(getRunDir(), "solr", "logs", "solr.log");
-    }
-}
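
The core-deployment loop in customize() above expects SOLR_CORE_CONFIG to map
core names to archive URLs, each of which ArchiveUtils.deploy unpacks into
<runDir>/solr. A hypothetical configuration, purely for illustration (the
core name and archive URL are invented):

    import java.util.Map;
    import brooklyn.entity.proxying.EntitySpec;
    import com.google.common.collect.ImmutableMap;

    // Invented core name and archive URL, shaped to match the loop above.
    Map<String, String> cores = ImmutableMap.of(
            "example", "classpath://solr/example-core.tgz");
    EntitySpec<SolrServer> spec = EntitySpec.create(SolrServer.class)
            .configure(SolrServer.SOLR_CORE_CONFIG, cores);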

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraCluster.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraCluster.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraCluster.java
new file mode 100644
index 0000000..890ab60
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraCluster.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import brooklyn.entity.proxying.ImplementedBy;
+
+/**
+ * @deprecated since 0.7.0; use {@link CassandraDatacenter} which is equivalent but has
+ * a less ambiguous name; <em>Cluster</em> in Cassandra corresponds to what Brooklyn terms a <em>Fabric</em>.
+ */
+@Deprecated
+@ImplementedBy(CassandraClusterImpl.class)
+public interface CassandraCluster extends CassandraDatacenter {
+}
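
Given the deprecation note above, new blueprints should name
CassandraDatacenter directly; a minimal sketch, assuming CassandraDatacenter
exposes DynamicCluster's INITIAL_SIZE key (as the cluster entities elsewhere
in this commit do):

    import brooklyn.entity.group.DynamicCluster;
    import brooklyn.entity.proxying.EntitySpec;

    // Prefer the unambiguous name; CassandraCluster remains only as a
    // deprecated alias for it.
    EntitySpec<CassandraDatacenter> spec = EntitySpec.create(CassandraDatacenter.class)
            .configure(DynamicCluster.INITIAL_SIZE, 3);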



http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetIntegrationTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetIntegrationTest.java
deleted file mode 100644
index 948a5c4..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetIntegrationTest.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import static org.testng.Assert.assertEquals;
-import static org.testng.Assert.assertFalse;
-import static org.testng.Assert.assertNotEquals;
-import static org.testng.Assert.assertNotNull;
-
-import java.util.Collection;
-import java.util.concurrent.TimeUnit;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.BrooklynAppLiveTestSupport;
-import brooklyn.entity.Entity;
-import brooklyn.entity.group.DynamicCluster;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
-import brooklyn.test.Asserts;
-import brooklyn.util.time.Duration;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.mongodb.DBObject;
-
-public class MongoDBReplicaSetIntegrationTest extends BrooklynAppLiveTestSupport {
-
-    @SuppressWarnings("unused")
-    private static final Logger log = LoggerFactory.getLogger(MongoDBReplicaSetIntegrationTest.class);
-    
-    private Collection<LocalhostMachineProvisioningLocation> locs;
-
-    // Replica sets can take a while to start
-    private static final Duration TIMEOUT = Duration.of(3, TimeUnit.MINUTES);
-
-    @BeforeMethod(alwaysRun=true)
-    @Override
-    public void setUp() throws Exception {
-        super.setUp();
-        locs = ImmutableList.of(app.newLocalhostProvisioningLocation());
-    }
-
-    /**
-     * Creates and starts a replica set, asserts it reaches the given size
-     * and that the primary and secondaries are non-null.
-     */
-    private MongoDBReplicaSet makeAndStartReplicaSet(final Integer size, final String testDescription) {
-        // Sets secondaryPreferred so we can read from slaves.
-        final MongoDBReplicaSet replicaSet = app.createAndManageChild(EntitySpec.create(MongoDBReplicaSet.class)
-                .configure(DynamicCluster.INITIAL_SIZE, size)
-                .configure("replicaSetName", "test-rs-"+testDescription)
-                .configure("memberSpec", EntitySpec.create(MongoDBServer.class)
-                        .configure("mongodbConfTemplateUrl", "classpath:///test-mongodb.conf")
-                        .configure("port", "27017+")));
-        app.start(locs);
-
-        Asserts.succeedsEventually(ImmutableMap.of("timeout", TIMEOUT), new Runnable() {
-            @Override
-            public void run() {
-                assertEquals(replicaSet.getCurrentSize(), size);
-                assertNotNull(replicaSet.getPrimary(), "replica set has no primary");
-                assertEquals(replicaSet.getPrimary().getReplicaSet().getName(), "test-rs-"+testDescription+replicaSet.getId());
-                assertEquals(replicaSet.getSecondaries().size(), size-1);
-            }
-        });
-        return replicaSet;
-    }
-
-    @Test(groups = "Integration")
-    public void testCanStartAndStopAReplicaSet() {
-        final MongoDBReplicaSet replicaSet = makeAndStartReplicaSet(3, "can-start-and-stop");
-        replicaSet.stop();
-        assertFalse(replicaSet.getAttribute(Startable.SERVICE_UP));
-    }
-
-    @Test(groups = "Integration")
-    public void testWriteToMasterAndReadFromSecondary() {
-        final MongoDBReplicaSet replicaSet = makeAndStartReplicaSet(3, "master-write-secondary-read");
-
-        // Test we can read a document written to the primary from all secondaries
-        final String documentId = MongoDBTestHelper.insert(replicaSet.getPrimary(), "meaning-of-life", 42);
-        Asserts.succeedsEventually(ImmutableMap.of("timeout", TIMEOUT), new Runnable() {
-            @Override
-            public void run() {
-                assertEquals(replicaSet.getCurrentSize().intValue(), 3);
-                for (MongoDBServer secondary : replicaSet.getSecondaries()) {
-                    DBObject docOut = MongoDBTestHelper.getById(secondary, documentId);
-                    assertEquals(docOut.get("meaning-of-life"), 42);
-                }
-            }
-        });
-    }
-
-    @Test(groups = "Integration")
-    public void testCanResizeAndReadFromNewInstances() {
-        final MongoDBReplicaSet replicaSet = makeAndStartReplicaSet(3, "resize-and-read-from-secondaries");
-
-        // Test we can read a document written to the primary from all secondaries
-        final String documentId = MongoDBTestHelper.insert(replicaSet.getPrimary(), "meaning-of-life", 42);
-        Asserts.succeedsEventually(ImmutableMap.of("timeout", TIMEOUT), new Runnable() {
-            @Override
-            public void run() {
-                assertEquals(replicaSet.getCurrentSize().intValue(), 3);
-                for (MongoDBServer secondary : replicaSet.getSecondaries()) {
-                    DBObject docOut = MongoDBTestHelper.getById(secondary, documentId);
-                    assertEquals(docOut.get("meaning-of-life"), 42);
-                }
-            }
-        });
-
-        // Resize and confirm new members get data
-        replicaSet.resize(5);
-        Asserts.succeedsEventually(ImmutableMap.of("timeout", TIMEOUT), new Runnable() {
-            @Override
-            public void run() {
-                assertEquals(replicaSet.getCurrentSize().intValue(), 5);
-                Collection<MongoDBServer> secondaries = replicaSet.getSecondaries();
-                assertEquals(secondaries.size(), 4);
-                for (MongoDBServer secondary : secondaries) {
-                    DBObject docOut = MongoDBTestHelper.getById(secondary, documentId);
-                    assertEquals(docOut.get("meaning-of-life"), 42);
-                }
-            }
-        });
-
-    }
-
-    @Test(groups = "Integration")
-    public void testResizeToEvenNumberOfMembers() {
-        final MongoDBReplicaSet replicaSet = makeAndStartReplicaSet(3, "resize-even-ignored");
-        assertEquals(replicaSet.getCurrentSize().intValue(), 3);
-        replicaSet.resize(4);
-        Asserts.succeedsEventually(new Runnable() {
-            @Override
-            public void run() {
-                assertEquals(replicaSet.getCurrentSize().intValue(), 4);
-            }
-        });
-    }
-
-    /**
-     * Test replacing the primary succeeds. More interesting than replacing a secondary
-     * because the removal of a primary must happen _through_ the primary. The flow is:
-     *  - Brooklyn removes the server from the set and stops it
-     *  - The remaining members of the set elect a new primary
-     *  - We remove the original primary from the new primary.
-     */
-    @Test(groups = "Integration")
-    public void testReplacePrimary() {
-        final MongoDBReplicaSet replicaSet = makeAndStartReplicaSet(3, "replace-primary");
-        final MongoDBServer replaced = replicaSet.getPrimary();
-        replicaSet.replaceMember(replaced.getId());
-        Asserts.succeedsEventually(ImmutableMap.of("timeout", TIMEOUT), new Runnable() {
-            @Override
-            public void run() {
-                assertEquals(replicaSet.getCurrentSize().intValue(), 3);
-                for (Entity member : replicaSet.getMembers()) {
-                    assertNotEquals(member.getId(), replaced.getId());
-                }
-                assertNotNull(replicaSet.getPrimary());
-                assertNotEquals(replicaSet.getPrimary().getId(), replaced.getId(), "Expected a new primary to have been elected");
-            }
-        });
-    }
-
-    @Test(groups = "Integration")
-    public void testRemovePrimary() {
-        final MongoDBReplicaSet replicaSet = makeAndStartReplicaSet(3, "remove-primary");
-        final MongoDBServer removed = replicaSet.getPrimary();
-
-        replicaSet.removeMember(removed);
-        removed.stop();
-        Asserts.succeedsEventually(ImmutableMap.of("timeout", TIMEOUT), new Runnable() {
-            @Override
-            public void run() {
-                assertEquals(replicaSet.getCurrentSize().intValue(), 2);
-                for (Entity member : replicaSet.getMembers()) {
-                    assertNotEquals(member.getId(), removed.getId());
-                }
-                assertNotNull(replicaSet.getPrimary());
-                assertNotEquals(replicaSet.getPrimary().getId(), removed.getId(), "Expected a new primary to have been elected");
-            }
-        });
-    }
-}
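
A note on the flow described in the javadoc above: the final step only works
because a replica set's configuration can be rewritten through whichever
member is currently primary. A rough standalone sketch of that step (not part
of the deleted file), using the same legacy mongo-java-driver, a hypothetical
new primary at new-primary.example.com:27017, and MongoDB 2.6+ (which
provides the replSetGetConfig command):

    import org.bson.BasicBSONObject;

    import com.mongodb.BasicDBObject;
    import com.mongodb.CommandResult;
    import com.mongodb.DB;
    import com.mongodb.MongoClient;

    import brooklyn.entity.nosql.mongodb.ReplicaSetConfig;

    public class ReplacePrimarySketch {
        public static void main(String[] args) throws Exception {
            MongoClient client = new MongoClient("new-primary.example.com", 27017);
            try {
                DB admin = client.getDB("admin");
                // Ask the new primary for the current replica set configuration.
                CommandResult result = admin.command("replSetGetConfig");
                BasicBSONObject config = (BasicBSONObject) result.get("config");
                // ReplicaSetConfig bumps the config version and drops the old member.
                BasicBSONObject reconfig = ReplicaSetConfig.fromExistingConfig(config)
                        .remove("old-primary.example.com", 27017)
                        .build();
                admin.command(new BasicDBObject("replSetReconfig", reconfig));
            } finally {
                client.close();
            }
        }
    }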

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBRestartIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBRestartIntegrationTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBRestartIntegrationTest.java
deleted file mode 100644
index 3d21055..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBRestartIntegrationTest.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.basic.AbstractSoftwareProcessRestartIntegrationTest;
-import brooklyn.entity.basic.SoftwareProcess;
-import brooklyn.entity.proxying.EntitySpec;
-
-/**
- * Tests restart of the software *process* (as opposed to the VM).
- */
-@Test(groups="Integration")
-public class MongoDBRestartIntegrationTest extends AbstractSoftwareProcessRestartIntegrationTest {
-    
-    @SuppressWarnings("unused")
-    private static final Logger LOG = LoggerFactory.getLogger(MongoDBRestartIntegrationTest.class);
-
-    @Override
-    protected EntitySpec<? extends SoftwareProcess> newEntitySpec() {
-        return EntitySpec.create(MongoDBServer.class);
-    }
-}
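
The deleted test above delegates to AbstractSoftwareProcessRestartIntegrationTest,
which restarts the mongod process while keeping the machine. A minimal sketch of
that kind of process-only restart; the "restartMachine" effector parameter is an
assumption about the SoftwareProcess restart effector, not taken from this file:

    import com.google.common.collect.ImmutableList;
    import com.google.common.collect.ImmutableMap;

    import brooklyn.entity.BrooklynAppLiveTestSupport;
    import brooklyn.entity.nosql.mongodb.MongoDBServer;
    import brooklyn.entity.proxying.EntitySpec;
    import brooklyn.entity.trait.Startable;
    import brooklyn.test.EntityTestUtils;

    public class MongoDBProcessRestartSketch extends BrooklynAppLiveTestSupport {
        public void restartProcessOnly() throws Exception {
            MongoDBServer server = app.createAndManageChild(EntitySpec.create(MongoDBServer.class));
            app.start(ImmutableList.of(app.newLocalhostProvisioningLocation()));

            // Restart only the mongod process; leave the underlying machine alone.
            server.invoke(Startable.RESTART,
                    ImmutableMap.<String, Object>of("restartMachine", "false")).get();

            EntityTestUtils.assertAttributeEqualsEventually(server, Startable.SERVICE_UP, true);
        }
    }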

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBSoftLayerLiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBSoftLayerLiveTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBSoftLayerLiveTest.java
deleted file mode 100644
index 84620d7..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBSoftLayerLiveTest.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import static org.testng.Assert.assertEquals;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.annotations.Test;
-
-import com.google.common.collect.ImmutableList;
-import com.mongodb.DBObject;
-
-import brooklyn.entity.AbstractSoftlayerLiveTest;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.location.Location;
-import brooklyn.test.EntityTestUtils;
-
-public class MongoDBSoftLayerLiveTest extends AbstractSoftlayerLiveTest {
-
-    @SuppressWarnings("unused")
-    private static final Logger LOG = LoggerFactory.getLogger(MongoDBSoftLayerLiveTest.class);
-
-    @Override
-    protected void doTest(Location loc) throws Exception {
-        MongoDBServer entity = app.createAndManageChild(EntitySpec.create(MongoDBServer.class)
-                .configure("mongodbConfTemplateUrl", "classpath:///test-mongodb.conf"));
-        app.start(ImmutableList.of(loc));
-
-        EntityTestUtils.assertAttributeEqualsEventually(entity, MongoDBServer.SERVICE_UP, true);
-
-        String id = MongoDBTestHelper.insert(entity, "hello", "world!");
-        DBObject docOut = MongoDBTestHelper.getById(entity, id);
-        assertEquals(docOut.get("hello"), "world!");
-    }
-
-    @Test(enabled=false)
-    public void testDummy() {} // Convince TestNG IDE integration that this really does have test methods
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBTestHelper.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBTestHelper.java b/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBTestHelper.java
deleted file mode 100644
index a5cf79d..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBTestHelper.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import java.net.UnknownHostException;
-import java.util.List;
-import java.util.Map;
-
-import org.bson.types.ObjectId;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Throwables;
-import com.google.common.net.HostAndPort;
-import com.mongodb.BasicDBObject;
-import com.mongodb.CommandResult;
-import com.mongodb.DB;
-import com.mongodb.DBCollection;
-import com.mongodb.DBObject;
-import com.mongodb.MongoClient;
-import com.mongodb.ReadPreference;
-
-import brooklyn.location.access.BrooklynAccessUtils;
-
-public class MongoDBTestHelper {
-
-    private static final Logger LOG = LoggerFactory.getLogger(MongoDBTestHelper.class);
-
-    private static final String TEST_DB = "brooklyn_test";
-    private static final String TEST_COLLECTION = "test_collection";
-    private static final String ADMIN_DB = "admin";
-
-    /**
-     * Inserts a new object with { key: value } at given server.
-     * @return The new document's id
-     */
-    public static String insert(AbstractMongoDBServer entity, String key, Object value) {
-        LOG.info("Inserting {}:{} at {}", new Object[]{key, value, entity});
-        MongoClient mongoClient = clientForServer(entity);
-        try {
-            DB db = mongoClient.getDB(TEST_DB);
-            DBCollection testCollection = db.getCollection(TEST_COLLECTION);
-            BasicDBObject doc = new BasicDBObject(key, value);
-            testCollection.insert(doc);
-            ObjectId id = (ObjectId) doc.get("_id");
-            return id.toString();
-        } finally {
-            mongoClient.close();
-        }
-    }
-
-    /** @return The {@link DBObject} representing the object with the given id */
-    public static DBObject getById(AbstractMongoDBServer entity, String id) {
-        LOG.info("Getting {} from {}", new Object[]{id, entity});
-        MongoClient mongoClient = clientForServer(entity);
-        // Secondary preferred means the driver will let us read from secondaries too.
-        mongoClient.setReadPreference(ReadPreference.secondaryPreferred());
-        try {
-            DB db = mongoClient.getDB(TEST_DB);
-            DBCollection testCollection = db.getCollection(TEST_COLLECTION);
-            return testCollection.findOne(new BasicDBObject("_id", new ObjectId(id)));
-        } finally {
-            mongoClient.close();
-        }
-    }
-    
-    public static List<String> getDatabaseNames(AbstractMongoDBServer entity) {
-        LOG.info("Getting database names from {}", entity);
-        MongoClient mongoClient = clientForServer(entity);
-        try {
-            return mongoClient.getDatabaseNames();
-        } finally {
-            mongoClient.close();
-        }
-    }
-    
-    public static boolean isConfigServer(AbstractMongoDBServer entity) {
-        LOG.info("Checking if {} is a config server", entity);
-        MongoClient mongoClient = clientForServer(entity);
-        try {
-            DB db = mongoClient.getDB(ADMIN_DB);
-            CommandResult commandResult = db.command("getCmdLineOpts");
-            Map<?, ?> parsedArgs = (Map<?, ?>)commandResult.get("parsed");
-            if (parsedArgs == null) return false;
-            Boolean configServer = (Boolean)parsedArgs.get("configsvr");
-            if (configServer != null) {
-                // v2.5 format
-                return Boolean.TRUE.equals(configServer);
-            } else {
-                // v2.6 format
-                String role = (String) ((Map)parsedArgs.get("sharding")).get("clusterRole");
-                return "configsvr".equals(role);
-            }
-        } finally {
-            mongoClient.close();
-        }
-    }
-
-    private static MongoClient clientForServer(AbstractMongoDBServer server) {
-        try {
-            HostAndPort hap = BrooklynAccessUtils.getBrooklynAccessibleAddress(server, server.getAttribute(MongoDBServer.PORT));
-            return new MongoClient(hap.getHostText(), hap.getPort());
-        } catch (UnknownHostException e) {
-            // Fail whatever test called this method.
-            throw Throwables.propagate(e);
-        }
-    }
-}
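
The ReadPreference.secondaryPreferred() call in getById above is what lets the
replica set tests read documents back from secondaries; with the legacy driver's
default (primary-only) preference such reads are rejected with a "not master"
error. The same point as a standalone sketch against a hypothetical secondary:

    import org.bson.types.ObjectId;

    import com.mongodb.BasicDBObject;
    import com.mongodb.DB;
    import com.mongodb.DBObject;
    import com.mongodb.MongoClient;
    import com.mongodb.ReadPreference;

    public class SecondaryReadSketch {
        public static void main(String[] args) throws Exception {
            MongoClient client = new MongoClient("secondary.example.com", 27017);
            try {
                // Without this, reads served by a secondary are rejected.
                client.setReadPreference(ReadPreference.secondaryPreferred());
                DB db = client.getDB("brooklyn_test");
                DBObject doc = db.getCollection("test_collection")
                        .findOne(new BasicDBObject("_id", new ObjectId("0123456789abcdef01234567")));
                System.out.println(doc); // null until the write has replicated
            } finally {
                client.close();
            }
        }
    }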

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/ReplicaSetConfigTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/ReplicaSetConfigTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/ReplicaSetConfigTest.java
deleted file mode 100644
index a35c893..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/ReplicaSetConfigTest.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import static org.testng.Assert.assertEquals;
-import static org.testng.Assert.assertNotEquals;
-import static org.testng.Assert.assertTrue;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-
-import javax.annotation.Nullable;
-
-import org.bson.BSONObject;
-import org.bson.BasicBSONObject;
-import org.bson.types.BasicBSONList;
-import org.testng.annotations.Test;
-
-import com.google.common.base.Function;
-import com.google.common.base.Predicate;
-import com.google.common.base.Predicates;
-import com.google.common.collect.FluentIterable;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
-import com.google.common.net.HostAndPort;
-
-public class ReplicaSetConfigTest {
-
-    // true if object has key "votes" that is > 1
-    static Predicate<BasicBSONObject> IS_VOTING_MEMBER = new Predicate<BasicBSONObject>() {
-        @Override public boolean apply(@Nullable BasicBSONObject input) {
-            return input != null && input.containsField("votes") && input.getInt("votes") > 0;
-        }
-    };
-
-    private BasicBSONObject makeSetMember(Integer id, String host) {
-        return new BasicBSONObject(ImmutableMap.of("_id", id, "host", host));
-    }
-
-    private BasicBSONObject makeSetConfig(String id, Integer version, BasicBSONObject... members) {
-        BasicBSONList memberList = new BasicBSONList();
-        memberList.addAll(Arrays.asList(members));
-        return new BasicBSONObject(ImmutableMap.of("_id", id, "version", version, "members", memberList));
-    }
-
-    private BasicBSONObject makeSetWithNMembers(int n) {
-        ReplicaSetConfig setConfig = ReplicaSetConfig.builder("replica-set-name");
-        for (int i = 0; i < n; i++) {
-            setConfig.member("host-"+i, i, i);
-        }
-        return setConfig.build();
-    }
-
-    private Collection<HostAndPort> votingMembersOfSet(BasicBSONObject config) {
-        BasicBSONList membersObject = BasicBSONList.class.cast(config.get("members"));
-        List<BasicBSONObject> members = Lists.newArrayList();
-        for (Object object : membersObject) members.add(BasicBSONObject.class.cast(object));
-        return FluentIterable.from(members)
-                .filter(IS_VOTING_MEMBER)
-                .transform(new Function<BasicBSONObject, HostAndPort>() {
-                    @Override public HostAndPort apply(BasicBSONObject input) {
-                        return HostAndPort.fromString(input.getString("host"));
-                    }
-                })
-                .toList();
-    }
-
-    private Collection<HostAndPort> nonVotingMembersOfSet(BasicBSONObject config) {
-        BasicBSONList membersObject = BasicBSONList.class.cast(config.get("members"));
-        List<BasicBSONObject> members = Lists.newArrayList();
-        for (Object object : membersObject) members.add(BasicBSONObject.class.cast(object));
-        return FluentIterable
-                .from(members)
-                .filter(Predicates.not(IS_VOTING_MEMBER))
-                .transform(new Function<BasicBSONObject, HostAndPort>() {
-                    @Override public HostAndPort apply(BasicBSONObject input) {
-                        return HostAndPort.fromString(input.getString("host"));
-                    }
-                })
-                .toList();
-    }
-
-    @Test
-    public void testCreateFromScratch() {
-        BasicBSONObject config = ReplicaSetConfig.builder("rs")
-            .member("host-a", 12345, 1)
-            .member("host-b", 54321, 2)
-            .build();
-        assertEquals(config.get("_id"), "rs");
-        assertEquals(config.getInt("version"), 1);
-        assertTrue(config.get("members") instanceof BasicBSONList);
-        BasicBSONList members = (BasicBSONList) config.get("members");
-        assertEquals(members.size(), 2);
-    }
-
-    @Test
-    public void testCreateFromExistingConfig() {
-        // Replica set of one member
-        int version = 44;
-        BasicBSONObject config = makeSetConfig("replica-set-name", version, makeSetMember(33, "example.com:7777"));
-
-        // Use existing set to add two more members
-        BasicBSONObject newConfig = ReplicaSetConfig.fromExistingConfig(config)
-            .member("foo", 8888, 34)
-            .member("bar", 9999, 35)
-            .build();
-
-        assertEquals(newConfig.get("_id"), "replica-set-name");
-        assertEquals(newConfig.get("version"), version + 1);
-        BasicBSONList members = (BasicBSONList) newConfig.get("members");
-        assertEquals(members.size(), 3);
-
-        BSONObject original = (BSONObject) members.get(0);
-        assertEquals(original.get("_id"), 33);
-        assertEquals(original.get("host"), "example.com:7777");
-
-        BSONObject second = (BSONObject) members.get(1);
-        assertEquals(second.get("_id"), 34);
-        assertEquals(second.get("host"), "foo:8888");
-
-        BSONObject third = (BSONObject) members.get(2);
-        assertEquals(third.get("_id"), 35);
-        assertEquals(third.get("host"), "bar:9999");
-    }
-
-    @Test
-    public void testRemoveMember() {
-        int version = 44;
-        BasicBSONObject config = makeSetConfig("replica-set-name", version,
-                makeSetMember(33, "example.com:7777"),
-                makeSetMember(34, "example.com:7778"));
-
-        // Use the existing config to remove a member
-        BasicBSONObject newConfig = ReplicaSetConfig.fromExistingConfig(config)
-            .remove("example.com", 7777)
-            .build();
-
-        assertEquals(newConfig.get("version"), version + 1);
-        BasicBSONList members = (BasicBSONList) newConfig.get("members");
-        assertEquals(members.size(), 1);
-        assertEquals(BSONObject.class.cast(members.get(0)).get("host"), "example.com:7778");
-
-        newConfig = ReplicaSetConfig.fromExistingConfig(newConfig)
-            .remove("example.com", 7778)
-            .build();
-
-        members = (BasicBSONList) newConfig.get("members");
-        assertTrue(members.isEmpty());
-    }
-
-    @Test
-    public void testRemoveNonExistentMemberHasNoEffect() {
-        BasicBSONObject config = makeSetConfig("replica-set-name", 1,
-                makeSetMember(33, "example.com:7777"),
-                makeSetMember(34, "example.com:7778"));
-
-        BasicBSONList members = (BasicBSONList) config.get("members");
-        assertEquals(members.size(), 2);
-
-        BasicBSONObject altered = ReplicaSetConfig.fromExistingConfig(config)
-                .remove("foo", 99)
-                .build();
-
-        members = (BasicBSONList) altered.get("members");
-        assertEquals(members.size(), 2);
-    }
-
-    @Test
-    public void testSetOfFourMembersHasThreeVoters() {
-        BasicBSONObject config = makeSetWithNMembers(4);
-        assertEquals(votingMembersOfSet(config).size(), 3, "Expected three voters in set with four members");
-        assertEquals(nonVotingMembersOfSet(config).size(), 1, "Expected one non-voter in set with four members");
-    }
-
-    @Test
-    public void testFourthServerOfFourIsGivenVoteWhenAnotherServerIsRemoved() {
-        BasicBSONObject config = makeSetWithNMembers(4);
-        HostAndPort toRemove = votingMembersOfSet(config).iterator().next();
-
-        BasicBSONObject updated = ReplicaSetConfig.fromExistingConfig(config)
-                .remove(toRemove)
-                .build();
-
-        assertEquals(votingMembersOfSet(updated).size(), 3);
-        assertTrue(nonVotingMembersOfSet(updated).isEmpty());
-
-        BasicBSONList newMembers = BasicBSONList.class.cast(updated.get("members"));
-        for (Object object : newMembers) {
-            BasicBSONObject member = BasicBSONObject.class.cast(object);
-            HostAndPort memberHostAndPort = HostAndPort.fromString(member.getString("host"));
-            assertNotEquals(memberHostAndPort, toRemove);
-        }
-    }
-
-    @Test
-    public void testMaximumNumberOfVotersIsLimited() {
-        BasicBSONObject config = makeSetWithNMembers(ReplicaSetConfig.MAXIMUM_REPLICA_SET_SIZE);
-        int voters = ReplicaSetConfig.MAXIMUM_VOTING_MEMBERS;
-        int nonVoters = ReplicaSetConfig.MAXIMUM_REPLICA_SET_SIZE - voters;
-        assertEquals(votingMembersOfSet(config).size(), voters, "Expected number of voters in max-size set to be " + voters);
-        assertEquals(nonVotingMembersOfSet(config).size(), nonVoters, "Expected number of non-voters in max-size set to be " + nonVoters);
-    }
-
-    @Test(expectedExceptions = IllegalStateException.class)
-    public void testMoreMembersThanMaximumAllowsRejected() {
-        makeSetWithNMembers(ReplicaSetConfig.MAXIMUM_REPLICA_SET_SIZE + 1);
-    }
-
-    @Test
-    public void testPrimaryGivenVoteWhenLastInMemberList() {
-        BasicBSONObject config = ReplicaSetConfig.builder("rs")
-            .member("host-a", 1, 1)
-            .member("host-b", 2, 2)
-            .member("host-c", 3, 3)
-            .member("host-d", 4, 4)
-            .primary(HostAndPort.fromParts("host-d", 4))
-            .build();
-        assertEquals(votingMembersOfSet(config).size(), 3);
-        assertEquals(nonVotingMembersOfSet(config).size(), 1);
-        assertTrue(votingMembersOfSet(config).contains(HostAndPort.fromParts("host-d", 4)));
-    }
-}
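
The voting assertions above reflect MongoDB's rule that at most seven replica
set members may vote, plus Brooklyn's preference for an odd number of voters so
elections can always reach a majority. A rough sketch of that assignment (it
mirrors, but is not, ReplicaSetConfig's implementation; the handling of very
small sets is a guess):

    import java.util.List;

    import org.bson.BasicBSONObject;
    import org.bson.types.BasicBSONList;

    public class VotingMembersSketch {
        static BasicBSONList membersWithVotes(List<String> hostsAndPorts) {
            int voters = Math.min(hostsAndPorts.size(), 7); // MongoDB's cap on voting members
            if (voters > 1 && voters % 2 == 0) {
                voters--; // keep the voter count odd so elections find a majority
            }
            BasicBSONList members = new BasicBSONList();
            for (int i = 0; i < hostsAndPorts.size(); i++) {
                members.add(new BasicBSONObject("_id", i)
                        .append("host", hostsAndPorts.get(i))
                        .append("votes", i < voters ? 1 : 0));
            }
            return members;
        }
    }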

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerIntegrationTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerIntegrationTest.java
deleted file mode 100644
index 4aa8f69..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBConfigServerIntegrationTest.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import static org.testng.Assert.assertFalse;
-
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.basic.ApplicationBuilder;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.nosql.mongodb.MongoDBServer;
-import brooklyn.entity.nosql.mongodb.MongoDBTestHelper;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
-import brooklyn.test.Asserts;
-import brooklyn.test.EntityTestUtils;
-import brooklyn.test.entity.TestApplication;
-
-import com.google.common.collect.ImmutableList;
-
-public class MongoDBConfigServerIntegrationTest {
-    private TestApplication app;
-    private LocalhostMachineProvisioningLocation localhostProvisioningLocation;
-
-    @BeforeMethod(alwaysRun=true)
-    public void setUp() throws Exception {
-        localhostProvisioningLocation = new LocalhostMachineProvisioningLocation();
-        app = ApplicationBuilder.newManagedApp(TestApplication.class);
-    }
-
-    @AfterMethod(alwaysRun=true)
-    public void tearDown() throws Exception {
-        if (app != null) Entities.destroyAll(app.getManagementContext());
-    }
-    
-    @Test(groups = "Integration")
-    public void testCanStartAndStop() throws Exception {
-        MongoDBConfigServer entity = app.createAndManageChild(EntitySpec.create(MongoDBConfigServer.class)
-                .configure(MongoDBServer.MONGODB_CONF_TEMPLATE_URL, "classpath:///test-mongodb-configserver.conf"));
-        app.start(ImmutableList.of(localhostProvisioningLocation));
-
-        EntityTestUtils.assertAttributeEqualsEventually(entity, Startable.SERVICE_UP, true);
-        Asserts.assertTrue(MongoDBTestHelper.isConfigServer(entity), "Server is not a config server");
-        entity.stop();
-        assertFalse(entity.getAttribute(Startable.SERVICE_UP));
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentEc2LiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentEc2LiveTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentEc2LiveTest.java
deleted file mode 100644
index e85a3ef..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentEc2LiveTest.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import groovy.time.TimeDuration;
-
-import org.testng.Assert;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.AbstractEc2LiveTest;
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.nosql.mongodb.MongoDBReplicaSet;
-import brooklyn.entity.nosql.mongodb.MongoDBServer;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.location.Location;
-import brooklyn.test.Asserts;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-
-/**
- * NOTE: These tests will provision 9 machines in AWS, which can cause 'Request limit exceeded' and
- * 'Exhausted available authentication methods' exceptions, depending upon current AWS load. You can
- * mitigate this issue by adding the following lines to your brooklyn.properties:
- *
- * brooklyn.location.jclouds.machineCreateAttempts=3
- * brooklyn.jclouds.aws-ec2.maxConcurrentMachineCreations=5
- */
-@Test
-public class MongoDBShardedDeploymentEc2LiveTest extends AbstractEc2LiveTest {
-
-    private static final Integer ROUTER_CLUSTER_SIZE = 2;
-    private static final Integer REPLICASET_SIZE = 2;
-    private static final Integer SHARD_CLUSTER_SIZE = 3;
-    private static final TimeDuration TIMEOUT = new TimeDuration(0, 3, 0, 0);
-
-    @Override
-    protected void doTest(Location loc) throws Exception {
-        final MongoDBShardedDeployment deployment = app.createAndManageChild(EntitySpec.create(MongoDBShardedDeployment.class)
-                .configure(MongoDBShardedDeployment.INITIAL_ROUTER_CLUSTER_SIZE, ROUTER_CLUSTER_SIZE)
-                .configure(MongoDBShardedDeployment.SHARD_REPLICASET_SIZE, REPLICASET_SIZE)
-                .configure(MongoDBShardedDeployment.INITIAL_SHARD_CLUSTER_SIZE, SHARD_CLUSTER_SIZE)
-                .configure(MongoDBShardedDeployment.MONGODB_REPLICA_SET_SPEC, EntitySpec.create(MongoDBReplicaSet.class)
-                        .configure(MongoDBServer.MONGODB_CONF_TEMPLATE_URL, "classpath:///test-mongodb.conf")
-                        .configure(MongoDBReplicaSet.MEMBER_SPEC, EntitySpec.create(MongoDBServer.class)))
-                .configure(MongoDBShardedDeployment.MONGODB_ROUTER_SPEC, EntitySpec.create(MongoDBRouter.class)
-                        .configure(MongoDBConfigServer.MONGODB_CONF_TEMPLATE_URL, "classpath:///test-mongodb-router.conf"))
-                .configure(MongoDBShardedDeployment.MONGODB_CONFIG_SERVER_SPEC, EntitySpec.create(MongoDBConfigServer.class)
-                        .configure(MongoDBConfigServer.MONGODB_CONF_TEMPLATE_URL, "classpath:///test-mongodb-configserver.conf")));
-
-        app.start(ImmutableList.of(loc));
-        
-        Entities.dumpInfo(app);
-
-        Asserts.succeedsEventually(ImmutableMap.of("timeout", TIMEOUT), new Runnable() {
-            public void run() {
-                Assert.assertEquals(deployment.getRouterCluster().getCurrentSize(), ROUTER_CLUSTER_SIZE);
-                Assert.assertEquals(deployment.getShardCluster().getCurrentSize(), SHARD_CLUSTER_SIZE);
-                Assert.assertEquals(deployment.getConfigCluster().getCurrentSize(), MongoDBShardedDeployment.CONFIG_CLUSTER_SIZE.getDefaultValue());
-                for (Entity entity : deployment.getShardCluster().getMembers()) {
-                    Assert.assertEquals(((MongoDBReplicaSet) entity).getCurrentSize(), REPLICASET_SIZE);
-                }
-            }
-        });
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentIntegrationTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentIntegrationTest.java
deleted file mode 100644
index 9348431..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/sharding/MongoDBShardedDeploymentIntegrationTest.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb.sharding;
-
-import org.testng.Assert;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.BrooklynAppLiveTestSupport;
-import brooklyn.entity.Entity;
-import brooklyn.entity.nosql.mongodb.AbstractMongoDBServer;
-import brooklyn.entity.nosql.mongodb.MongoDBReplicaSet;
-import brooklyn.entity.nosql.mongodb.MongoDBServer;
-import brooklyn.entity.nosql.mongodb.MongoDBTestHelper;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
-import brooklyn.test.EntityTestUtils;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-import com.mongodb.DBObject;
-
-public class MongoDBShardedDeploymentIntegrationTest extends BrooklynAppLiveTestSupport {
-    
-    private static final Integer ROUTER_CLUSTER_SIZE = 2;
-    private static final Integer REPLICASET_SIZE = 2;
-    private static final Integer SHARD_CLUSTER_SIZE = 3;
-    
-    private LocalhostMachineProvisioningLocation localhostProvisioningLocation;
-
-    @BeforeMethod(alwaysRun=true)
-    public void setUp() throws Exception {
-        super.setUp();
-        localhostProvisioningLocation = app.newLocalhostProvisioningLocation();
-    }
-
-    private MongoDBShardedDeployment makeAndStartDeployment() {
-        final MongoDBShardedDeployment deployment = app.createAndManageChild(EntitySpec.create(MongoDBShardedDeployment.class)
-                .configure(MongoDBShardedDeployment.INITIAL_ROUTER_CLUSTER_SIZE, ROUTER_CLUSTER_SIZE)
-                .configure(MongoDBShardedDeployment.SHARD_REPLICASET_SIZE, REPLICASET_SIZE)
-                .configure(MongoDBShardedDeployment.INITIAL_SHARD_CLUSTER_SIZE, SHARD_CLUSTER_SIZE)
-                .configure(MongoDBShardedDeployment.MONGODB_REPLICA_SET_SPEC, EntitySpec.create(MongoDBReplicaSet.class)
-                        .configure(MongoDBServer.MONGODB_CONF_TEMPLATE_URL, "classpath:///test-mongodb.conf")
-                        .configure(MongoDBReplicaSet.MEMBER_SPEC, EntitySpec.create(MongoDBServer.class)))
-                .configure(MongoDBShardedDeployment.MONGODB_ROUTER_SPEC, EntitySpec.create(MongoDBRouter.class)
-                        .configure(MongoDBConfigServer.MONGODB_CONF_TEMPLATE_URL, "classpath:///test-mongodb-router.conf"))
-                .configure(MongoDBShardedDeployment.MONGODB_CONFIG_SERVER_SPEC, EntitySpec.create(MongoDBConfigServer.class)
-                        .configure(MongoDBConfigServer.MONGODB_CONF_TEMPLATE_URL, "classpath:///test-mongodb-configserver.conf")));
-        app.start(ImmutableList.of(localhostProvisioningLocation));
-        EntityTestUtils.assertAttributeEqualsEventually(deployment, Startable.SERVICE_UP, true);
-        return deployment;
-    }
-    
-    @Test(groups = "Integration")
-    public void testCanStartAndStopDeployment() {
-        MongoDBShardedDeployment deployment = makeAndStartDeployment();
-        deployment.stop();
-        EntityTestUtils.assertAttributeEqualsEventually(deployment, Startable.SERVICE_UP, false);
-    }
-    
-    @Test(groups = "Integration")
-    public void testDeployedStructure() {
-        MongoDBShardedDeployment deployment = makeAndStartDeployment();
-        MongoDBConfigServerCluster configServers = deployment.getConfigCluster();
-        MongoDBRouterCluster routers = deployment.getRouterCluster();
-        MongoDBShardCluster shards = deployment.getShardCluster();
-        Assert.assertNotNull(configServers);
-        Assert.assertNotNull(routers);
-        Assert.assertNotNull(shards);
-        Assert.assertEquals(configServers.getCurrentSize(), MongoDBShardedDeployment.CONFIG_CLUSTER_SIZE.getDefaultValue());
-        Assert.assertEquals(routers.getCurrentSize(), ROUTER_CLUSTER_SIZE);
-        Assert.assertEquals(shards.getCurrentSize(), SHARD_CLUSTER_SIZE);
-        for (Entity entity : deployment.getShardCluster().getMembers()) {
-            Assert.assertEquals(((MongoDBReplicaSet)entity).getCurrentSize(), REPLICASET_SIZE);
-        }
-        for (Entity entity : configServers.getMembers()) {
-            checkEntityTypeAndServiceUp(entity, MongoDBConfigServer.class);
-        }
-        for (Entity entity : routers.getMembers()) {
-            checkEntityTypeAndServiceUp(entity, MongoDBRouter.class);
-        }
-        for (Entity entity : shards.getMembers()) {
-            checkEntityTypeAndServiceUp(entity, MongoDBReplicaSet.class);
-        }
-    }
-    
-    @Test(groups = "Integration")
-    public void testReadAndWriteDifferentRouters() {
-        MongoDBShardedDeployment deployment = makeAndStartDeployment();
-        EntityTestUtils.assertAttributeEqualsEventually(deployment, Startable.SERVICE_UP, true);
-        MongoDBRouter router1 = (MongoDBRouter) Iterables.get(deployment.getRouterCluster().getMembers(), 0);
-        MongoDBRouter router2 = (MongoDBRouter) Iterables.get(deployment.getRouterCluster().getMembers(), 1);
-        EntityTestUtils.assertAttributeEqualsEventually(router1, Startable.SERVICE_UP, true);
-        EntityTestUtils.assertAttributeEqualsEventually(router2, Startable.SERVICE_UP, true);
-        
-        String documentId = MongoDBTestHelper.insert(router1, "meaning-of-life", 42);
-        DBObject docOut = MongoDBTestHelper.getById(router2, documentId);
-        Assert.assertEquals(docOut.get("meaning-of-life"), 42);
-        
-        for (Entity entity : Iterables.filter(app.getManagementContext().getEntityManager().getEntitiesInApplication(app), AbstractMongoDBServer.class)) {
-            EntityTestUtils.assertAttributeEqualsEventually(entity, Startable.SERVICE_UP, true);
-        }
-    }
-    
-    private void checkEntityTypeAndServiceUp(Entity entity, Class<? extends Entity> expectedClass) {
-        Assert.assertNotNull(entity);
-        Assert.assertTrue(expectedClass.isAssignableFrom(entity.getClass()), "expected: " + expectedClass 
-                + " on interfaces, found: " + entity.getClass().getInterfaces());
-        EntityTestUtils.assertAttributeEqualsEventually(entity, Startable.SERVICE_UP, true);
-    }
-
-}
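
testReadAndWriteDifferentRouters above relies on every mongos router consulting
the same config servers for cluster metadata, so a write through one router is
visible through any other. The equivalent check with the legacy driver directly,
against hypothetical router endpoints:

    import org.bson.types.ObjectId;

    import com.mongodb.BasicDBObject;
    import com.mongodb.DBCollection;
    import com.mongodb.DBObject;
    import com.mongodb.MongoClient;

    public class CrossRouterSketch {
        public static void main(String[] args) throws Exception {
            MongoClient router1 = new MongoClient("router1.example.com", 27017);
            MongoClient router2 = new MongoClient("router2.example.com", 27017);
            try {
                DBCollection viaRouter1 = router1.getDB("brooklyn_test").getCollection("test_collection");
                BasicDBObject doc = new BasicDBObject("meaning-of-life", 42);
                viaRouter1.insert(doc);
                ObjectId id = (ObjectId) doc.get("_id"); // assigned by the driver on insert

                DBCollection viaRouter2 = router2.getDB("brooklyn_test").getCollection("test_collection");
                DBObject found = viaRouter2.findOne(new BasicDBObject("_id", id));
                System.out.println(found.get("meaning-of-life")); // 42
            } finally {
                router1.close();
                router2.close();
            }
        }
    }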

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/redis/JedisSupport.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/redis/JedisSupport.java b/software/nosql/src/test/java/brooklyn/entity/nosql/redis/JedisSupport.java
deleted file mode 100644
index fba0f59..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/redis/JedisSupport.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.redis;
-
-import static org.testng.Assert.assertEquals;
-import static org.testng.Assert.assertTrue;
-import redis.clients.jedis.Jedis;
-
-import com.google.common.base.Strings;
-
-/**
- * {@link RedisStore} testing using Jedis API.
- */
-public class JedisSupport {
-    private static final String TEST_DATA = Strings.repeat("0123456789", 16);
-
-    private RedisStore redis;
-
-    public JedisSupport(RedisStore redis) {
-        this.redis = redis;
-    }
-
-    /**
-     * Exercise the {@link RedisStore} using the Jedis API.
-     */
-    public void redisTest() throws Exception {
-        writeData("brooklyn", TEST_DATA);
-        String result = readData("brooklyn");
-        assertEquals(result, TEST_DATA);
-    }
-    
-    public void writeData(String key, String val) throws Exception {
-        Jedis client = getRedisClient(redis);
-        try {
-            client.set(key, val);
-        } finally {
-            client.disconnect();
-        }
-    }
-
-    public String readData(String key) throws Exception {
-        Jedis client = getRedisClient(redis);
-        try {
-            return client.get(key);
-        } finally {
-            client.disconnect();
-        }
-    }
-
-    private Jedis getRedisClient(RedisStore redis) {
-        int port = redis.getAttribute(RedisStore.REDIS_PORT);
-        String host = redis.getAttribute(RedisStore.HOSTNAME);
-        Jedis client = new Jedis(host, port);
-        client.connect();
-        assertTrue(client.isConnected());
-        return client;
-    }
-}
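
JedisSupport above is a thin wrapper over the plain Jedis client; the round trip
it performs boils down to the following standalone sketch (the endpoint is
hypothetical):

    import redis.clients.jedis.Jedis;

    public class JedisRoundTrip {
        public static void main(String[] args) {
            Jedis client = new Jedis("redis.example.com", 6379);
            try {
                client.connect();
                client.set("greeting", "hello");
                System.out.println(client.get("greeting")); // prints "hello"
            } finally {
                client.disconnect();
            }
        }
    }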

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/redis/RedisClusterIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/redis/RedisClusterIntegrationTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/redis/RedisClusterIntegrationTest.java
deleted file mode 100644
index 7795bac..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/redis/RedisClusterIntegrationTest.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.redis;
-
-import static org.testng.Assert.assertEquals;
-
-import java.util.Collection;
-import java.util.List;
-import java.util.concurrent.Callable;
-
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.basic.ApplicationBuilder;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.group.DynamicCluster;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.location.Location;
-import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
-import brooklyn.test.Asserts;
-import brooklyn.test.EntityTestUtils;
-import brooklyn.test.entity.TestApplication;
-
-import com.google.common.collect.ImmutableList;
-
-public class RedisClusterIntegrationTest {
-
-    private TestApplication app;
-    private Location loc;
-    private RedisCluster cluster;
-
-    @BeforeMethod(alwaysRun=true)
-    public void setup() {
-        app = ApplicationBuilder.newManagedApp(TestApplication.class);
-        loc = new LocalhostMachineProvisioningLocation();
-    }
-
-    @AfterMethod(alwaysRun=true)
-    public void shutdown() {
-        if (app != null) Entities.destroyAll(app.getManagementContext());
-    }
-
-    @Test(groups = { "Integration" })
-    public void testRedisClusterReplicates() throws Exception {
-        final String key = "mykey";
-        final String val = "1234567890";
-        
-        cluster = app.createAndManageChild(EntitySpec.create(RedisCluster.class)
-                .configure(DynamicCluster.INITIAL_SIZE, 3));
-        app.start(ImmutableList.of(loc));
-
-        EntityTestUtils.assertAttributeEqualsEventually(cluster, Startable.SERVICE_UP, true);
-
-        RedisStore master = cluster.getMaster();
-        List<RedisSlave> slaves = ImmutableList.<RedisSlave>copyOf((Collection)cluster.getSlaves().getMembers());
-        
-        assertEquals(slaves.size(), 3);
-        
-        JedisSupport viaMaster = new JedisSupport(master);
-        viaMaster.writeData(key, val);
-        assertEquals(viaMaster.readData(key), val);
-
-        for (RedisSlave slave : slaves) {
-            final JedisSupport viaSlave = new JedisSupport(slave);
-            Asserts.succeedsEventually(new Callable<Void>() {
-                @Override public Void call() throws Exception {
-                    assertEquals(viaSlave.readData(key), val);
-                    return null;
-                }});
-        }
-
-        // Check that stopping slave will not stop anything else
-        // (it used to stop master because wasn't supplying port!)
-        slaves.get(0).stop();
-        EntityTestUtils.assertAttributeEqualsEventually(slaves.get(0), Startable.SERVICE_UP, false);
-        
-        assertEquals(master.getAttribute(Startable.SERVICE_UP), Boolean.TRUE);
-        for (RedisSlave slave : slaves.subList(1, slaves.size())) {
-            assertEquals(slave.getAttribute(Startable.SERVICE_UP), Boolean.TRUE);
-        }
-        
-        // Check that stopping cluster will stop everything
-        cluster.stop();
-
-        EntityTestUtils.assertAttributeEqualsEventually(cluster, Startable.SERVICE_UP, false);
-        assertEquals(master.getAttribute(Startable.SERVICE_UP), Boolean.FALSE);
-        for (RedisSlave slave : slaves) {
-            assertEquals(slave.getAttribute(Startable.SERVICE_UP), Boolean.FALSE);
-        }
-    }
-}
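
The "wasn't supplying port" comment above refers to a stop routine that shut
down whichever Redis instance listened on the default port, i.e. the master.
Addressing the slave by its own port confines the shutdown to that server,
roughly as below (endpoint and port are hypothetical):

    import redis.clients.jedis.Jedis;

    public class StopSlaveSketch {
        public static void main(String[] args) {
            // The slave's own port; defaulting to 6379 here would have hit the master.
            Jedis slave = new Jedis("redis.example.com", 10001);
            try {
                slave.shutdown(); // stops only the instance listening on 10001
            } finally {
                slave.disconnect();
            }
        }
    }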

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/redis/RedisEc2LiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/redis/RedisEc2LiveTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/redis/RedisEc2LiveTest.java
deleted file mode 100644
index a7d1fac..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/redis/RedisEc2LiveTest.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.redis;
-
-import javax.annotation.Nullable;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.AbstractEc2LiveTest;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.location.Location;
-import brooklyn.test.EntityTestUtils;
-
-import com.google.common.base.Predicate;
-import com.google.common.collect.ImmutableList;
-
-public class RedisEc2LiveTest extends AbstractEc2LiveTest {
-
-    @SuppressWarnings("unused")
-    private static final Logger LOG = LoggerFactory.getLogger(RedisEc2LiveTest.class);
-
-    @Override
-    protected void doTest(Location loc) throws Exception {
-        RedisStore redis = app.createAndManageChild(EntitySpec.create(RedisStore.class));
-        app.start(ImmutableList.of(loc));
-        EntityTestUtils.assertAttributeEqualsEventually(redis, RedisStore.SERVICE_UP, true);
-
-        JedisSupport support = new JedisSupport(redis);
-        support.redisTest();
-        // Confirm sensors are valid
-        EntityTestUtils.assertPredicateEventuallyTrue(redis, new Predicate<RedisStore>() {
-            @Override public boolean apply(@Nullable RedisStore input) {
-                return input != null &&
-                        input.getAttribute(RedisStore.UPTIME) > 0 &&
-                        input.getAttribute(RedisStore.TOTAL_COMMANDS_PROCESSED) >= 0 &&
-                        input.getAttribute(RedisStore.TOTAL_CONNECTIONS_RECEIVED) >= 0 &&
-                        input.getAttribute(RedisStore.EXPIRED_KEYS) >= 0 &&
-                        input.getAttribute(RedisStore.EVICTED_KEYS) >= 0 &&
-                        input.getAttribute(RedisStore.KEYSPACE_HITS) >= 0 &&
-                        input.getAttribute(RedisStore.KEYSPACE_MISSES) >= 0;
-            }
-        });
-
-    }
-
-    @Test(enabled=false)
-    public void testDummy() {} // Convince TestNG IDE integration that this really does have test methods
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/redis/RedisIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/redis/RedisIntegrationTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/redis/RedisIntegrationTest.java
deleted file mode 100644
index 9ecf445..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/redis/RedisIntegrationTest.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.redis;
-
-import javax.annotation.Nullable;
-
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.location.Location;
-import brooklyn.location.basic.PortRanges;
-import brooklyn.test.EntityTestUtils;
-import brooklyn.test.entity.TestApplication;
-import brooklyn.util.time.Duration;
-
-import com.google.common.base.Predicate;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-
-/**
- * Test the operation of the {@link RedisStore} class.
- */
-public class RedisIntegrationTest {
-
-    private TestApplication app;
-    private Location loc;
-    private RedisStore redis;
-
-    @BeforeMethod(alwaysRun=true)
-    public void setup() {
-        app = TestApplication.Factory.newManagedInstanceForTests();
-        loc = app.newLocalhostProvisioningLocation();
-    }
-
-    @AfterMethod(alwaysRun=true)
-    public void shutdown() {
-        if (app != null) Entities.destroyAll(app.getManagementContext());
-    }
-
-    /**
-     * Test that the server starts up and sets SERVICE_UP correctly.
-     */
-    @Test(groups = { "Integration" })
-    public void canStartupAndShutdown() throws Exception {
-        redis = app.createAndManageChild(EntitySpec.create(RedisStore.class));
-        app.start(ImmutableList.of(loc));
-
-        EntityTestUtils.assertAttributeEqualsEventually(redis, Startable.SERVICE_UP, true);
-
-        redis.stop();
-
-        EntityTestUtils.assertAttributeEqualsEventually(redis, Startable.SERVICE_UP, false);
-    }
-
-    /**
-     * Test that a client can connect to the service.
-     */
-    @Test(groups = { "Integration" })
-    public void testRedisConnection() throws Exception {
-        redis = app.createAndManageChild(EntitySpec.create(RedisStore.class));
-        app.start(ImmutableList.of(loc));
-
-        EntityTestUtils.assertAttributeEqualsEventually(redis, Startable.SERVICE_UP, true);
-
-        JedisSupport support = new JedisSupport(redis);
-        support.redisTest();
-    }
-
-    /**
-     * Test we get sensors from an instance on a non-default port
-     */
-    @Test(groups = { "Integration" })
-    public void testNonStandardPort() throws Exception {
-        redis = app.createAndManageChild(EntitySpec.create(RedisStore.class)
-                .configure(RedisStore.REDIS_PORT, PortRanges.fromString("10000+")));
-        app.start(ImmutableList.of(loc));
-
-        EntityTestUtils.assertAttributeEqualsEventually(redis, Startable.SERVICE_UP, true);
-        JedisSupport support = new JedisSupport(redis);
-        support.redisTest();
-
-        // Increase timeout because test was failing on jenkins sometimes. The log shows only one 
-        // call to `info server` (for obtaining uptime) which took 26 seconds; then 4 seconds later 
-        // this assert failed (with it checking every 500ms). The response did correctly contain
-        // `uptime_in_seconds:27`.
-        EntityTestUtils.assertPredicateEventuallyTrue(ImmutableMap.of("timeout", Duration.FIVE_MINUTES), redis, new Predicate<RedisStore>() {
-            @Override public boolean apply(@Nullable RedisStore input) {
-                return input != null &&
-                        input.getAttribute(RedisStore.UPTIME) > 0 &&
-                        input.getAttribute(RedisStore.TOTAL_COMMANDS_PROCESSED) >= 0 &&
-                        input.getAttribute(RedisStore.TOTAL_CONNECTIONS_RECEIVED) >= 0 &&
-                        input.getAttribute(RedisStore.EXPIRED_KEYS) >= 0 &&
-                        input.getAttribute(RedisStore.EVICTED_KEYS) >= 0 &&
-                        input.getAttribute(RedisStore.KEYSPACE_HITS) >= 0 &&
-                        input.getAttribute(RedisStore.KEYSPACE_MISSES) >= 0;
-            }
-        });
-    }
-}
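
The "10000+" syntax above asks Brooklyn for the first free port at or above
10000 on the target machine, which is how the test steers clear of the default
6379. The same configuration in isolation, as a sketch:

    import brooklyn.entity.nosql.redis.RedisStore;
    import brooklyn.entity.proxying.EntitySpec;
    import brooklyn.location.PortRange;
    import brooklyn.location.basic.PortRanges;

    public class NonStandardPortSketch {
        public static EntitySpec<RedisStore> spec() {
            PortRange range = PortRanges.fromString("10000+"); // first free port >= 10000
            return EntitySpec.create(RedisStore.class)
                    .configure(RedisStore.REDIS_PORT, range);
        }
    }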

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/riak/RiakClusterEc2LiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/riak/RiakClusterEc2LiveTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/riak/RiakClusterEc2LiveTest.java
deleted file mode 100644
index 3f9e7d9..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/riak/RiakClusterEc2LiveTest.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.riak;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.AbstractEc2LiveTest;
-import brooklyn.entity.basic.Attributes;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.location.Location;
-import brooklyn.test.EntityTestUtils;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Iterables;
-
-public class RiakClusterEc2LiveTest extends AbstractEc2LiveTest {
-    @SuppressWarnings("unused")
-    private static final Logger LOG = LoggerFactory.getLogger(RiakNodeEc2LiveTest.class);
-
-    @Override
-    protected void doTest(Location loc) throws Exception {
-        RiakCluster cluster = app.createAndManageChild(EntitySpec.create(RiakCluster.class)
-                .configure(RiakCluster.INITIAL_SIZE, 3)
-                .configure(RiakCluster.MEMBER_SPEC, EntitySpec.create(RiakNode.class)));
-        app.start(ImmutableList.of(loc));
-
-        EntityTestUtils.assertAttributeEqualsEventually(cluster, RiakNode.SERVICE_UP, true);
-
-        RiakNode first = (RiakNode) Iterables.get(cluster.getMembers(), 0);
-        RiakNode second = (RiakNode) Iterables.get(cluster.getMembers(), 1);
-
-        assertNodesUpAndInCluster(first, second);
-        
-        EntityTestUtils.assertAttributeEqualsEventually(cluster, Attributes.SERVICE_UP, true);
-    }
-    
-    private void assertNodesUpAndInCluster(final RiakNode... nodes) {
-        for (final RiakNode node : nodes) {
-            EntityTestUtils.assertAttributeEqualsEventually(node, RiakNode.SERVICE_UP, true);
-            EntityTestUtils.assertAttributeEqualsEventually(node, RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER, true);
-        }
-    }
-
-    @Test(enabled = false)
-    public void testDummy() {
-    } // Convince TestNG IDE integration that this really does have test methods
-
-
-    @Override
-    public void test_Ubuntu_12_0() throws Exception {
-        //Override to add the custom securityGroup for opening Riak ports.
-        // Image: {id=us-east-1/ami-d0f89fb9, providerId=ami-d0f89fb9, name=ubuntu/images/ebs/ubuntu-precise-12.04-amd64-server-20130411.1, location={scope=REGION, id=us-east-1, description=us-east-1, parent=aws-ec2, iso3166Codes=[US-VA]}, os={family=ubuntu, arch=paravirtual, version=12.04, description=099720109477/ubuntu/images/ebs/ubuntu-precise-12.04-amd64-server-20130411.1, is64Bit=true}, description=099720109477/ubuntu/images/ebs/ubuntu-precise-12.04-amd64-server-20130411.1, version=20130411.1, status=AVAILABLE[available], loginUser=ubuntu, userMetadata={owner=099720109477, rootDeviceType=ebs, virtualizationType=paravirtual, hypervisor=xen}}
-        runTest(ImmutableMap.of("imageId", "us-east-1/ami-d0f89fb9", "loginUser", "ubuntu", "hardwareId", SMALL_HARDWARE_ID, "securityGroups", "RiakSecurityGroup"));
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/riak/RiakNodeEc2LiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/riak/RiakNodeEc2LiveTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/riak/RiakNodeEc2LiveTest.java
deleted file mode 100644
index b3a2005..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/riak/RiakNodeEc2LiveTest.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.riak;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.annotations.Test;
-
-import com.google.common.collect.ImmutableList;
-
-import brooklyn.entity.AbstractEc2LiveTest;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.location.Location;
-import brooklyn.test.EntityTestUtils;
-
-public class RiakNodeEc2LiveTest extends AbstractEc2LiveTest {
-
-    @SuppressWarnings("unused")
-    private static final Logger LOG = LoggerFactory.getLogger(RiakNodeEc2LiveTest.class);
-
-    @Override
-    protected void doTest(Location loc) throws Exception {
-        RiakNode entity = app.createAndManageChild(EntitySpec.create(RiakNode.class));
-        app.start(ImmutableList.of(loc));
-
-        EntityTestUtils.assertAttributeEqualsEventually(entity, RiakNode.SERVICE_UP, true);
-
-    }
-
-    @Test(enabled = false)
-    public void testDummy() {
-    } // Convince TestNG IDE integration that this really does have test methods
-
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/riak/RiakNodeGoogleComputeLiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/riak/RiakNodeGoogleComputeLiveTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/riak/RiakNodeGoogleComputeLiveTest.java
deleted file mode 100644
index a2cc46e..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/riak/RiakNodeGoogleComputeLiveTest.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.riak;
-
-import org.testng.annotations.Test;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-
-import brooklyn.entity.AbstractGoogleComputeLiveTest;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.location.Location;
-import brooklyn.test.EntityTestUtils;
-
-public class RiakNodeGoogleComputeLiveTest extends AbstractGoogleComputeLiveTest {
-    @Override
-    protected void doTest(Location loc) throws Exception {
-        RiakCluster cluster = app.createAndManageChild(EntitySpec.create(RiakCluster.class)
-                .configure(RiakCluster.INITIAL_SIZE, 2)
-                .configure(RiakCluster.MEMBER_SPEC, EntitySpec.create(RiakNode.class)));
-        app.start(ImmutableList.of(loc));
-
-        EntityTestUtils.assertAttributeEqualsEventually(cluster, RiakCluster.SERVICE_UP, true);
-
-        RiakNode first = (RiakNode) Iterables.get(cluster.getMembers(), 0);
-        RiakNode second = (RiakNode) Iterables.get(cluster.getMembers(), 1);
-
-        EntityTestUtils.assertAttributeEqualsEventually(first, RiakNode.SERVICE_UP, true);
-        EntityTestUtils.assertAttributeEqualsEventually(second, RiakNode.SERVICE_UP, true);
-
-        EntityTestUtils.assertAttributeEqualsEventually(first, RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER, true);
-        EntityTestUtils.assertAttributeEqualsEventually(second, RiakNode.RIAK_NODE_HAS_JOINED_CLUSTER, true);
-
-    }
-
-    @Test(groups = {"Live"})
-    @Override
-    public void test_DefaultImage() throws Exception {
-        super.test_DefaultImage();
-    }
-
-    @Test(enabled = false)
-    public void testDummy() {
-    } // Convince testng IDE integration that this really does have test methods
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/riak/RiakNodeIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/riak/RiakNodeIntegrationTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/riak/RiakNodeIntegrationTest.java
deleted file mode 100644
index 002739c..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/riak/RiakNodeIntegrationTest.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.riak;
-
-import static org.testng.Assert.assertFalse;
-
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import com.google.common.collect.ImmutableList;
-
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
-import brooklyn.test.EntityTestUtils;
-import brooklyn.test.entity.TestApplication;
-
-public class RiakNodeIntegrationTest {
-
-    private TestApplication app;
-    private LocalhostMachineProvisioningLocation localhostProvisioningLocation;
-
-    @BeforeMethod(alwaysRun = true)
-    public void setUp() throws Exception {
-        localhostProvisioningLocation = new LocalhostMachineProvisioningLocation();
-        app = TestApplication.Factory.newManagedInstanceForTests();
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() throws Exception {
-        if (app != null) Entities.destroyAll(app.getManagementContext());
-    }
-
-
-    @Test(groups = "Integration")
-    public void testCanStartAndStop() throws Exception {
-        RiakNode entity = app.createAndManageChild(EntitySpec.create(RiakNode.class)
-                .configure(RiakNode.SUGGESTED_VERSION, "2.1.1"));
-        app.start(ImmutableList.of(localhostProvisioningLocation));
-
-        EntityTestUtils.assertAttributeEqualsEventually(entity, Startable.SERVICE_UP, true);
-        entity.stop();
-        assertFalse(entity.getAttribute(Startable.SERVICE_UP));
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/riak/RiakNodeSoftlayerLiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/riak/RiakNodeSoftlayerLiveTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/riak/RiakNodeSoftlayerLiveTest.java
deleted file mode 100644
index 123ef7f..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/riak/RiakNodeSoftlayerLiveTest.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.riak;
-
-import org.testng.annotations.BeforeMethod;
-
-import com.google.common.collect.ImmutableList;
-
-import brooklyn.entity.AbstractSoftlayerLiveTest;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.location.Location;
-import brooklyn.test.EntityTestUtils;
-
-public class RiakNodeSoftlayerLiveTest extends AbstractSoftlayerLiveTest {
-
-    @BeforeMethod(alwaysRun=true)
-    public void setUp() throws Exception {
-        super.setUp();
-    }
-
-    @Override
-    protected void doTest(Location loc) throws Exception {
-        RiakNode entity = app.createAndManageChild(EntitySpec.create(RiakNode.class)
-                .configure(RiakNode.SUGGESTED_VERSION, "2.1.1"));
-        app.start(ImmutableList.of(loc));
-
-        EntityTestUtils.assertAttributeEqualsEventually(entity, RiakNode.SERVICE_UP, true);
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/solr/AbstractSolrServerTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/solr/AbstractSolrServerTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/solr/AbstractSolrServerTest.java
deleted file mode 100644
index de77a32..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/solr/AbstractSolrServerTest.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.solr;
-
-import org.testng.annotations.BeforeMethod;
-
-import brooklyn.entity.BrooklynAppLiveTestSupport;
-import brooklyn.location.Location;
-
-/**
- * Solr test framework for integration and live tests.
- */
-public class AbstractSolrServerTest extends BrooklynAppLiveTestSupport {
-
-    protected Location testLocation;
-    protected SolrServer solr;
-
-    @BeforeMethod(alwaysRun = true)
-    @Override
-    public void setUp() throws Exception {
-        super.setUp();
-        testLocation = app.newLocalhostProvisioningLocation();
-    }
-
-}

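None of the files shown in this commit actually extends this base class, so for orientation here is a minimal sketch of a concrete test built on it (the class and method names are illustrative, not part of this commit; package and helper names follow the pre-rename files shown above):

    package brooklyn.entity.nosql.solr;

    import org.testng.annotations.Test;

    import com.google.common.collect.ImmutableList;

    import brooklyn.entity.proxying.EntitySpec;
    import brooklyn.entity.trait.Startable;
    import brooklyn.test.EntityTestUtils;

    public class SolrServerSmokeIntegrationTest extends AbstractSolrServerTest {

        @Test(groups = "Integration")
        public void testStartsAndReportsUp() throws Exception {
            // 'app', 'solr' and 'testLocation' are all provided by the base classes
            solr = app.createAndManageChild(EntitySpec.create(SolrServer.class));
            app.start(ImmutableList.of(testLocation));

            EntityTestUtils.assertAttributeEqualsEventually(solr, Startable.SERVICE_UP, true);
        }
    }
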
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/solr/SolrJSupport.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/solr/SolrJSupport.java b/software/nosql/src/test/java/brooklyn/entity/nosql/solr/SolrJSupport.java
deleted file mode 100644
index d8bbd81..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/solr/SolrJSupport.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.solr;
-
-import java.util.Map;
-
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrInputDocument;
-
-import brooklyn.entity.basic.Attributes;
-
-/**
- * Solr testing using SolrJ API.
- */
-public class SolrJSupport {
-
-    private final HttpSolrServer server;
-    
-    public SolrJSupport(SolrServer node, String core) {
-        this(node.getAttribute(Attributes.HOSTNAME), node.getSolrPort(), core);
-    }
-    
-    public SolrJSupport(String hostname, int solrPort, String core) {
-        server = new HttpSolrServer(String.format("http://%s:%d/solr/%s", hostname, solrPort, core));
-        server.setMaxRetries(1);
-        server.setConnectionTimeout(5000);
-        server.setSoTimeout(5000);
-    }
-
-    public void commit() throws Exception {
-        server.commit();
-    }
-
-    public void addDocument(Map<String, Object> fields) throws Exception {
-        SolrInputDocument doc = new SolrInputDocument();
-        for (String field : fields.keySet()) {
-            doc.setField(field, fields.get(field));
-        }
-        server.add(doc, 100);
-    }
-
-    public Iterable<SolrDocument> getDocuments() throws Exception {
-        SolrQuery solrQuery = new SolrQuery();
-        solrQuery.setQuery("*:*");
-        
-        return server.query(solrQuery).getResults();
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/solr/SolrServerEc2LiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/solr/SolrServerEc2LiveTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/solr/SolrServerEc2LiveTest.java
deleted file mode 100644
index 59dcc61..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/solr/SolrServerEc2LiveTest.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.solr;
-
-import static org.testng.Assert.assertEquals;
-import static org.testng.Assert.assertTrue;
-
-import org.apache.solr.common.SolrDocument;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.entity.AbstractEc2LiveTest;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.location.Location;
-import brooklyn.test.EntityTestUtils;
-import brooklyn.util.collections.MutableMap;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Iterables;
-
-public class SolrServerEc2LiveTest extends AbstractEc2LiveTest {
-
-    private static final Logger log = LoggerFactory.getLogger(SolrServerEc2LiveTest.class);
-
-    @Override
-    protected void doTest(Location loc) throws Exception {
-        log.info("Testing Solr on {}", loc);
-
-        SolrServer solr = app.createAndManageChild(EntitySpec.create(SolrServer.class)
-                .configure(SolrServer.SOLR_CORE_CONFIG, ImmutableMap.of("example", "classpath://solr/example.tgz")));
-        app.start(ImmutableList.of(loc));
-
-        EntityTestUtils.assertAttributeEqualsEventually(solr, Startable.SERVICE_UP, true);
-
-        SolrJSupport client = new SolrJSupport(solr, "example");
-
-        Iterable<SolrDocument> results = client.getDocuments();
-        assertTrue(Iterables.isEmpty(results));
-
-        client.addDocument(MutableMap.<String, Object>of("id", "1", "description", "first"));
-        client.addDocument(MutableMap.<String, Object>of("id", "2", "description", "second"));
-        client.addDocument(MutableMap.<String, Object>of("id", "3", "description", "third"));
-        client.commit();
-
-        results = client.getDocuments();
-        assertEquals(Iterables.size(results), 3);
-    }
-}


[17/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraClusterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraClusterImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraClusterImpl.java
new file mode 100644
index 0000000..7036285
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraClusterImpl.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+/**
+ * @deprecated since 0.7.0; use {@link CassandraDatacenter} which is equivalent but has
+ * a less ambiguous name; <em>Cluster</em> in Cassandra corresponds to what Brooklyn terms a <em>Fabric</em>.
+ */
+@Deprecated
+public class CassandraClusterImpl extends CassandraDatacenterImpl implements CassandraCluster {
+}

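In practice the deprecation amounts to a one-line type change wherever the entity is specified; a sketch, assuming the EntitySpec pattern used throughout this commit:

    // Before (deprecated; "Cluster" is ambiguous in Cassandra terms):
    app.createAndManageChild(EntitySpec.create(CassandraCluster.class));

    // After:
    app.createAndManageChild(EntitySpec.create(CassandraDatacenter.class));
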
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenter.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenter.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenter.java
new file mode 100644
index 0000000..7ef646f
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenter.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import java.math.BigInteger;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.brooklyn.catalog.Catalog;
+import org.apache.brooklyn.entity.nosql.cassandra.TokenGenerators.PosNeg63TokenGenerator;
+
+import brooklyn.config.ConfigKey;
+import brooklyn.entity.Entity;
+import brooklyn.entity.annotation.Effector;
+import brooklyn.entity.basic.ConfigKeys;
+import brooklyn.entity.basic.MethodEffector;
+import brooklyn.entity.database.DatastoreMixins;
+import brooklyn.entity.effector.Effectors;
+import brooklyn.entity.group.DynamicCluster;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
+import brooklyn.event.basic.Sensors;
+import brooklyn.util.flags.SetFromFlag;
+import brooklyn.util.time.Duration;
+
+import com.google.common.base.Supplier;
+import com.google.common.collect.Multimap;
+import com.google.common.reflect.TypeToken;
+
+/**
+ * A group of {@link CassandraNode}s -- based on Brooklyn's {@link DynamicCluster} 
+ * (though it is a "Datacenter" in Cassandra terms, where Cassandra's "cluster" corresponds
+ * to a Brooklyn Fabric, cf {@link CassandraFabric}). 
+ * The Datacenter can be resized, manually or by policy if required.
+ * Tokens are selected intelligently.
+ * <p>
+ * Note that due to how Cassandra assumes ports are the same across a cluster,
+ * it is <em>NOT</em> possible to deploy a cluster of size larger than 1 to localhost.
+ * (Some exploratory work has been done to use different 127.0.0.x IP's for localhost,
+ * and there is evidence this could be made to work.)
+ */
+@Catalog(name="Apache Cassandra Datacenter Cluster", description="Cassandra is a highly scalable, eventually " +
+        "consistent, distributed, structured key-value store which provides a ColumnFamily-based data model " +
+        "richer than typical key/value systems", iconUrl="classpath:///cassandra-logo.jpeg")
+@ImplementedBy(CassandraDatacenterImpl.class)
+public interface CassandraDatacenter extends DynamicCluster, DatastoreMixins.HasDatastoreUrl, DatastoreMixins.CanExecuteScript {
+
+    // FIXME datacenter name -- also CASS_CLUSTER_NODES should be CASS_DC_NODES
+    @SetFromFlag("clusterName")
+    BasicAttributeSensorAndConfigKey<String> CLUSTER_NAME = new BasicAttributeSensorAndConfigKey<String>(String.class, "cassandra.cluster.name", "Name of the Cassandra cluster", "BrooklynCluster");
+
+    @SetFromFlag("snitchName")
+    ConfigKey<String> ENDPOINT_SNITCH_NAME = ConfigKeys.newStringConfigKey("cassandra.cluster.snitchName", "Type of the Cassandra snitch", "SimpleSnitch");
+
+    @SetFromFlag("seedSupplier")
+    @SuppressWarnings("serial")
+    ConfigKey<Supplier<Set<Entity>>> SEED_SUPPLIER = ConfigKeys.newConfigKey(new TypeToken<Supplier<Set<Entity>>>() { }, "cassandra.cluster.seedSupplier", "For determining the seed nodes", null);
+
+    @SuppressWarnings("serial")
+    @SetFromFlag("tokenGeneratorClass")
+    ConfigKey<Class<? extends TokenGenerator>> TOKEN_GENERATOR_CLASS = ConfigKeys.newConfigKey(
+        new TypeToken<Class<? extends TokenGenerator>>() {}, "cassandra.cluster.tokenGenerator.class", "For determining the tokens of nodes", 
+        PosNeg63TokenGenerator.class);
+
+    @SetFromFlag("tokenShift")
+    ConfigKey<BigInteger> TOKEN_SHIFT = ConfigKeys.newConfigKey(BigInteger.class, "cassandra.cluster.tokenShift", 
+        "Delta applied to all tokens generated for this Cassandra datacenter, "
+        + "useful when configuring multiple datacenters which should be shifted; "
+        + "if not set, a random shift is applied. (Pass 0 to prevent any shift.)", null);
+
+    ConfigKey<Boolean> USE_VNODES = ConfigKeys.newBooleanConfigKey(
+            "cassandra.cluster.useVnodes",
+            "Determines whether to use vnodes; if doing so, tokens will not be explicitly assigned to nodes in the cluster",
+            false);
+
+    /**
+     * num_tokens will automatically be reset to 1 for each node if {@link #USE_VNODES} is false. 
+     */
+    ConfigKey<Integer> NUM_TOKENS_PER_NODE = ConfigKeys.newIntegerConfigKey("cassandra.numTokensPerNode",
+            "Number of tokens per node; if using vnodes, should set this to a value like 256; will be overridden to 1 if USE_VNODES==false",
+            256);
+    
+    /**
+     * Additional time to wait, when starting, after the nodes in the cluster are up
+     * before announcing the cluster as up.
+     * <p>
+     * Useful to ensure nodes have synchronized.
+     * <p>
+     * On 1.2.2 this could be as much as 120s when using 2 seed nodes,
+     * or just a few seconds with 1 seed node. On 1.2.9 it seems a few
+     * seconds is sufficient even with 2 seed nodes.
+     */
+    @SetFromFlag("delayBeforeAdvertisingCluster")
+    ConfigKey<Duration> DELAY_BEFORE_ADVERTISING_CLUSTER = ConfigKeys.newConfigKey(Duration.class, "cassandra.cluster.delayBeforeAdvertisingCluster", "Delay after cluster is started before checking and advertising its availability", Duration.TEN_SECONDS);
+
+    @SuppressWarnings("serial")
+    AttributeSensor<Multimap<String,Entity>> DATACENTER_USAGE = Sensors.newSensor(new TypeToken<Multimap<String,Entity>>() { }, "cassandra.cluster.datacenterUsages", "Current set of datacenters in use, with nodes in each");
+
+    @SuppressWarnings("serial")
+    AttributeSensor<Set<String>> DATACENTERS = Sensors.newSensor(new TypeToken<Set<String>>() { }, "cassandra.cluster.datacenters", "Current set of datacenters in use");
+
+    AttributeSensor<Boolean> HAS_PUBLISHED_SEEDS = Sensors.newBooleanSensor("cassandra.cluster.seeds.hasPublished", "Whether we have published any seeds");
+
+    @SuppressWarnings("serial")
+    AttributeSensor<Set<Entity>> CURRENT_SEEDS = Sensors.newSensor(new TypeToken<Set<Entity>>() { }, "cassandra.cluster.seeds.current", "Current set of seeds to use to bootstrap the cluster");
+
+    AttributeSensor<String> HOSTNAME = Sensors.newStringSensor("cassandra.cluster.hostname", "Hostname to connect to cluster with");
+
+    @SuppressWarnings("serial")
+    AttributeSensor<List<String>> CASSANDRA_CLUSTER_NODES = Sensors.newSensor(new TypeToken<List<String>>() {},
+        "cassandra.cluster.nodes", "List of host:port of all active nodes in the cluster (thrift port, and public hostname/IP)");
+
+    AttributeSensor<Integer> THRIFT_PORT = Sensors.newIntegerSensor("cassandra.cluster.thrift.port", "Cassandra Thrift RPC port to connect to cluster with");
+
+    AttributeSensor<Long> FIRST_NODE_STARTED_TIME_UTC = Sensors.newLongSensor("cassandra.cluster.first.node.started.utc", "Time (UTC) when the first node was started");
+    @SuppressWarnings("serial")
+    AttributeSensor<List<Entity>> QUEUED_START_NODES = Sensors.newSensor(new TypeToken<List<Entity>>() {}, "cassandra.cluster.start.nodes.queued",
+        "Nodes queued for starting (for sequential start)");
+    
+    AttributeSensor<Integer> SCHEMA_VERSION_COUNT = Sensors.newIntegerSensor("cassandra.cluster.schema.versions.count",
+            "Number of different schema versions in the cluster; should be 1 for a healthy cluster, 0 when off; " +
+            "2 and above indicats a Schema Disagreement Error (and keyspace access may fail)");
+
+    AttributeSensor<Long> READ_PENDING = Sensors.newLongSensor("cassandra.cluster.read.pending", "Current pending ReadStage tasks");
+    AttributeSensor<Integer> READ_ACTIVE = Sensors.newIntegerSensor("cassandra.cluster.read.active", "Current active ReadStage tasks");
+    AttributeSensor<Long> WRITE_PENDING = Sensors.newLongSensor("cassandra.cluster.write.pending", "Current pending MutationStage tasks");
+    AttributeSensor<Integer> WRITE_ACTIVE = Sensors.newIntegerSensor("cassandra.cluster.write.active", "Current active MutationStage tasks");
+
+    AttributeSensor<Long> THRIFT_PORT_LATENCY_PER_NODE = Sensors.newLongSensor("cassandra.cluster.thrift.latency.perNode", "Latency for thrift port connection averaged over all nodes (ms)");
+    AttributeSensor<Double> READS_PER_SECOND_LAST_PER_NODE = Sensors.newDoubleSensor("cassandra.reads.perSec.last.perNode", "Reads/sec (last datapoint) averaged over all nodes");
+    AttributeSensor<Double> WRITES_PER_SECOND_LAST_PER_NODE = Sensors.newDoubleSensor("cassandra.write.perSec.last.perNode", "Writes/sec (last datapoint) averaged over all nodes");
+    AttributeSensor<Double> PROCESS_CPU_TIME_FRACTION_LAST_PER_NODE = Sensors.newDoubleSensor("cassandra.cluster.metrics.processCpuTime.fraction.perNode", "Fraction of CPU time used (percentage reported by JMX), averaged over all nodes");
+
+    AttributeSensor<Double> READS_PER_SECOND_IN_WINDOW_PER_NODE = Sensors.newDoubleSensor("cassandra.reads.perSec.windowed.perNode", "Reads/sec (over time window) averaged over all nodes");
+    AttributeSensor<Double> WRITES_PER_SECOND_IN_WINDOW_PER_NODE = Sensors.newDoubleSensor("cassandra.writes.perSec.windowed.perNode", "Writes/sec (over time window) averaged over all nodes");
+    AttributeSensor<Double> THRIFT_PORT_LATENCY_IN_WINDOW_PER_NODE = Sensors.newDoubleSensor("cassandra.thrift.latency.windowed.perNode", "Latency for thrift port (ms, over time window) averaged over all nodes");
+    AttributeSensor<Double> PROCESS_CPU_TIME_FRACTION_IN_WINDOW_PER_NODE = Sensors.newDoubleSensor("cassandra.cluster.metrics.processCpuTime.fraction.windowed", "Fraction of CPU time used (percentage, over time window), averaged over all nodes");
+
+    MethodEffector<Void> UPDATE = new MethodEffector<Void>(CassandraDatacenter.class, "update");
+
+    brooklyn.entity.Effector<String> EXECUTE_SCRIPT = Effectors.effector(DatastoreMixins.EXECUTE_SCRIPT)
+        .description("executes the given script contents using cassandra-cli")
+        .buildAbstract();
+
+    /**
+     * Sets the number of nodes used to seed the cluster.
+     * <p>
+     * Version 1.2.2 is buggy and requires a big delay for a two-node cluster (where both are seeds) to reconcile;
+     * with 1.2.9 this seems fine, with just a few seconds' delay after starting.
+     *
+     * @see <a href="http://stackoverflow.com/questions/6770894/schemadisagreementexception/18639005" />
+     */
+    int DEFAULT_SEED_QUORUM = 2;
+
+    /**
+     * Can insert a delay after the first node comes up.
+     * <p>
+     * Reportedly not needed with 1.2.9, but we are still seeing some seed failures so re-introducing it.
+     * (This does not seem to help with the bug in 1.2.2.)
+     */
+    Duration DELAY_AFTER_FIRST = Duration.ONE_MINUTE;
+
+    /**
+     * If set (i.e. non-null), this waits the indicated time after a successful launch of one node
+     * before starting the next.  (If it is null, all nodes start simultaneously,
+     * possibly after the DELAY_AFTER_FIRST.)
+     * <p>
+     * When subsequent nodes start simultaneously, we occasionally see schema disagreement problems;
+     * if nodes start sequentially, we occasionally get "no sources for (tokenRange]" problems.
+     * Either way the node stops. Ideally this can be solved at the Cassandra level,
+     * but if not, we will have to introduce some restarts at the Cassandra nodes (which does seem
+     * to resolve the problems).
+     */
+    Duration DELAY_BETWEEN_STARTS = null;
+    
+    /**
+     * Whether to wait for the first node to start up.
+     * <p>
+     * It is not clear whether this is needed; it should be tested in an environment where not all
+     * nodes are seed nodes, to see what happens if non-seed nodes start before the seed nodes.
+     */
+    boolean WAIT_FOR_FIRST = true;
+
+    @Effector(description="Updates the cluster members")
+    void update();
+
+    /**
+     * The name of the cluster.
+     */
+    String getClusterName();
+
+    Set<Entity> gatherPotentialSeeds();
+
+    Set<Entity> gatherPotentialRunningSeeds();
+
+    String executeScript(String commands);
+
+}

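For orientation, a rough sketch of consuming this interface from Java, following the EntitySpec pattern the test classes in this commit use (the sizes and names below are illustrative assumptions, not values taken from this commit):

    import brooklyn.entity.proxying.EntitySpec;

    import com.google.common.collect.ImmutableList;

    // 'app' and 'loc' are assumed to be set up as in the live/integration tests in this commit.
    CassandraDatacenter datacenter = app.createAndManageChild(
            EntitySpec.create(CassandraDatacenter.class)
                    .configure(CassandraDatacenter.INITIAL_SIZE, 3)            // inherited cluster-size key
                    .configure(CassandraDatacenter.CLUSTER_NAME, "DemoCluster")
                    .configure(CassandraDatacenter.USE_VNODES, true)           // let vnodes assign tokens
                    .configure(CassandraDatacenter.NUM_TOKENS_PER_NODE, 256));
    app.start(ImmutableList.of(loc));

    // Aggregated sensors such as READS_PER_SECOND_LAST_PER_NODE are then published across
    // the members, and the cluster topology can be refreshed via the UPDATE effector.
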
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenterImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenterImpl.java
new file mode 100644
index 0000000..baa9a17
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraDatacenterImpl.java
@@ -0,0 +1,625 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import javax.annotation.Nullable;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.enricher.Enrichers;
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.Attributes;
+import brooklyn.entity.basic.DynamicGroup;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.basic.EntityPredicates;
+import brooklyn.entity.basic.Lifecycle;
+import brooklyn.entity.basic.ServiceStateLogic.ServiceNotUpLogic;
+import brooklyn.entity.effector.EffectorBody;
+import brooklyn.entity.group.AbstractMembershipTrackingPolicy;
+import brooklyn.entity.group.DynamicClusterImpl;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.SensorEvent;
+import brooklyn.event.SensorEventListener;
+import brooklyn.location.Location;
+import brooklyn.location.basic.Machines;
+import brooklyn.policy.PolicySpec;
+import brooklyn.util.ResourceUtils;
+import brooklyn.util.collections.MutableList;
+import brooklyn.util.collections.MutableMap;
+import brooklyn.util.collections.MutableSet;
+import brooklyn.util.config.ConfigBag;
+import brooklyn.util.text.Strings;
+import brooklyn.util.time.Time;
+
+import com.google.common.base.Objects;
+import com.google.common.base.Optional;
+import com.google.common.base.Supplier;
+import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.LinkedHashMultimap;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Sets;
+import com.google.common.net.HostAndPort;
+
+/**
+ * Implementation of {@link CassandraDatacenter}.
+ * <p>
+ * Several subtleties to note:
+ * - a node may take some time after it is running and serving JMX to actually be contactable on its thrift port
+ *   (so we wait for thrift port to be contactable)
+ * - sometimes new nodes take a while to peer, and/or take a while to get a consistent schema
+ *   (each up to 1m; often very close to the 1m) 
+ */
+public class CassandraDatacenterImpl extends DynamicClusterImpl implements CassandraDatacenter {
+
+    /*
+     * TODO Seed management is hard!
+     *  - The ServiceRestarter is not doing customize(), so is not refreshing the seeds in cassandra.yaml.
+     *    If we have two nodes that were seeds for each other and they both restart at the same time, we'll have a split brain.
+     */
+    
+    private static final Logger log = LoggerFactory.getLogger(CassandraDatacenterImpl.class);
+
+    // Mutex for synchronizing during re-size operations
+    private final Object mutex = new Object[0];
+
+    private final Supplier<Set<Entity>> defaultSeedSupplier = new Supplier<Set<Entity>>() {
+        // Mutex for (re)calculating our seeds
+        // TODO is this very dangerous?! Calling out to SeedTracker, which calls out to alien getAttribute()/getConfig(). But I think that's ok.
+        // TODO might not need mutex? previous race was being caused by something else, other than concurrent calls!
+        private final Object seedMutex = new Object();
+        
+        @Override
+        public Set<Entity> get() {
+            synchronized (seedMutex) {
+                boolean hasPublishedSeeds = Boolean.TRUE.equals(getAttribute(HAS_PUBLISHED_SEEDS));
+                int quorumSize = getSeedQuorumSize();
+                Set<Entity> potentialSeeds = gatherPotentialSeeds();
+                Set<Entity> potentialRunningSeeds = gatherPotentialRunningSeeds();
+                boolean stillWaitingForQuorum = (!hasPublishedSeeds) && (potentialSeeds.size() < quorumSize);
+                
+                if (stillWaitingForQuorum) {
+                    if (log.isDebugEnabled()) log.debug("Not refreshed seeds of cluster {}, because still waiting for quorum (need {}; have {} potentials)", new Object[] {CassandraDatacenterImpl.class, quorumSize, potentialSeeds.size()});
+                    return ImmutableSet.of();
+                } else if (hasPublishedSeeds) {
+                    Set<Entity> currentSeeds = getAttribute(CURRENT_SEEDS);
+                    if (getAttribute(SERVICE_STATE_ACTUAL) == Lifecycle.STARTING) {
+                        if (Sets.intersection(currentSeeds, potentialSeeds).isEmpty()) {
+                            log.warn("Cluster {} lost all its seeds while starting! Subsequent failure likely, but changing seeds during startup would risk split-brain: seeds={}", new Object[] {CassandraDatacenterImpl.this, currentSeeds});
+                        }
+                        return currentSeeds;
+                    } else if (potentialRunningSeeds.isEmpty()) {
+                        // TODO Could be race where nodes have only just returned from start() and are about to 
+                        // transition to serviceUp; so don't just abandon all our seeds!
+                        log.warn("Cluster {} has no running seeds (yet?); leaving seeds as-is; but risks split-brain if these seeds come back up!", new Object[] {CassandraDatacenterImpl.this});
+                        return currentSeeds;
+                    } else {
+                        Set<Entity> result = trim(quorumSize, potentialRunningSeeds);
+                        log.debug("Cluster {} updating seeds: chosen={}; potentialRunning={}", new Object[] {CassandraDatacenterImpl.this, result, potentialRunningSeeds});
+                        return result;
+                    }
+                } else {
+                    Set<Entity> result = trim(quorumSize, potentialSeeds);
+                    if (log.isDebugEnabled()) log.debug("Cluster {} has reached seed quorum: seeds={}", new Object[] {CassandraDatacenterImpl.this, result});
+                    return result;
+                }
+            }
+        }
+        private Set<Entity> trim(int num, Set<Entity> contenders) {
+            // Prefer existing seeds wherever possible; otherwise accept any other contenders
+            Set<Entity> currentSeeds = (getAttribute(CURRENT_SEEDS) != null) ? getAttribute(CURRENT_SEEDS) : ImmutableSet.<Entity>of();
+            Set<Entity> result = Sets.newLinkedHashSet();
+            result.addAll(Sets.intersection(currentSeeds, contenders));
+            result.addAll(contenders);
+            return ImmutableSet.copyOf(Iterables.limit(result, num));
+        }
+    };
+    
+    protected SeedTracker seedTracker = new SeedTracker();
+    protected TokenGenerator tokenGenerator = null;
+
+    public CassandraDatacenterImpl() {
+    }
+
+    @Override
+    public void init() {
+        super.init();
+
+        /*
+         * subscribe to hostname, and keep an accurate set of current seeds in a sensor;
+         * then at nodes we set the initial seeds to be the current seeds when ready (non-empty)
+         */
+        subscribeToMembers(this, Attributes.HOSTNAME, new SensorEventListener<String>() {
+            @Override
+            public void onEvent(SensorEvent<String> event) {
+                seedTracker.onHostnameChanged(event.getSource(), event.getValue());
+            }
+        });
+        subscribe(this, DynamicGroup.MEMBER_REMOVED, new SensorEventListener<Entity>() {
+            @Override public void onEvent(SensorEvent<Entity> event) {
+                seedTracker.onMemberRemoved(event.getValue());
+            }
+        });
+        subscribeToMembers(this, Attributes.SERVICE_UP, new SensorEventListener<Boolean>() {
+            @Override
+            public void onEvent(SensorEvent<Boolean> event) {
+                seedTracker.onServiceUpChanged(event.getSource(), event.getValue());
+            }
+        });
+        subscribeToMembers(this, Attributes.SERVICE_STATE_ACTUAL, new SensorEventListener<Lifecycle>() {
+            @Override
+            public void onEvent(SensorEvent<Lifecycle> event) {
+                // trigger a recomputation also when the lifecycle state changes,
+                // because a seed might not have been ruled inviable when service-up went true
+                // if the service state was not yet running
+                seedTracker.onServiceUpChanged(event.getSource(), Lifecycle.RUNNING==event.getValue());
+            }
+        });
+        
+        // Track the datacenters for this cluster
+        subscribeToMembers(this, CassandraNode.DATACENTER_NAME, new SensorEventListener<String>() {
+            @Override
+            public void onEvent(SensorEvent<String> event) {
+                Entity member = event.getSource();
+                String dcName = event.getValue();
+                if (dcName != null) {
+                    Multimap<String, Entity> datacenterUsage = getAttribute(DATACENTER_USAGE);
+                    Multimap<String, Entity> mutableDatacenterUsage = (datacenterUsage == null) ? LinkedHashMultimap.<String, Entity>create() : LinkedHashMultimap.create(datacenterUsage);
+                    Optional<String> oldDcName = getKeyOfVal(mutableDatacenterUsage, member);
+                    if (!(oldDcName.isPresent() && dcName.equals(oldDcName.get()))) {
+                        mutableDatacenterUsage.values().remove(member);
+                        mutableDatacenterUsage.put(dcName, member);
+                        setAttribute(DATACENTER_USAGE, mutableDatacenterUsage);
+                        setAttribute(DATACENTERS, Sets.newLinkedHashSet(mutableDatacenterUsage.keySet()));
+                    }
+                }
+            }
+            private <K,V> Optional<K> getKeyOfVal(Multimap<K,V> map, V val) {
+                for (Map.Entry<K,V> entry : map.entries()) {
+                    if (Objects.equal(val, entry.getValue())) {
+                        return Optional.of(entry.getKey());
+                    }
+                }
+                return Optional.absent();
+            }
+        });
+        subscribe(this, DynamicGroup.MEMBER_REMOVED, new SensorEventListener<Entity>() {
+            @Override public void onEvent(SensorEvent<Entity> event) {
+                Entity entity = event.getSource();
+                Multimap<String, Entity> datacenterUsage = getAttribute(DATACENTER_USAGE);
+                if (datacenterUsage != null && datacenterUsage.containsValue(entity)) {
+                    Multimap<String, Entity> mutableDatacenterUsage = LinkedHashMultimap.create(datacenterUsage);
+                    mutableDatacenterUsage.values().remove(entity);
+                    setAttribute(DATACENTER_USAGE, mutableDatacenterUsage);
+                    setAttribute(DATACENTERS, Sets.newLinkedHashSet(mutableDatacenterUsage.keySet()));
+                }
+            }
+        });
+        
+        getMutableEntityType().addEffector(EXECUTE_SCRIPT, new EffectorBody<String>() {
+            @Override
+            public String call(ConfigBag parameters) {
+                return executeScript((String)parameters.getStringKey("commands"));
+            }
+        });
+    }
+    
+    protected Supplier<Set<Entity>> getSeedSupplier() {
+        Supplier<Set<Entity>> seedSupplier = getConfig(SEED_SUPPLIER);
+        return (seedSupplier == null) ? defaultSeedSupplier : seedSupplier;
+    }
+    
+    protected boolean useVnodes() {
+        return Boolean.TRUE.equals(getConfig(USE_VNODES));
+    }
+    
+    protected synchronized TokenGenerator getTokenGenerator() {
+        if (tokenGenerator!=null) 
+            return tokenGenerator;
+        
+        try {
+            tokenGenerator = getConfig(TOKEN_GENERATOR_CLASS).newInstance();
+            
+            BigInteger shift = getConfig(TOKEN_SHIFT);
+            if (shift==null) 
+                shift = BigDecimal.valueOf(Math.random()).multiply(
+                    new BigDecimal(tokenGenerator.range())).toBigInteger();
+            tokenGenerator.setOrigin(shift);
+            
+            return tokenGenerator;
+        } catch (Exception e) {
+            throw Throwables.propagate(e);
+        }        
+    }
+    
+    protected int getSeedQuorumSize() {
+        Integer quorumSize = getConfig(INITIAL_QUORUM_SIZE);
+        if (quorumSize!=null && quorumSize>0)
+            return quorumSize;
+        // default 2 is recommended, unless initial size is smaller
+        return Math.min(Math.max(getConfig(INITIAL_SIZE), 1), DEFAULT_SEED_QUORUM);
+    }
+
+    @Override
+    public Set<Entity> gatherPotentialSeeds() {
+        return seedTracker.gatherPotentialSeeds();
+    }
+
+    @Override
+    public Set<Entity> gatherPotentialRunningSeeds() {
+        return seedTracker.gatherPotentialRunningSeeds();
+    }
+
+    /**
+     * Sets the default {@link #MEMBER_SPEC} to describe the Cassandra nodes.
+     */
+    @Override
+    protected EntitySpec<?> getMemberSpec() {
+        return getConfig(MEMBER_SPEC, EntitySpec.create(CassandraNode.class));
+    }
+
+    @Override
+    public String getClusterName() {
+        return getAttribute(CLUSTER_NAME);
+    }
+
+    @Override
+    public Collection<Entity> grow(int delta) {
+        if (useVnodes()) {
+            // nothing to do for token generator
+        } else {
+            if (getCurrentSize() == 0) {
+                getTokenGenerator().growingCluster(delta);
+            }
+        }
+        return super.grow(delta);
+    }
+    
+    @SuppressWarnings("deprecation")
+    @Override
+    protected Entity createNode(@Nullable Location loc, Map<?,?> flags) {
+        Map<Object, Object> allflags = MutableMap.copyOf(flags);
+        
+        if ((flags.containsKey(CassandraNode.TOKEN) || flags.containsKey("token")) || (flags.containsKey(CassandraNode.TOKENS) || flags.containsKey("tokens"))) {
+            // leave token config as-is
+        } else if (!useVnodes()) {
+            BigInteger token = getTokenGenerator().newToken();
+            allflags.put(CassandraNode.TOKEN, token);
+        }
+
+        if ((flags.containsKey(CassandraNode.NUM_TOKENS_PER_NODE) || flags.containsKey("numTokensPerNode"))) {
+            // leave num_tokens as-is
+        } else if (useVnodes()) {
+            Integer numTokensPerNode = getConfig(NUM_TOKENS_PER_NODE);
+            allflags.put(CassandraNode.NUM_TOKENS_PER_NODE, numTokensPerNode);
+        } else {
+            allflags.put(CassandraNode.NUM_TOKENS_PER_NODE, 1);
+        }
+        
+        return super.createNode(loc, allflags);
+    }
+
+    @Override
+    protected Entity replaceMember(Entity member, Location memberLoc, Map<?, ?> extraFlags) {
+        Set<BigInteger> oldTokens = ((CassandraNode) member).getTokens();
+        Set<BigInteger> newTokens = (oldTokens != null && oldTokens.size() > 0) ? getTokenGenerator().getTokensForReplacementNode(oldTokens) : null;
+        return super.replaceMember(member, memberLoc,  MutableMap.copyOf(extraFlags).add(CassandraNode.TOKENS, newTokens));
+    }
+
+    @Override
+    public void start(Collection<? extends Location> locations) {
+        Machines.warnIfLocalhost(locations, "CassandraCluster does not support multiple nodes on localhost, " +
+                "due to assumptions Cassandra makes about the use of the same port numbers used across the cluster.");
+
+        // force this to be set - even if it is using the default
+        setAttribute(CLUSTER_NAME, getConfig(CLUSTER_NAME));
+        
+        super.start(locations);
+
+        connectSensors();
+
+        // TODO wait until all nodes which we think are up are consistent 
+        // i.e. all known nodes use the same schema, as reported by
+        // SshEffectorTasks.ssh("echo \"describe cluster;\" | /bin/cassandra-cli");
+        // once we've done that we can revert to using 2 seed nodes.
+        // see CassandraCluster.DEFAULT_SEED_QUORUM
+        // (also ensure the cluster is ready if we are about to run a creation script)
+        Time.sleep(getConfig(DELAY_BEFORE_ADVERTISING_CLUSTER));
+
+        String scriptUrl = getConfig(CassandraNode.CREATION_SCRIPT_URL);
+        if (Strings.isNonEmpty(scriptUrl)) {
+            executeScript(new ResourceUtils(this).getResourceAsString(scriptUrl));
+        }
+
+        update();
+    }
+
+    protected void connectSensors() {
+        connectEnrichers();
+        
+        addPolicy(PolicySpec.create(MemberTrackingPolicy.class)
+                .displayName("Cassandra Cluster Tracker")
+                .configure("sensorsToTrack", ImmutableSet.of(Attributes.SERVICE_UP, Attributes.HOSTNAME, CassandraNode.THRIFT_PORT))
+                .configure("group", this));
+    }
+
+    public static class MemberTrackingPolicy extends AbstractMembershipTrackingPolicy {
+        @Override
+        protected void onEntityChange(Entity member) {
+            if (log.isDebugEnabled()) log.debug("Node {} updated in Cluster {}", member, this);
+            ((CassandraDatacenterImpl)entity).update();
+        }
+        @Override
+        protected void onEntityAdded(Entity member) {
+            if (log.isDebugEnabled()) log.debug("Node {} added to Cluster {}", member, this);
+            ((CassandraDatacenterImpl)entity).update();
+        }
+        @Override
+        protected void onEntityRemoved(Entity member) {
+            if (log.isDebugEnabled()) log.debug("Node {} removed from Cluster {}", member, this);
+            ((CassandraDatacenterImpl)entity).update();
+        }
+    };
+
+    @SuppressWarnings("unchecked")
+    protected void connectEnrichers() {
+        List<? extends List<? extends AttributeSensor<? extends Number>>> summingEnricherSetup = ImmutableList.of(
+                ImmutableList.of(CassandraNode.READ_ACTIVE, READ_ACTIVE),
+                ImmutableList.of(CassandraNode.READ_PENDING, READ_PENDING),
+                ImmutableList.of(CassandraNode.WRITE_ACTIVE, WRITE_ACTIVE),
+                ImmutableList.of(CassandraNode.WRITE_PENDING, WRITE_PENDING)
+        );
+        
+        List<? extends List<? extends AttributeSensor<? extends Number>>> averagingEnricherSetup = ImmutableList.of(
+                ImmutableList.of(CassandraNode.READS_PER_SECOND_LAST, READS_PER_SECOND_LAST_PER_NODE),
+                ImmutableList.of(CassandraNode.WRITES_PER_SECOND_LAST, WRITES_PER_SECOND_LAST_PER_NODE),
+                ImmutableList.of(CassandraNode.WRITES_PER_SECOND_IN_WINDOW, WRITES_PER_SECOND_IN_WINDOW_PER_NODE),
+                ImmutableList.of(CassandraNode.READS_PER_SECOND_IN_WINDOW, READS_PER_SECOND_IN_WINDOW_PER_NODE),
+                ImmutableList.of(CassandraNode.THRIFT_PORT_LATENCY, THRIFT_PORT_LATENCY_PER_NODE),
+                ImmutableList.of(CassandraNode.THRIFT_PORT_LATENCY_IN_WINDOW, THRIFT_PORT_LATENCY_IN_WINDOW_PER_NODE),
+                ImmutableList.of(CassandraNode.PROCESS_CPU_TIME_FRACTION_LAST, PROCESS_CPU_TIME_FRACTION_LAST_PER_NODE),
+                ImmutableList.of(CassandraNode.PROCESS_CPU_TIME_FRACTION_IN_WINDOW, PROCESS_CPU_TIME_FRACTION_IN_WINDOW_PER_NODE)
+        );
+        
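+        // Sum each member-level sensor (left) into the corresponding cluster-wide total (right).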
+        for (List<? extends AttributeSensor<? extends Number>> es : summingEnricherSetup) {
+            AttributeSensor<? extends Number> t = es.get(0);
+            AttributeSensor<? extends Number> total = es.get(1);
+            addEnricher(Enrichers.builder()
+                    .aggregating(t)
+                    .publishing(total)
+                    .fromMembers()
+                    .computingSum()
+                    .defaultValueForUnreportedSensors(null)
+                    .valueToReportIfNoSensors(null)
+                    .build());
+        }
+        
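+        // Average each member-level sensor (left) into the corresponding per-node cluster sensor (right).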
+        for (List<? extends AttributeSensor<? extends Number>> es : averagingEnricherSetup) {
+            AttributeSensor<Number> t = (AttributeSensor<Number>) es.get(0);
+            AttributeSensor<Double> average = (AttributeSensor<Double>) es.get(1);
+            addEnricher(Enrichers.builder()
+                    .aggregating(t)
+                    .publishing(average)
+                    .fromMembers()
+                    .computingAverage()
+                    .defaultValueForUnreportedSensors(null)
+                    .valueToReportIfNoSensors(null)
+                    .build());
+
+        }
+    }
+
+    @Override
+    public void stop() {
+        disconnectSensors();
+        
+        super.stop();
+    }
+    
+    protected void disconnectSensors() {
+    }
+
+    @Override
+    public void update() {
+        synchronized (mutex) {
+            // Update our seeds, as necessary
+            seedTracker.refreshSeeds();
+            
+            // Choose the first available cluster member to set host and port (and compute one-up)
+            Optional<Entity> upNode = Iterables.tryFind(getMembers(), EntityPredicates.attributeEqualTo(SERVICE_UP, Boolean.TRUE));
+
+            if (upNode.isPresent()) {
+                setAttribute(HOSTNAME, upNode.get().getAttribute(Attributes.HOSTNAME));
+                setAttribute(THRIFT_PORT, upNode.get().getAttribute(CassandraNode.THRIFT_PORT));
+
+                List<String> currentNodes = getAttribute(CASSANDRA_CLUSTER_NODES);
+                Set<String> oldNodes = (currentNodes != null) ? ImmutableSet.copyOf(currentNodes) : ImmutableSet.<String>of();
+                Set<String> newNodes = MutableSet.<String>of();
+                for (Entity member : getMembers()) {
+                    if (member instanceof CassandraNode && Boolean.TRUE.equals(member.getAttribute(SERVICE_UP))) {
+                        String hostname = member.getAttribute(Attributes.HOSTNAME);
+                        Integer thriftPort = member.getAttribute(CassandraNode.THRIFT_PORT);
+                        if (hostname != null && thriftPort != null) {
+                            newNodes.add(HostAndPort.fromParts(hostname, thriftPort).toString());
+                        }
+                    }
+                }
+                if (Sets.symmetricDifference(oldNodes, newNodes).size() > 0) {
+                    setAttribute(CASSANDRA_CLUSTER_NODES, MutableList.copyOf(newNodes));
+                }
+            } else {
+                setAttribute(HOSTNAME, null);
+                setAttribute(THRIFT_PORT, null);
+                setAttribute(CASSANDRA_CLUSTER_NODES, Collections.<String>emptyList());
+            }
+
+            ServiceNotUpLogic.updateNotUpIndicatorRequiringNonEmptyList(this, CASSANDRA_CLUSTER_NODES);
+        }
+    }
+    
+    /**
+     * For tracking our seeds. This gets fiddly! High-level logic is:
+     * <ul>
+     *   <li>If we have never reached quorum (i.e. have never published seeds), then continue to wait for quorum;
+     *       because entity-startup may be blocking for this. This is handled by the seedSupplier.
+     *   <li>If we previously reached quorum (i.e. have previously published seeds), then always update;
+     *       we never want stale/dead entities listed in our seeds.
+     *   <li>If an existing seed looks unhealthy, then replace it.
+     *   <li>If a new potential seed becomes available (and we're in need of more), then add it.
+     * </ul>
+     * 
+     * Also note that {@link CassandraFabric} can take over, because it knows about multiple sub-clusters!
+     * It will provide a different {@link CassandraDatacenter#SEED_SUPPLIER}. Each time we think that our seeds
+     * need to change, we call that. The fabric will call into {@link CassandraDatacenterImpl#gatherPotentialSeeds()}
+     * to find out what's available.
+     * 
+     * @author aled
+     */
+    protected class SeedTracker {
+        private final Map<Entity, Boolean> memberUpness = Maps.newLinkedHashMap();
+        
+        public void onMemberRemoved(Entity member) {
+            Set<Entity> seeds = getSeeds();
+            boolean maybeRemove = seeds.contains(member);
+            memberUpness.remove(member);
+            
+            if (maybeRemove) {
+                refreshSeeds();
+            } else {
+                if (log.isTraceEnabled()) log.trace("Seeds considered stable for cluster {} (node {} removed)", new Object[] {CassandraDatacenterImpl.this, member});
+                return;
+            }
+        }
+        public void onHostnameChanged(Entity member, String hostname) {
+            Set<Entity> seeds = getSeeds();
+            int quorum = getSeedQuorumSize();
+            boolean isViable = isViableSeed(member);
+            boolean maybeAdd = isViable && seeds.size() < quorum;
+            boolean maybeRemove = seeds.contains(member) && !isViable;
+            
+            if (maybeAdd || maybeRemove) {
+                refreshSeeds();
+            } else {
+                if (log.isTraceEnabled()) log.trace("Seeds considered stable for cluster {} (node {} changed hostname {})", new Object[] {CassandraDatacenterImpl.this, member, hostname});
+                return;
+            }
+        }
+        public void onServiceUpChanged(Entity member, Boolean serviceUp) {
+            Boolean oldVal = memberUpness.put(member, serviceUp);
+            if (Objects.equal(oldVal, serviceUp)) {
+                if (log.isTraceEnabled()) log.trace("Ignoring duplicate service-up in "+CassandraDatacenterImpl.this+" for "+member+", "+serviceUp);
+            }
+            Set<Entity> seeds = getSeeds();
+            int quorum = getSeedQuorumSize();
+            boolean isViable = isViableSeed(member);
+            boolean maybeAdd = isViable && seeds.size() < quorum;
+            boolean maybeRemove = seeds.contains(member) && !isViable;
+            
+            if (log.isDebugEnabled())
+                log.debug("Considering refresh of seeds for "+CassandraDatacenterImpl.this+" because "+member+" is now "+serviceUp+" ("+isViable+" / "+maybeAdd+" / "+maybeRemove+")");
+            if (maybeAdd || maybeRemove) {
+                refreshSeeds();
+            } else {
+                if (log.isTraceEnabled()) log.trace("Seeds considered stable for cluster {} (node {} changed serviceUp {})", new Object[] {CassandraDatacenterImpl.this, member, serviceUp});
+                return;
+            }
+        }
+        protected Set<Entity> getSeeds() {
+            Set<Entity> result = getAttribute(CURRENT_SEEDS);
+            return (result == null) ? ImmutableSet.<Entity>of() : result;
+        }
+        public void refreshSeeds() {
+            Set<Entity> oldseeds = getAttribute(CURRENT_SEEDS);
+            Set<Entity> newseeds = getSeedSupplier().get();
+            if (Objects.equal(oldseeds, newseeds)) {
+                if (log.isTraceEnabled()) log.debug("Seed refresh no-op for cluster {}: still={}", new Object[] {CassandraDatacenterImpl.this, oldseeds});
+            } else {
+                if (log.isDebugEnabled()) log.debug("Refreshing seeds of cluster {}: now={}; old={}", new Object[] {this, newseeds, oldseeds});
+                setAttribute(CURRENT_SEEDS, newseeds);
+                if (newseeds != null && newseeds.size() > 0) {
+                    setAttribute(HAS_PUBLISHED_SEEDS, true);
+                }
+            }
+        }
+        public Set<Entity> gatherPotentialSeeds() {
+            Set<Entity> result = Sets.newLinkedHashSet();
+            for (Entity member : getMembers()) {
+                if (isViableSeed(member)) {
+                    result.add(member);
+                }
+            }
+            if (log.isTraceEnabled()) log.trace("Viable seeds in Cluster {}: {}", new Object[] {result});
+            return result;
+        }
+        public Set<Entity> gatherPotentialRunningSeeds() {
+            Set<Entity> result = Sets.newLinkedHashSet();
+            for (Entity member : getMembers()) {
+                if (isRunningSeed(member)) {
+                    result.add(member);
+                }
+            }
+            if (log.isTraceEnabled()) log.trace("Viable running seeds in Cluster {}: {}", new Object[] {result});
+            return result;
+        }
+        public boolean isViableSeed(Entity member) {
+            // TODO would be good to reuse the better logic in ServiceFailureDetector
+            // (e.g. if that didn't just emit a notification but set a sensor as well?)
+            boolean managed = Entities.isManaged(member);
+            String hostname = member.getAttribute(Attributes.HOSTNAME);
+            boolean serviceUp = Boolean.TRUE.equals(member.getAttribute(Attributes.SERVICE_UP));
+            Lifecycle serviceState = member.getAttribute(Attributes.SERVICE_STATE_ACTUAL);
+            boolean hasFailed = !managed || (serviceState == Lifecycle.ON_FIRE) || (serviceState == Lifecycle.RUNNING && !serviceUp) || (serviceState == Lifecycle.STOPPED);
+            boolean result = (hostname != null && !hasFailed);
+            if (log.isTraceEnabled()) log.trace("Node {} in Cluster {}: viableSeed={}; hostname={}; serviceUp={}; serviceState={}; hasFailed={}", new Object[] {member, this, result, hostname, serviceUp, serviceState, hasFailed});
+            return result;
+        }
+        public boolean isRunningSeed(Entity member) {
+            boolean viableSeed = isViableSeed(member);
+            boolean serviceUp = Boolean.TRUE.equals(member.getAttribute(Attributes.SERVICE_UP));
+            Lifecycle serviceState = member.getAttribute(Attributes.SERVICE_STATE_ACTUAL);
+            boolean result = viableSeed && serviceUp && serviceState == Lifecycle.RUNNING;
+            if (log.isTraceEnabled()) log.trace("Node {} in Cluster {}: runningSeed={}; viableSeed={}; serviceUp={}; serviceState={}", new Object[] {member, this, result, viableSeed, serviceUp, serviceState});
+            return result;
+        }
+    }
+    
+    @Override
+    public String executeScript(String commands) {
+        Entity someChild = Iterables.getFirst(getMembers(), null);
+        if (someChild==null)
+            throw new IllegalStateException("No Cassandra nodes available");
+        // FIXME cross-entity method-style calls such as below do not set up a queueing context (DynamicSequentialTask) 
+//        return ((CassandraNode)someChild).executeScript(commands);
+        return Entities.invokeEffector(this, someChild, CassandraNode.EXECUTE_SCRIPT, MutableMap.of("commands", commands)).getUnchecked();
+    }
+    
+}

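A minimal sketch (not part of this commit) of how the vnode/token logic in createNode above is typically driven, assuming the USE_VNODES and NUM_TOKENS_PER_NODE keys declared on CassandraDatacenter:

    EntitySpec<CassandraDatacenter> dcSpec = EntitySpec.create(CassandraDatacenter.class)
            .configure(CassandraDatacenter.INITIAL_SIZE, 3)
            .configure(CassandraDatacenter.USE_VNODES, true)           // each node gets NUM_TOKENS_PER_NODE vnode tokens
            .configure(CassandraDatacenter.NUM_TOKENS_PER_NODE, 256);  // typical vnode setting; the default is 1

With USE_VNODES false, createNode instead asks the TokenGenerator for a single TOKEN per node.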
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraFabric.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraFabric.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraFabric.java
new file mode 100644
index 0000000..23db92c
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraFabric.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import java.util.Set;
+
+import org.apache.brooklyn.catalog.Catalog;
+import brooklyn.config.ConfigKey;
+import brooklyn.entity.Entity;
+import brooklyn.entity.annotation.Effector;
+import brooklyn.entity.basic.ConfigKeys;
+import brooklyn.entity.basic.MethodEffector;
+import brooklyn.entity.group.DynamicFabric;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.event.AttributeSensor;
+import brooklyn.location.Location;
+
+import com.google.common.base.Function;
+import com.google.common.collect.Multimap;
+import com.google.common.reflect.TypeToken;
+
+/**
+ * A fabric of {@link CassandraNode}s, which forms a cluster spanning multiple locations.
+ * <p>
+ * Each {@link CassandraDatacenter} child instance is actually just a part of the whole cluster. It consists of the
+ * nodes in that single location (which normally corresponds to a "datacenter" in Cassandra terminology).
+ */
+@Catalog(name="Apache Cassandra Database Fabric", description="Cassandra is a highly scalable, eventually " +
+        "consistent, distributed, structured key-value store which provides a ColumnFamily-based data model " +
+        "richer than typical key/value systems", iconUrl="classpath:///cassandra-logo.jpeg")
+@ImplementedBy(CassandraFabricImpl.class)
+public interface CassandraFabric extends DynamicFabric {
+
+    ConfigKey<Integer> INITIAL_QUORUM_SIZE = ConfigKeys.newIntegerConfigKey(
+            "fabric.initial.quorumSize",
+            "Initial fabric quorum size - number of initial nodes that must have been successfully started " +
+            "to report success (if less than 0, then use a value based on INITIAL_SIZE of clusters)",
+            -1);
+    
+    @SuppressWarnings("serial")
+    ConfigKey<Function<Location, String>> DATA_CENTER_NAMER = ConfigKeys.newConfigKey(new TypeToken<Function<Location, String>>(){}, 
+            "cassandra.fabric.datacenter.namer",
+            "Function used to provide the cassandra.replication.datacenterName for a given location");
+
+    int DEFAULT_SEED_QUORUM = 5;
+    
+    AttributeSensor<Multimap<String,Entity>> DATACENTER_USAGE = CassandraDatacenter.DATACENTER_USAGE;
+
+    AttributeSensor<Set<String>> DATACENTERS = CassandraDatacenter.DATACENTERS;
+
+    AttributeSensor<Set<Entity>> CURRENT_SEEDS = CassandraDatacenter.CURRENT_SEEDS;
+
+    AttributeSensor<Boolean> HAS_PUBLISHED_SEEDS = CassandraDatacenter.HAS_PUBLISHED_SEEDS;
+
+    AttributeSensor<String> HOSTNAME = CassandraDatacenter.HOSTNAME;
+
+    AttributeSensor<Integer> THRIFT_PORT = CassandraDatacenter.THRIFT_PORT;
+
+    MethodEffector<Void> UPDATE = new MethodEffector<Void>(CassandraFabric.class, "update");
+
+    @Effector(description="Updates the cluster members")
+    void update();
+    
+}

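To illustrate the DATA_CENTER_NAMER hook above, a hedged sketch (the naming scheme is hypothetical) deriving each sub-cluster's cassandra.replication.datacenterName from its provisioning location:

    EntitySpec<CassandraFabric> fabricSpec = EntitySpec.create(CassandraFabric.class)
            .configure(CassandraFabric.DATA_CENTER_NAMER, new Function<Location, String>() {
                @Override public String apply(Location loc) {
                    // e.g. turn a location display-name like "us-west-1" into "dc-us-west-1"
                    return "dc-" + loc.getDisplayName();
                }
            });

CassandraFabricImpl.createCluster (below) applies this function and passes the result to each member as CassandraNode.DATACENTER_NAME.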
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraFabricImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraFabricImpl.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraFabricImpl.java
new file mode 100644
index 0000000..bce7cac
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraFabricImpl.java
@@ -0,0 +1,395 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.Attributes;
+import brooklyn.entity.basic.DynamicGroup;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.basic.EntityInternal;
+import brooklyn.entity.basic.EntityPredicates;
+import brooklyn.entity.basic.Lifecycle;
+import brooklyn.entity.group.AbstractMembershipTrackingPolicy;
+import brooklyn.entity.group.DynamicFabricImpl;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.event.SensorEvent;
+import brooklyn.event.SensorEventListener;
+import brooklyn.location.Location;
+import brooklyn.policy.PolicySpec;
+import brooklyn.util.collections.CollectionFunctionals;
+import brooklyn.util.collections.MutableMap;
+import brooklyn.util.collections.MutableSet;
+import brooklyn.util.time.Time;
+
+import com.google.common.base.Function;
+import com.google.common.base.Objects;
+import com.google.common.base.Optional;
+import com.google.common.base.Supplier;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.LinkedHashMultimap;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Sets;
+
+/**
+ * Implementation of {@link CassandraFabric}.
+ * <p>
+ * Several subtleties to note:
+ * - a node may take some time after it is running and serving JMX to actually be contactable on its thrift port
+ *   (so we wait for the thrift port to be contactable)
+ * - sometimes new nodes take a while to peer, and/or take a while to get a consistent schema
+ *   (each up to 1m; often very close to the 1m) 
+ */
+public class CassandraFabricImpl extends DynamicFabricImpl implements CassandraFabric {
+
+    private static final Logger log = LoggerFactory.getLogger(CassandraFabricImpl.class);
+
+    // Mutex for synchronizing during re-size operations
+    private final Object mutex = new Object[0];
+
+    private final Supplier<Set<Entity>> defaultSeedSupplier = new Supplier<Set<Entity>>() {
+        @Override public Set<Entity> get() {
+            // TODO Remove duplication from CassandraClusterImpl.defaultSeedSupplier
+            Set<Entity> seeds = getAttribute(CURRENT_SEEDS);
+            boolean hasPublishedSeeds = Boolean.TRUE.equals(getAttribute(HAS_PUBLISHED_SEEDS));
+            int quorumSize = getSeedQuorumSize();
+            
+            // update seeds if we're not quorate; note this may not work for dynamically adding new datacenters
+            // as we do not take a new seed from the new datacenter
+            if (seeds == null || seeds.size() < quorumSize || containsDownEntity(seeds)) {
+                Set<Entity> newseeds;
+                Map<CassandraDatacenter,Set<Entity>> potentialSeeds = MutableMap.of();
+                int potentialSeedCount = 0;
+                for (CassandraDatacenter member : Iterables.filter(getMembers(), CassandraDatacenter.class)) {
+                    Set<Entity> dcPotentialSeeds = member.gatherPotentialSeeds();
+                    potentialSeeds.put(member, dcPotentialSeeds);
+                    potentialSeedCount += dcPotentialSeeds.size();
+                }
+                
+                if (hasPublishedSeeds) {
+                    Set<Entity> currentSeeds = getAttribute(CURRENT_SEEDS);
+                    Lifecycle serviceState = getAttribute(SERVICE_STATE_ACTUAL);
+                    if (serviceState == Lifecycle.STARTING) {
+                        if (Sets.intersection(currentSeeds, ImmutableSet.copyOf(Iterables.concat(potentialSeeds.values()))).isEmpty()) {
+                            log.warn("Fabric {} lost all its seeds while starting! Subsequent failure likely, but changing seeds during startup would risk split-brain: seeds={}", new Object[] {CassandraFabricImpl.this, currentSeeds});
+                        }
+                        newseeds = currentSeeds;
+                    } else if (serviceState == Lifecycle.STOPPING || serviceState == Lifecycle.STOPPED) {
+                        if (log.isTraceEnabled()) log.trace("Fabric {} ignoring any potential seed-changes, because {}: seeds={}", new Object[] {CassandraFabricImpl.this, serviceState, currentSeeds});
+                        newseeds = currentSeeds;
+                    } else if (potentialSeedCount == 0) {
+                        // TODO Could be a race where nodes have only just returned from start() and are about to 
+                        // transition to serviceUp; so don't just abandon all our seeds!
+                        log.warn("Fabric {} has no seeds (after startup); leaving seeds as-is; but risks split-brain if these seeds come back up!", new Object[] {CassandraFabricImpl.this});
+                        newseeds = currentSeeds;
+                    } else if (!allNonEmpty(potentialSeeds.values())) {
+                        log.warn("Fabric {} has datacenter with no seeds (after startup); leaving seeds as-is; but risks split-brain if these seeds come back up!", new Object[] {CassandraFabricImpl.this});
+                        newseeds = currentSeeds;
+                    } else {
+                        Set<Entity> result = selectSeeds(quorumSize, potentialSeeds);
+                        if (log.isDebugEnabled() && !Objects.equal(seeds, result)) {
+                            log.debug("Fabric {} updating seeds: chosen={}; potential={}", new Object[] {CassandraFabricImpl.this, result, potentialSeeds});
+                        }
+                        newseeds = result;
+                    }
+                } else if (potentialSeedCount < quorumSize) {
+                    if (log.isDebugEnabled()) log.debug("Not setting seeds of fabric {} yet, because still waiting for quorum (need {}; have {} potentials from {} members)", new Object[] {CassandraFabricImpl.this, quorumSize, potentialSeedCount, getMembers()});
+                    newseeds = ImmutableSet.of();
+                } else if (!allNonEmpty(potentialSeeds.values())) {
+                    if (log.isDebugEnabled()) {
+                        Map<CassandraDatacenter, Integer> datacenterCounts = Maps.transformValues(potentialSeeds, CollectionFunctionals.sizeFunction());
+                        log.debug("Not setting seeds of fabric {} yet, because not all datacenters have seeds (sizes are {})", new Object[] {CassandraFabricImpl.this, datacenterCounts});
+                    }
+                    newseeds = ImmutableSet.of();
+                } else {
+                    // yay, we're quorate
+                    Set<Entity> result = selectSeeds(quorumSize, potentialSeeds);
+                    log.info("Fabric {} has reached seed quorum: seeds={}", new Object[] {CassandraFabricImpl.this, result});
+                    newseeds = result;
+                }
+                
+                if (!Objects.equal(seeds, newseeds)) {
+                    setAttribute(CURRENT_SEEDS, newseeds);
+                    
+                    if (newseeds != null && newseeds.size() > 0) {
+                        setAttribute(HAS_PUBLISHED_SEEDS, true);
+                        
+                        // Need to tell every datacenter that seeds are ready.
+                        // Otherwise a datacenter might get no more changes (e.g. to nodes' hostnames etc), 
+                        // and not call seedSupplier.get() again.
+                        for (CassandraDatacenter member : Iterables.filter(getMembers(), CassandraDatacenter.class)) {
+                            member.update();
+                        }
+                    }
+                    return newseeds;
+                } else {
+                    return seeds;
+                }
+            } else {
+                if (log.isTraceEnabled()) log.trace("Not refresheed seeds of fabric {}, because have quorum {} (of {} members), and none are down: seeds={}", 
+                        new Object[] {CassandraFabricImpl.class, quorumSize, getMembers().size(), seeds});
+                return seeds;
+            }
+        }
+        private boolean allNonEmpty(Collection<? extends Collection<Entity>> contenders) {
+            for (Collection<Entity> contender: contenders)
+                if (contender.isEmpty()) return false;
+            return true;
+        }
+        private Set<Entity> selectSeeds(int num, Map<CassandraDatacenter,? extends Collection<Entity>> contenders) {
+            // Prefer existing seeds wherever possible;
+            // otherwise prefer a seed from each sub-cluster;
+            // otherwise accept any other contenders
+            Set<Entity> currentSeeds = (getAttribute(CURRENT_SEEDS) != null) ? getAttribute(CURRENT_SEEDS) : ImmutableSet.<Entity>of();
+            MutableSet<Entity> result = MutableSet.of();
+            result.addAll(Sets.intersection(currentSeeds, ImmutableSet.copyOf(Iterables.concat(contenders.values()))));
+            for (CassandraDatacenter cluster : contenders.keySet()) {
+                Set<Entity> contendersInCluster = Sets.newLinkedHashSet(contenders.get(cluster));
+                if (contendersInCluster.size() > 0 && Sets.intersection(result, contendersInCluster).isEmpty()) {
+                    result.add(Iterables.getFirst(contendersInCluster, null));
+                }
+            }
+            result.addAll(Iterables.concat(contenders.values()));
+            return ImmutableSet.copyOf(Iterables.limit(result, num));
+        }
+        private boolean containsDownEntity(Set<Entity> seeds) {
+            for (Entity seed : seeds) {
+                if (!isViableSeed(seed)) {
+                    return true;
+                }
+            }
+            return false;
+        }
+        public boolean isViableSeed(Entity member) {
+            // TODO remove duplication from CassandraClusterImpl.SeedTracker.isViableSeed
+            boolean managed = Entities.isManaged(member);
+            String hostname = member.getAttribute(Attributes.HOSTNAME);
+            boolean serviceUp = Boolean.TRUE.equals(member.getAttribute(Attributes.SERVICE_UP));
+            Lifecycle serviceState = member.getAttribute(Attributes.SERVICE_STATE_ACTUAL);
+            boolean hasFailed = !managed || (serviceState == Lifecycle.ON_FIRE) || (serviceState == Lifecycle.RUNNING && !serviceUp) || (serviceState == Lifecycle.STOPPED);
+            boolean result = (hostname != null && !hasFailed);
+            if (log.isTraceEnabled()) log.trace("Node {} in Fabric {}: viableSeed={}; hostname={}; serviceUp={}; serviceState={}; hasFailed={}", new Object[] {member, CassandraFabricImpl.this, result, hostname, serviceUp, serviceState, hasFailed});
+            return result;
+        }
+    };
+
+    public CassandraFabricImpl() {
+    }
+
+    @Override
+    public void init() {
+        super.init();
+
+        if (!getConfigRaw(CassandraDatacenter.SEED_SUPPLIER, true).isPresentAndNonNull())
+            setConfig(CassandraDatacenter.SEED_SUPPLIER, getSeedSupplier());
+        
+        // track members
+        addPolicy(PolicySpec.create(MemberTrackingPolicy.class)
+                .displayName("Cassandra Fabric Tracker")
+                .configure("group", this));
+
+        // Track first node's startup
+        subscribeToMembers(this, CassandraDatacenter.FIRST_NODE_STARTED_TIME_UTC, new SensorEventListener<Long>() {
+            @Override
+            public void onEvent(SensorEvent<Long> event) {
+                Long oldval = getAttribute(CassandraDatacenter.FIRST_NODE_STARTED_TIME_UTC);
+                Long newval = event.getValue();
+                if (oldval == null && newval != null) {
+                    setAttribute(CassandraDatacenter.FIRST_NODE_STARTED_TIME_UTC, newval);
+                    for (CassandraDatacenter member : Iterables.filter(getMembers(), CassandraDatacenter.class)) {
+                        ((EntityInternal)member).setAttribute(CassandraDatacenter.FIRST_NODE_STARTED_TIME_UTC, newval);
+                    }
+                }
+            }
+        });
+        
+        // Track the datacenters for this cluster
+        subscribeToMembers(this, CassandraDatacenter.DATACENTER_USAGE, new SensorEventListener<Multimap<String,Entity>>() {
+            @Override
+            public void onEvent(SensorEvent<Multimap<String,Entity>> event) {
+                Multimap<String, Entity> usage = calculateDatacenterUsage();
+                setAttribute(DATACENTER_USAGE, usage);
+                setAttribute(DATACENTERS, usage.keySet());
+            }
+        });
+        subscribe(this, DynamicGroup.MEMBER_REMOVED, new SensorEventListener<Entity>() {
+            @Override public void onEvent(SensorEvent<Entity> event) {
+                Multimap<String, Entity> usage = calculateDatacenterUsage();
+                setAttribute(DATACENTER_USAGE, usage);
+                setAttribute(DATACENTERS, usage.keySet());
+            }
+        });
+    }
+
+    public static class MemberTrackingPolicy extends AbstractMembershipTrackingPolicy {
+        @Override
+        protected void onEntityChange(Entity member) {
+            if (log.isDebugEnabled()) log.debug("Location {} updated in Fabric {}", member, entity);
+            ((CassandraFabricImpl)entity).update();
+        }
+        @Override
+        protected void onEntityAdded(Entity member) {
+            if (log.isDebugEnabled()) log.debug("Location {} added to Fabric {}", member, entity);
+            ((CassandraFabricImpl)entity).update();
+        }
+        @Override
+        protected void onEntityRemoved(Entity member) {
+            if (log.isDebugEnabled()) log.debug("Location {} removed from Fabric {}", member, entity);
+            ((CassandraFabricImpl)entity).update();
+        }
+    };
+
+    protected int getSeedQuorumSize() {
+        Integer quorumSize = getConfig(INITIAL_QUORUM_SIZE);
+        if (quorumSize!=null && quorumSize>0)
+            return quorumSize;
+
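+        // Otherwise derive a quorum from the members' INITIAL_SIZE sum, roughly halved for larger fabrics and bounded to [1, DEFAULT_SEED_QUORUM].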
+        int initialSizeSum = 0;
+        for (CassandraDatacenter cluster : Iterables.filter(getMembers(), CassandraDatacenter.class)) {
+            initialSizeSum += cluster.getConfig(CassandraDatacenter.INITIAL_SIZE);
+        }
+        if (initialSizeSum>5) initialSizeSum /= 2;
+        else if (initialSizeSum>3) initialSizeSum -= 2;
+        else if (initialSizeSum>2) initialSizeSum -= 1;
+        
+        return Math.min(Math.max(initialSizeSum, 1), CassandraFabric.DEFAULT_SEED_QUORUM);
+    }
+
+    /**
+     * Sets the default {@link #MEMBER_SPEC} to describe the Cassandra sub-clusters.
+     */
+    @Override
+    protected EntitySpec<?> getMemberSpec() {
+        // Need to set the seedSupplier, even if the caller has overridden the CassandraCluster config
+        // (unless they've explicitly overridden the seedSupplier as well!)
+        // TODO probably don't need to anymore, as it is set on the Fabric here -- just make sure there is a default!
+        EntitySpec<?> custom = getConfig(MEMBER_SPEC);
+        if (custom == null) {
+            return EntitySpec.create(CassandraDatacenter.class)
+                    .configure(CassandraDatacenter.SEED_SUPPLIER, getSeedSupplier());
+        } else if (custom.getConfig().containsKey(CassandraDatacenter.SEED_SUPPLIER) || custom.getFlags().containsKey("seedSupplier")) {
+            return custom;
+        } else {
+            return EntitySpec.create(custom)
+                    .configure(CassandraDatacenter.SEED_SUPPLIER, getSeedSupplier());
+        }
+    }
+    
+    @Override
+    protected Entity createCluster(Location location, Map flags) {
+        Function<Location, String> dataCenterNamer = getConfig(DATA_CENTER_NAMER);
+        if (dataCenterNamer != null) {
+            flags = ImmutableMap.builder()
+                .putAll(flags)
+                .put(CassandraNode.DATACENTER_NAME, dataCenterNamer.apply(location))
+                .build();
+        }
+        return super.createCluster(location, flags);
+    }
+
+    /**
+     * Prefers one node per location, and then others from anywhere.
+     * Then trims result down to the "quorumSize".
+     */
+    public Supplier<Set<Entity>> getSeedSupplier() {
+        return defaultSeedSupplier;
+    }
+
+    @Override
+    public void start(Collection<? extends Location> locations) {
+        super.start(locations);
+
+        connectSensors();
+
+        // TODO wait until all nodes which we think are up are consistent 
+        // i.e. all known nodes use the same schema, as reported by
+        // SshEffectorTasks.ssh("echo \"describe cluster;\" | /bin/cassandra-cli");
+        // once we've done that we can revert to using 2 seed nodes.
+        // see CassandraCluster.DEFAULT_SEED_QUORUM
+        Time.sleep(getConfig(CassandraDatacenter.DELAY_BEFORE_ADVERTISING_CLUSTER));
+
+        update();
+    }
+
+    protected void connectSensors() {
+        connectEnrichers();
+    }
+    
+    protected void connectEnrichers() {
+        // TODO Aggregate across sub-clusters
+
+        subscribeToMembers(this, SERVICE_UP, new SensorEventListener<Boolean>() {
+            @Override public void onEvent(SensorEvent<Boolean> event) {
+                setAttribute(SERVICE_UP, calculateServiceUp());
+            }
+        });
+    }
+
+    @Override
+    public void stop() {
+        disconnectSensors();
+        
+        super.stop();
+    }
+    
+    protected void disconnectSensors() {
+    }
+
+    protected boolean calculateServiceUp() {
+        Optional<Entity> upNode = Iterables.tryFind(getMembers(), EntityPredicates.attributeEqualTo(SERVICE_UP, Boolean.TRUE));
+        return upNode.isPresent();
+    }
+
+    protected Multimap<String, Entity> calculateDatacenterUsage() {
+        Multimap<String, Entity> result = LinkedHashMultimap.<String, Entity>create();
+        for (CassandraDatacenter member : Iterables.filter(getMembers(), CassandraDatacenter.class)) {
+            Multimap<String, Entity> memberUsage = member.getAttribute(CassandraDatacenter.DATACENTER_USAGE);
+            if (memberUsage != null) result.putAll(memberUsage);
+        }
+        return result;
+    }
+
+    @Override
+    public void update() {
+        synchronized (mutex) {
+            for (CassandraDatacenter member : Iterables.filter(getMembers(), CassandraDatacenter.class)) {
+                member.update();
+            }
+
+            calculateServiceUp();
+
+            // Choose the first available location to set host and port (and compute one-up)
+            Optional<Entity> upNode = Iterables.tryFind(getMembers(), EntityPredicates.attributeEqualTo(SERVICE_UP, Boolean.TRUE));
+
+            if (upNode.isPresent()) {
+                setAttribute(HOSTNAME, upNode.get().getAttribute(Attributes.HOSTNAME));
+                setAttribute(THRIFT_PORT, upNode.get().getAttribute(CassandraNode.THRIFT_PORT));
+            }
+        }
+    }
+}

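To make the quorum heuristic in getSeedQuorumSize() above concrete: two datacenters of INITIAL_SIZE 3 give initialSizeSum = 6; since 6 > 5 it is halved to 3, and min(max(3, 1), DEFAULT_SEED_QUORUM) yields a quorum of 3 seeds. A single two-node datacenter falls through all three branches unchanged, yielding a quorum of 2.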
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNode.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNode.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNode.java
new file mode 100644
index 0000000..7d0a56d
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNode.java
@@ -0,0 +1,231 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import java.math.BigInteger;
+import java.util.Set;
+
+import org.apache.brooklyn.catalog.Catalog;
+import brooklyn.config.ConfigKey;
+import brooklyn.entity.Effector;
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.BrooklynConfigKeys;
+import brooklyn.entity.basic.ConfigKeys;
+import brooklyn.entity.basic.SoftwareProcess;
+import brooklyn.entity.database.DatastoreMixins;
+import brooklyn.entity.java.UsesJavaMXBeans;
+import brooklyn.entity.java.UsesJmx;
+import brooklyn.entity.proxying.ImplementedBy;
+import brooklyn.event.AttributeSensor;
+import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
+import brooklyn.event.basic.BasicConfigKey;
+import brooklyn.event.basic.PortAttributeSensorAndConfigKey;
+import brooklyn.event.basic.Sensors;
+import brooklyn.location.basic.PortRanges;
+import brooklyn.util.flags.SetFromFlag;
+import brooklyn.util.time.Duration;
+
+import com.google.common.reflect.TypeToken;
+
+/**
+ * An {@link brooklyn.entity.Entity} that represents a Cassandra node in a {@link CassandraDatacenter}.
+ */
+@Catalog(name="Apache Cassandra Node", description="Cassandra is a highly scalable, eventually " +
+        "consistent, distributed, structured key-value store which provides a ColumnFamily-based data model " +
+        "richer than typical key/value systems", iconUrl="classpath:///cassandra-logo.jpeg")
+@ImplementedBy(CassandraNodeImpl.class)
+public interface CassandraNode extends DatastoreMixins.DatastoreCommon, SoftwareProcess, UsesJmx, UsesJavaMXBeans, DatastoreMixins.HasDatastoreUrl, DatastoreMixins.CanExecuteScript {
+
+    @SetFromFlag("version")
+    ConfigKey<String> SUGGESTED_VERSION = ConfigKeys.newConfigKeyWithDefault(SoftwareProcess.SUGGESTED_VERSION, "1.2.16");
+    // when this changes remember to put a copy under releng2:/var/www/developer/brooklyn/repository/ !
+    // TODO experiment with supporting 2.0.x
+
+    @SetFromFlag("downloadUrl")
+    BasicAttributeSensorAndConfigKey<String> DOWNLOAD_URL = new BasicAttributeSensorAndConfigKey<String>(
+            SoftwareProcess.DOWNLOAD_URL, "${driver.mirrorUrl}/${version}/apache-cassandra-${version}-bin.tar.gz");
+
+    /** download mirror, if desired */
+    @SetFromFlag("mirrorUrl")
+    ConfigKey<String> MIRROR_URL = new BasicConfigKey<String>(String.class, "cassandra.install.mirror.url", "URL of mirror", 
+        "http://www.mirrorservice.org/sites/ftp.apache.org/cassandra"
+        // for older versions, but slower:
+//        "http://archive.apache.org/dist/cassandra/"
+        );
+
+    @SetFromFlag("tgzUrl")
+    ConfigKey<String> TGZ_URL = new BasicConfigKey<String>(String.class, "cassandra.install.tgzUrl", "URL of TGZ download file");
+
+    @SetFromFlag("clusterName")
+    BasicAttributeSensorAndConfigKey<String> CLUSTER_NAME = CassandraDatacenter.CLUSTER_NAME;
+
+    @SetFromFlag("snitchName")
+    ConfigKey<String> ENDPOINT_SNITCH_NAME = CassandraDatacenter.ENDPOINT_SNITCH_NAME;
+
+    @SetFromFlag("gossipPort")
+    PortAttributeSensorAndConfigKey GOSSIP_PORT = new PortAttributeSensorAndConfigKey("cassandra.gossip.port", "Cassandra Gossip communications port", PortRanges.fromString("7000+"));
+
+    @SetFromFlag("sslGgossipPort")
+    PortAttributeSensorAndConfigKey SSL_GOSSIP_PORT = new PortAttributeSensorAndConfigKey("cassandra.ssl-gossip.port", "Cassandra Gossip SSL communications port", PortRanges.fromString("7001+"));
+
+    @SetFromFlag("thriftPort")
+    PortAttributeSensorAndConfigKey THRIFT_PORT = new PortAttributeSensorAndConfigKey("cassandra.thrift.port", "Cassandra Thrift RPC port", PortRanges.fromString("9160+"));
+
+    @SetFromFlag("nativePort")
+    PortAttributeSensorAndConfigKey NATIVE_TRANSPORT_PORT = new PortAttributeSensorAndConfigKey("cassandra.native.port", "Cassandra Native Transport port", PortRanges.fromString("9042+"));
+
+    @SetFromFlag("rmiRegistryPort")
+    // cassandra nodetool and others want 7199 - not required, but useful
+    PortAttributeSensorAndConfigKey RMI_REGISTRY_PORT = new PortAttributeSensorAndConfigKey(UsesJmx.RMI_REGISTRY_PORT, 
+        PortRanges.fromInteger(7199));
+
+    // some of the cassandra tooling (e.g. nodetool) uses RMI, but we want JMXMP, so do both!
+    ConfigKey<JmxAgentModes> JMX_AGENT_MODE = ConfigKeys.newConfigKeyWithDefault(UsesJmx.JMX_AGENT_MODE, JmxAgentModes.JMXMP_AND_RMI);
+    
+    @SetFromFlag("customSnitchJarUrl")
+    ConfigKey<String> CUSTOM_SNITCH_JAR_URL = ConfigKeys.newStringConfigKey("cassandra.config.customSnitchUrl", 
+            "URL for a jar file to be uploaded (e.g. \"classpath://org/apache/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar\"); defaults to null which means nothing to upload", 
+            null);
+
+    @SetFromFlag("cassandraConfigTemplateUrl")
+    ConfigKey<String> CASSANDRA_CONFIG_TEMPLATE_URL = ConfigKeys.newStringConfigKey(
+            "cassandra.config.templateUrl", "A URL (in freemarker format) for a cassandra.yaml config file (in freemarker format)", 
+            "classpath://org/apache/brooklyn/entity/nosql/cassandra/cassandra-${entity.majorMinorVersion}.yaml");
+
+    @SetFromFlag("cassandraConfigFileName")
+    ConfigKey<String> CASSANDRA_CONFIG_FILE_NAME = ConfigKeys.newStringConfigKey(
+            "cassandra.config.fileName", "Name for the copied config file", "cassandra.yaml");
+
+    @SetFromFlag("cassandraRackdcConfigTemplateUrl")
+    ConfigKey<String> CASSANDRA_RACKDC_CONFIG_TEMPLATE_URL = ConfigKeys.newStringConfigKey(
+            "cassandra.config.rackdc.templateUrl", "Template file (in freemarker format) for the cassandra-rackdc.properties config file", 
+            "classpath://org/apache/brooklyn/entity/nosql/cassandra/cassandra-rackdc.properties");
+
+    @SetFromFlag("cassandraRackdcConfigFileName")
+    ConfigKey<String> CASSANDRA_RACKDC_CONFIG_FILE_NAME = ConfigKeys.newStringConfigKey(
+            "cassandra.config.rackdc.fileName", "Name for the copied rackdc config file (used for configuring replication, when a suitable snitch is used)", "cassandra-rackdc.properties");
+    
+    @SetFromFlag("datacenterName")
+    BasicAttributeSensorAndConfigKey<String> DATACENTER_NAME = new BasicAttributeSensorAndConfigKey<String>(
+            String.class, "cassandra.replication.datacenterName", "Datacenter name (used for configuring replication, when a suitable snitch is used)", 
+            null);
+
+    @SetFromFlag("rackName")
+    BasicAttributeSensorAndConfigKey<String> RACK_NAME = new BasicAttributeSensorAndConfigKey<String>(
+            String.class, "cassandra.replication.rackName", "Rack name (used for configuring replication, when a suitable snitch is used)", 
+            null);
+
+    ConfigKey<Integer> NUM_TOKENS_PER_NODE = ConfigKeys.newIntegerConfigKey("cassandra.numTokensPerNode",
+            "Number of tokens per node; if using vnodes, should set this to a value like 256",
+            1);
+    
+    /**
+     * @deprecated since 0.7; use {@link #TOKENS}
+     */
+    @SetFromFlag("token")
+    @Deprecated
+    BasicAttributeSensorAndConfigKey<BigInteger> TOKEN = new BasicAttributeSensorAndConfigKey<BigInteger>(
+            BigInteger.class, "cassandra.token", "Cassandra Token");
+
+    @SetFromFlag("tokens")
+    BasicAttributeSensorAndConfigKey<Set<BigInteger>> TOKENS = new BasicAttributeSensorAndConfigKey<Set<BigInteger>>(
+            new TypeToken<Set<BigInteger>>() {}, "cassandra.tokens", "Cassandra Tokens");
+
+    AttributeSensor<Integer> PEERS = Sensors.newIntegerSensor( "cassandra.peers", "Number of peers in cluster");
+
+    AttributeSensor<Integer> LIVE_NODE_COUNT = Sensors.newIntegerSensor( "cassandra.liveNodeCount", "Number of live nodes in cluster");
+
+    /* Metrics for read/write performance. */
+
+    AttributeSensor<Long> READ_PENDING = Sensors.newLongSensor("cassandra.read.pending", "Current pending ReadStage tasks");
+    AttributeSensor<Integer> READ_ACTIVE = Sensors.newIntegerSensor("cassandra.read.active", "Current active ReadStage tasks");
+    AttributeSensor<Long> READ_COMPLETED = Sensors.newLongSensor("cassandra.read.completed", "Total completed ReadStage tasks");
+    AttributeSensor<Long> WRITE_PENDING = Sensors.newLongSensor("cassandra.write.pending", "Current pending MutationStage tasks");
+    AttributeSensor<Integer> WRITE_ACTIVE = Sensors.newIntegerSensor("cassandra.write.active", "Current active MutationStage tasks");
+    AttributeSensor<Long> WRITE_COMPLETED = Sensors.newLongSensor("cassandra.write.completed", "Total completed MutationStage tasks");
+    
+    AttributeSensor<Boolean> SERVICE_UP_JMX = Sensors.newBooleanSensor("cassandra.service.jmx.up", "Whether JMX is up for this service");
+    AttributeSensor<Long> THRIFT_PORT_LATENCY = Sensors.newLongSensor("cassandra.thrift.latency", "Latency for thrift port connection (ms) or null if down");
+
+    AttributeSensor<Double> READS_PER_SECOND_LAST = Sensors.newDoubleSensor("cassandra.reads.perSec.last", "Reads/sec (last datapoint)");
+    AttributeSensor<Double> WRITES_PER_SECOND_LAST = Sensors.newDoubleSensor("cassandra.write.perSec.last", "Writes/sec (last datapoint)");
+
+    AttributeSensor<Double> THRIFT_PORT_LATENCY_IN_WINDOW = Sensors.newDoubleSensor("cassandra.thrift.latency.windowed", "Latency for thrift port (ms, averaged over time window)");
+    AttributeSensor<Double> READS_PER_SECOND_IN_WINDOW = Sensors.newDoubleSensor("cassandra.reads.perSec.windowed", "Reads/sec (over time window)");
+    AttributeSensor<Double> WRITES_PER_SECOND_IN_WINDOW = Sensors.newDoubleSensor("cassandra.writes.perSec.windowed", "Writes/sec (over time window)");
+
+    @SuppressWarnings({ "rawtypes", "unchecked" })
+    ConfigKey<Set<Entity>> INITIAL_SEEDS = (ConfigKey)ConfigKeys.newConfigKey(Set.class, "cassandra.cluster.seeds.initial", 
+            "List of cluster nodes to seed this node");
+
+    ConfigKey<Duration> START_TIMEOUT = ConfigKeys.newConfigKeyWithDefault(BrooklynConfigKeys.START_TIMEOUT, Duration.FIVE_MINUTES);
+    
+    ConfigKey<String> LISTEN_ADDRESS_SENSOR = ConfigKeys.newStringConfigKey("cassandra.listenAddressSensor", "sensor name from which to take the listen address; default (null) is a smart lookup");
+    ConfigKey<String> BROADCAST_ADDRESS_SENSOR = ConfigKeys.newStringConfigKey("cassandra.broadcastAddressSensor", "sensor name from which to take the broadcast address; default (null) is a smart lookup");
+    ConfigKey<String> RPC_ADDRESS_SENSOR = ConfigKeys.newStringConfigKey("cassandra.rpcAddressSensor", "sensor name from which to take the RPC address; default (null) is 0.0.0.0");
+
+    Effector<String> EXECUTE_SCRIPT = CassandraDatacenter.EXECUTE_SCRIPT;
+
+    /* Accessors used from template */
+    
+    String getMajorMinorVersion();
+    Integer getGossipPort();
+    Integer getSslGossipPort();
+    Integer getThriftPort();
+    Integer getNativeTransportPort();
+    String getClusterName();
+    String getListenAddress();
+    String getBroadcastAddress();
+    String getRpcAddress();
+    String getSeeds();
+    
+    String getPrivateIp();
+    String getPublicIp();
+    
+    /**
+     * In range 0 to (2^127)-1; or null if not yet set or known.
+     * Returns the first token if more than one token.
+     * @deprecated since 0.7; see {@link #getTokens()}
+     */
+    @Deprecated
+    BigInteger getToken();
+
+    int getNumTokensPerNode();
+
+    Set<BigInteger> getTokens();
+
+    /**
+     * string value of token (with no commas, which freemarker introduces!) or blank if none
+     * @deprecated since 0.7; use {@link #getTokensAsString()}
+     */
+    @Deprecated
+    String getTokenAsString();
+
+    /** string value of comma-separated tokens; or blank if none */
+    String getTokensAsString();
+
+    /* For configuration */
+    
+    void setToken(String token);
+    
+    /* Using Cassandra */
+    
+    String executeScript(String commands);
+    
+}

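A minimal sketch (values illustrative, not from this commit) of deploying a single node using the keys declared above, pinning the cluster name and Thrift port:

    EntitySpec<CassandraNode> nodeSpec = EntitySpec.create(CassandraNode.class)
            .configure(CassandraNode.CLUSTER_NAME, "DemoCluster")
            .configure(CassandraNode.THRIFT_PORT, PortRanges.fromInteger(9160))
            .configure(CassandraNode.NUM_TOKENS_PER_NODE, 256);  // only meaningful when vnodes are in use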
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeDriver.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeDriver.java b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeDriver.java
new file mode 100644
index 0000000..eab6672
--- /dev/null
+++ b/software/nosql/src/main/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeDriver.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import brooklyn.entity.java.JavaSoftwareProcessDriver;
+import brooklyn.util.task.system.ProcessTaskWrapper;
+
+public interface CassandraNodeDriver extends JavaSoftwareProcessDriver {
+
+    Integer getGossipPort();
+
+    Integer getSslGossipPort();
+
+    Integer getThriftPort();
+
+    Integer getNativeTransportPort();
+
+    String getClusterName();
+
+    String getCassandraConfigTemplateUrl();
+
+    String getCassandraConfigFileName();
+
+    boolean isClustered();
+
+    ProcessTaskWrapper<Integer> executeScriptAsync(String commands);
+
+    /** returns the address that the given hostname resolves to at the target */
+    String getResolvedAddress(String hostname);
+
+}


[25/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
[BROOKLYN-162] Renaming of the NoSQL packages 

- From brooklyn.entity.nosql to org.apache.brooklyn.entity.nosql


Project: http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/commit/d5cf5285
Tree: http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/tree/d5cf5285
Diff: http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/diff/d5cf5285

Branch: refs/heads/master
Commit: d5cf5285286a6397d8f7abef32c00e2179259b38
Parents: 08662a7
Author: Yavor Yanchev <ya...@yanchev.com>
Authored: Thu Aug 6 17:05:21 2015 +0300
Committer: Yavor Yanchev <ya...@yanchev.com>
Committed: Thu Aug 6 18:25:29 2015 +0300

----------------------------------------------------------------------
 .../nosql-cassandra/cassandra.include.md        |   4 +-
 docs/guide/ops/catalog/index.md                 |   4 +-
 .../brooklyn/demo/CumulusRDFApplication.java    |  11 +-
 .../demo/HighAvailabilityCassandraCluster.java  |   5 +-
 .../brooklyn/demo/ResilientMongoDbApp.java      |   5 +-
 .../brooklyn/demo/RiakClusterExample.java       |   5 +-
 .../brooklyn/demo/SimpleCassandraCluster.java   |   3 +-
 .../brooklyn/demo/SimpleCouchDBCluster.java     |   3 +-
 .../brooklyn/demo/SimpleMongoDBReplicaSet.java  |   5 +-
 .../brooklyn/demo/SimpleRedisCluster.java       |   3 +-
 .../brooklyn/demo/WideAreaCassandraCluster.java |  11 +-
 .../brooklyn/demo/ha-cassandra-cluster.yaml     |   4 +-
 .../brooklyn/demo/simple-cassandra-cluster.yaml |   2 +-
 .../demo/wide-area-cassandra-cluster.yaml       |   8 +-
 .../brooklyn/demo/NodeJsTodoApplication.java    |   3 +-
 .../apache/brooklyn/demo/nodejs-riak-todo.yaml  |   2 +-
 .../org/apache/brooklyn/demo/nodejs-todo.yaml   |   2 +-
 software/nosql/pom.xml                          |  28 +-
 .../nosql/cassandra/CassandraCluster.java       |  30 -
 .../nosql/cassandra/CassandraClusterImpl.java   |  27 -
 .../nosql/cassandra/CassandraDatacenter.java    | 214 ------
 .../cassandra/CassandraDatacenterImpl.java      | 625 -----------------
 .../entity/nosql/cassandra/CassandraFabric.java |  80 ---
 .../nosql/cassandra/CassandraFabricImpl.java    | 395 -----------
 .../entity/nosql/cassandra/CassandraNode.java   | 231 -------
 .../nosql/cassandra/CassandraNodeDriver.java    |  47 --
 .../nosql/cassandra/CassandraNodeImpl.java      | 594 ----------------
 .../nosql/cassandra/CassandraNodeSshDriver.java | 420 -----------
 .../entity/nosql/cassandra/TokenGenerator.java  |  49 --
 .../entity/nosql/cassandra/TokenGenerators.java | 192 ------
 .../nosql/couchbase/CouchbaseCluster.java       | 134 ----
 .../nosql/couchbase/CouchbaseClusterImpl.java   | 597 ----------------
 .../entity/nosql/couchbase/CouchbaseNode.java   | 159 -----
 .../nosql/couchbase/CouchbaseNodeDriver.java    |  41 --
 .../nosql/couchbase/CouchbaseNodeImpl.java      | 269 --------
 .../nosql/couchbase/CouchbaseNodeSshDriver.java | 512 --------------
 .../nosql/couchbase/CouchbaseSyncGateway.java   |  75 --
 .../couchbase/CouchbaseSyncGatewayDriver.java   |  27 -
 .../couchbase/CouchbaseSyncGatewayImpl.java     |  82 ---
 .../CouchbaseSyncGatewaySshDriver.java          | 167 -----
 .../entity/nosql/couchdb/CouchDBCluster.java    |  48 --
 .../nosql/couchdb/CouchDBClusterImpl.java       |  51 --
 .../entity/nosql/couchdb/CouchDBNode.java       |  66 --
 .../entity/nosql/couchdb/CouchDBNodeDriver.java |  37 -
 .../entity/nosql/couchdb/CouchDBNodeImpl.java   | 106 ---
 .../nosql/couchdb/CouchDBNodeSshDriver.java     | 153 -----
 .../elasticsearch/ElasticSearchCluster.java     |  40 --
 .../elasticsearch/ElasticSearchClusterImpl.java |  45 --
 .../nosql/elasticsearch/ElasticSearchNode.java  |  88 ---
 .../elasticsearch/ElasticSearchNodeDriver.java  |  25 -
 .../elasticsearch/ElasticSearchNodeImpl.java    | 110 ---
 .../ElasticSearchNodeSshDriver.java             | 139 ----
 .../nosql/mongodb/AbstractMongoDBServer.java    |  61 --
 .../nosql/mongodb/AbstractMongoDBSshDriver.java | 175 -----
 .../entity/nosql/mongodb/MongoDBClient.java     |  65 --
 .../nosql/mongodb/MongoDBClientDriver.java      |  25 -
 .../entity/nosql/mongodb/MongoDBClientImpl.java |  43 --
 .../nosql/mongodb/MongoDBClientSshDriver.java   | 147 ----
 .../nosql/mongodb/MongoDBClientSupport.java     | 263 -------
 .../entity/nosql/mongodb/MongoDBDriver.java     |  24 -
 .../entity/nosql/mongodb/MongoDBReplicaSet.java |  84 ---
 .../nosql/mongodb/MongoDBReplicaSetImpl.java    | 404 -----------
 .../entity/nosql/mongodb/MongoDBServer.java     | 152 ----
 .../entity/nosql/mongodb/MongoDBServerImpl.java | 214 ------
 .../entity/nosql/mongodb/MongoDBSshDriver.java  |  57 --
 .../entity/nosql/mongodb/ReplicaSetConfig.java  | 278 --------
 .../nosql/mongodb/ReplicaSetMemberStatus.java   |  66 --
 .../sharding/CoLocatedMongoDBRouter.java        |  59 --
 .../sharding/CoLocatedMongoDBRouterImpl.java    |  70 --
 .../mongodb/sharding/MongoDBConfigServer.java   |  27 -
 .../sharding/MongoDBConfigServerCluster.java    |  35 -
 .../MongoDBConfigServerClusterImpl.java         |  57 --
 .../sharding/MongoDBConfigServerDriver.java     |  25 -
 .../sharding/MongoDBConfigServerImpl.java       |  36 -
 .../sharding/MongoDBConfigServerSshDriver.java  |  43 --
 .../nosql/mongodb/sharding/MongoDBRouter.java   |  51 --
 .../mongodb/sharding/MongoDBRouterCluster.java  |  54 --
 .../sharding/MongoDBRouterClusterImpl.java      | 101 ---
 .../mongodb/sharding/MongoDBRouterDriver.java   |  25 -
 .../mongodb/sharding/MongoDBRouterImpl.java     |  85 ---
 .../sharding/MongoDBRouterSshDriver.java        |  52 --
 .../mongodb/sharding/MongoDBShardCluster.java   |  27 -
 .../sharding/MongoDBShardClusterImpl.java       | 179 -----
 .../sharding/MongoDBShardedDeployment.java      | 102 ---
 .../sharding/MongoDBShardedDeploymentImpl.java  | 147 ----
 .../entity/nosql/redis/RedisCluster.java        |  41 --
 .../entity/nosql/redis/RedisClusterImpl.java    | 130 ----
 .../brooklyn/entity/nosql/redis/RedisShard.java |  26 -
 .../entity/nosql/redis/RedisShardImpl.java      |  26 -
 .../brooklyn/entity/nosql/redis/RedisSlave.java |  42 --
 .../entity/nosql/redis/RedisSlaveImpl.java      |  34 -
 .../brooklyn/entity/nosql/redis/RedisStore.java |  69 --
 .../entity/nosql/redis/RedisStoreDriver.java    |  27 -
 .../entity/nosql/redis/RedisStoreImpl.java      | 154 -----
 .../entity/nosql/redis/RedisStoreSshDriver.java | 131 ----
 .../brooklyn/entity/nosql/riak/RiakCluster.java |  65 --
 .../entity/nosql/riak/RiakClusterImpl.java      | 264 -------
 .../brooklyn/entity/nosql/riak/RiakNode.java    | 238 -------
 .../entity/nosql/riak/RiakNodeDriver.java       |  48 --
 .../entity/nosql/riak/RiakNodeImpl.java         | 306 ---------
 .../entity/nosql/riak/RiakNodeSshDriver.java    | 614 -----------------
 .../brooklyn/entity/nosql/solr/SolrServer.java  |  81 ---
 .../entity/nosql/solr/SolrServerDriver.java     |  30 -
 .../entity/nosql/solr/SolrServerImpl.java       |  75 --
 .../entity/nosql/solr/SolrServerSshDriver.java  | 158 -----
 .../nosql/cassandra/CassandraCluster.java       |  30 +
 .../nosql/cassandra/CassandraClusterImpl.java   |  27 +
 .../nosql/cassandra/CassandraDatacenter.java    | 215 ++++++
 .../cassandra/CassandraDatacenterImpl.java      | 625 +++++++++++++++++
 .../entity/nosql/cassandra/CassandraFabric.java |  80 +++
 .../nosql/cassandra/CassandraFabricImpl.java    | 395 +++++++++++
 .../entity/nosql/cassandra/CassandraNode.java   | 231 +++++++
 .../nosql/cassandra/CassandraNodeDriver.java    |  47 ++
 .../nosql/cassandra/CassandraNodeImpl.java      | 594 ++++++++++++++++
 .../nosql/cassandra/CassandraNodeSshDriver.java | 420 +++++++++++
 .../entity/nosql/cassandra/TokenGenerator.java  |  49 ++
 .../entity/nosql/cassandra/TokenGenerators.java | 192 ++++++
 .../nosql/couchbase/CouchbaseCluster.java       | 134 ++++
 .../nosql/couchbase/CouchbaseClusterImpl.java   | 597 ++++++++++++++++
 .../entity/nosql/couchbase/CouchbaseNode.java   | 159 +++++
 .../nosql/couchbase/CouchbaseNodeDriver.java    |  41 ++
 .../nosql/couchbase/CouchbaseNodeImpl.java      | 269 ++++++++
 .../nosql/couchbase/CouchbaseNodeSshDriver.java | 512 ++++++++++++++
 .../nosql/couchbase/CouchbaseSyncGateway.java   |  75 ++
 .../couchbase/CouchbaseSyncGatewayDriver.java   |  27 +
 .../couchbase/CouchbaseSyncGatewayImpl.java     |  82 +++
 .../CouchbaseSyncGatewaySshDriver.java          | 167 +++++
 .../entity/nosql/couchdb/CouchDBCluster.java    |  48 ++
 .../nosql/couchdb/CouchDBClusterImpl.java       |  51 ++
 .../entity/nosql/couchdb/CouchDBNode.java       |  66 ++
 .../entity/nosql/couchdb/CouchDBNodeDriver.java |  37 +
 .../entity/nosql/couchdb/CouchDBNodeImpl.java   | 106 +++
 .../nosql/couchdb/CouchDBNodeSshDriver.java     | 153 +++++
 .../elasticsearch/ElasticSearchCluster.java     |  40 ++
 .../elasticsearch/ElasticSearchClusterImpl.java |  45 ++
 .../nosql/elasticsearch/ElasticSearchNode.java  |  88 +++
 .../elasticsearch/ElasticSearchNodeDriver.java  |  25 +
 .../elasticsearch/ElasticSearchNodeImpl.java    | 110 +++
 .../ElasticSearchNodeSshDriver.java             | 139 ++++
 .../nosql/mongodb/AbstractMongoDBServer.java    |  61 ++
 .../nosql/mongodb/AbstractMongoDBSshDriver.java | 175 +++++
 .../entity/nosql/mongodb/MongoDBClient.java     |  65 ++
 .../nosql/mongodb/MongoDBClientDriver.java      |  25 +
 .../entity/nosql/mongodb/MongoDBClientImpl.java |  43 ++
 .../nosql/mongodb/MongoDBClientSshDriver.java   | 147 ++++
 .../nosql/mongodb/MongoDBClientSupport.java     | 263 +++++++
 .../entity/nosql/mongodb/MongoDBDriver.java     |  24 +
 .../entity/nosql/mongodb/MongoDBReplicaSet.java |  84 +++
 .../nosql/mongodb/MongoDBReplicaSetImpl.java    | 404 +++++++++++
 .../entity/nosql/mongodb/MongoDBServer.java     | 152 ++++
 .../entity/nosql/mongodb/MongoDBServerImpl.java | 214 ++++++
 .../entity/nosql/mongodb/MongoDBSshDriver.java  |  57 ++
 .../entity/nosql/mongodb/ReplicaSetConfig.java  | 278 ++++++++
 .../nosql/mongodb/ReplicaSetMemberStatus.java   |  66 ++
 .../sharding/CoLocatedMongoDBRouter.java        |  59 ++
 .../sharding/CoLocatedMongoDBRouterImpl.java    |  70 ++
 .../mongodb/sharding/MongoDBConfigServer.java   |  28 +
 .../sharding/MongoDBConfigServerCluster.java    |  35 +
 .../MongoDBConfigServerClusterImpl.java         |  57 ++
 .../sharding/MongoDBConfigServerDriver.java     |  25 +
 .../sharding/MongoDBConfigServerImpl.java       |  36 +
 .../sharding/MongoDBConfigServerSshDriver.java  |  44 ++
 .../nosql/mongodb/sharding/MongoDBRouter.java   |  52 ++
 .../mongodb/sharding/MongoDBRouterCluster.java  |  54 ++
 .../sharding/MongoDBRouterClusterImpl.java      | 101 +++
 .../mongodb/sharding/MongoDBRouterDriver.java   |  25 +
 .../mongodb/sharding/MongoDBRouterImpl.java     |  86 +++
 .../sharding/MongoDBRouterSshDriver.java        |  52 ++
 .../mongodb/sharding/MongoDBShardCluster.java   |  27 +
 .../sharding/MongoDBShardClusterImpl.java       | 179 +++++
 .../sharding/MongoDBShardedDeployment.java      | 103 +++
 .../sharding/MongoDBShardedDeploymentImpl.java  | 147 ++++
 .../entity/nosql/redis/RedisCluster.java        |  41 ++
 .../entity/nosql/redis/RedisClusterImpl.java    | 130 ++++
 .../brooklyn/entity/nosql/redis/RedisShard.java |  26 +
 .../entity/nosql/redis/RedisShardImpl.java      |  26 +
 .../brooklyn/entity/nosql/redis/RedisSlave.java |  42 ++
 .../entity/nosql/redis/RedisSlaveImpl.java      |  34 +
 .../brooklyn/entity/nosql/redis/RedisStore.java |  69 ++
 .../entity/nosql/redis/RedisStoreDriver.java    |  27 +
 .../entity/nosql/redis/RedisStoreImpl.java      | 154 +++++
 .../entity/nosql/redis/RedisStoreSshDriver.java | 131 ++++
 .../brooklyn/entity/nosql/riak/RiakCluster.java |  65 ++
 .../entity/nosql/riak/RiakClusterImpl.java      | 264 +++++++
 .../brooklyn/entity/nosql/riak/RiakNode.java    | 238 +++++++
 .../entity/nosql/riak/RiakNodeDriver.java       |  48 ++
 .../entity/nosql/riak/RiakNodeImpl.java         | 306 +++++++++
 .../entity/nosql/riak/RiakNodeSshDriver.java    | 614 +++++++++++++++++
 .../brooklyn/entity/nosql/solr/SolrServer.java  |  81 +++
 .../entity/nosql/solr/SolrServerDriver.java     |  30 +
 .../entity/nosql/solr/SolrServerImpl.java       |  75 ++
 .../entity/nosql/solr/SolrServerSshDriver.java  | 158 +++++
 .../entity/nosql/cassandra/cassandra-1.2.yaml   | 644 -----------------
 .../entity/nosql/cassandra/cassandra-2.0.yaml   | 688 -------------------
 .../cassandra/cassandra-multicloud-snitch.jar   | Bin 4729 -> 0 bytes
 .../cassandra/cassandra-multicloud-snitch.txt   |  33 -
 .../nosql/cassandra/cassandra-rackdc.properties |   6 -
 .../entity/nosql/couchbase/pillowfight.yaml     |  77 ---
 .../brooklyn/entity/nosql/couchdb/couch.ini     |  17 -
 .../brooklyn/entity/nosql/couchdb/couch.uri     |   2 -
 .../entity/nosql/mongodb/default-mongod.conf    |   7 -
 .../brooklyn/entity/nosql/mongodb/default.conf  |   2 -
 .../brooklyn/entity/nosql/redis/redis.conf      |  13 -
 .../brooklyn/entity/nosql/redis/slave.conf      |  16 -
 .../brooklyn/entity/nosql/riak/app.config       | 353 ----------
 .../nosql/riak/riak-cluster-with-solr.yaml      |  35 -
 .../brooklyn/entity/nosql/riak/riak-mac.conf    | 494 -------------
 .../nosql/riak/riak-with-webapp-cluster.yaml    |  42 --
 .../entity/nosql/riak/riak-with-webapp.yaml     |  36 -
 .../brooklyn/entity/nosql/riak/riak.conf        | 494 -------------
 .../brooklyn/entity/nosql/riak/riak.md          |  67 --
 .../brooklyn/entity/nosql/riak/riak.png         | Bin 110651 -> 0 bytes
 .../brooklyn/entity/nosql/riak/vm.args          |  64 --
 .../brooklyn/entity/nosql/solr/solr.xml         |  19 -
 .../entity/nosql/cassandra/cassandra-1.2.yaml   | 644 +++++++++++++++++
 .../entity/nosql/cassandra/cassandra-2.0.yaml   | 688 +++++++++++++++++++
 .../cassandra/cassandra-multicloud-snitch.jar   | Bin 0 -> 4729 bytes
 .../cassandra/cassandra-multicloud-snitch.txt   |  33 +
 .../nosql/cassandra/cassandra-rackdc.properties |   6 +
 .../entity/nosql/couchbase/pillowfight.yaml     |  77 +++
 .../brooklyn/entity/nosql/couchdb/couch.ini     |  17 +
 .../brooklyn/entity/nosql/couchdb/couch.uri     |   2 +
 .../entity/nosql/mongodb/default-mongod.conf    |   7 +
 .../brooklyn/entity/nosql/mongodb/default.conf  |   2 +
 .../brooklyn/entity/nosql/redis/redis.conf      |  13 +
 .../brooklyn/entity/nosql/redis/slave.conf      |  16 +
 .../brooklyn/entity/nosql/riak/app.config       | 353 ++++++++++
 .../nosql/riak/riak-cluster-with-solr.yaml      |  35 +
 .../brooklyn/entity/nosql/riak/riak-mac.conf    | 494 +++++++++++++
 .../nosql/riak/riak-with-webapp-cluster.yaml    |  42 ++
 .../entity/nosql/riak/riak-with-webapp.yaml     |  36 +
 .../apache/brooklyn/entity/nosql/riak/riak.conf | 494 +++++++++++++
 .../apache/brooklyn/entity/nosql/riak/riak.md   |  67 ++
 .../apache/brooklyn/entity/nosql/riak/riak.png  | Bin 0 -> 110651 bytes
 .../apache/brooklyn/entity/nosql/riak/vm.args   |  64 ++
 .../apache/brooklyn/entity/nosql/solr/solr.xml  |  19 +
 .../cassandra/AbstractCassandraNodeTest.java    |  41 --
 .../entity/nosql/cassandra/AstyanaxSupport.java | 330 ---------
 .../CassandraDatacenterIntegrationTest.java     | 149 ----
 .../cassandra/CassandraDatacenterLiveTest.java  | 308 ---------
 ...assandraDatacenterRebindIntegrationTest.java |  97 ---
 .../cassandra/CassandraDatacenterTest.java      | 233 -------
 .../nosql/cassandra/CassandraFabricTest.java    | 184 -----
 .../cassandra/CassandraNodeEc2LiveTest.java     |  50 --
 .../cassandra/CassandraNodeIntegrationTest.java | 190 -----
 .../nosql/cassandra/CassandraNodeLiveTest.java  |  74 --
 .../cassandra/NonNegTokenGeneratorTest.java     | 117 ----
 .../cassandra/PosNegTokenGeneratorTest.java     |  58 --
 .../nosql/couchbase/CouchbaseOfflineTest.java   |  62 --
 .../CouchbaseSyncGatewayEc2LiveTest.java        | 137 ----
 .../nosql/couchdb/AbstractCouchDBNodeTest.java  |  59 --
 .../nosql/couchdb/CouchDBClusterLiveTest.java   |  90 ---
 .../nosql/couchdb/CouchDBNodeEc2LiveTest.java   |  49 --
 .../couchdb/CouchDBNodeIntegrationTest.java     |  66 --
 .../nosql/couchdb/CouchDBNodeLiveTest.java      |  74 --
 .../entity/nosql/couchdb/JcouchdbSupport.java   |  77 ---
 .../ElasticSearchClusterIntegrationTest.java    | 128 ----
 .../ElasticSearchNodeIntegrationTest.java       | 112 ---
 .../nosql/mongodb/MongoDBEc2LiveTest.java       |  54 --
 .../nosql/mongodb/MongoDBIntegrationTest.java   |  91 ---
 .../mongodb/MongoDBRebindIntegrationTest.java   |  60 --
 .../mongodb/MongoDBReplicaSetEc2LiveTest.java   |  96 ---
 .../MongoDBReplicaSetIntegrationTest.java       | 206 ------
 .../mongodb/MongoDBRestartIntegrationTest.java  |  42 --
 .../nosql/mongodb/MongoDBSoftLayerLiveTest.java |  56 --
 .../entity/nosql/mongodb/MongoDBTestHelper.java | 124 ----
 .../nosql/mongodb/ReplicaSetConfigTest.java     | 239 -------
 .../MongoDBConfigServerIntegrationTest.java     |  66 --
 .../MongoDBShardedDeploymentEc2LiveTest.java    |  83 ---
 ...MongoDBShardedDeploymentIntegrationTest.java | 129 ----
 .../entity/nosql/redis/JedisSupport.java        |  74 --
 .../redis/RedisClusterIntegrationTest.java      | 109 ---
 .../entity/nosql/redis/RedisEc2LiveTest.java    |  66 --
 .../nosql/redis/RedisIntegrationTest.java       | 119 ----
 .../nosql/riak/RiakClusterEc2LiveTest.java      |  74 --
 .../entity/nosql/riak/RiakNodeEc2LiveTest.java  |  51 --
 .../riak/RiakNodeGoogleComputeLiveTest.java     |  62 --
 .../nosql/riak/RiakNodeIntegrationTest.java     |  64 --
 .../nosql/riak/RiakNodeSoftlayerLiveTest.java   |  45 --
 .../nosql/solr/AbstractSolrServerTest.java      |  41 --
 .../entity/nosql/solr/SolrJSupport.java         |  66 --
 .../nosql/solr/SolrServerEc2LiveTest.java       |  66 --
 .../nosql/solr/SolrServerIntegrationTest.java   |  84 ---
 .../entity/nosql/solr/SolrServerLiveTest.java   |  89 ---
 .../cassandra/AbstractCassandraNodeTest.java    |  42 ++
 .../entity/nosql/cassandra/AstyanaxSupport.java | 331 +++++++++
 .../CassandraDatacenterIntegrationTest.java     | 151 ++++
 .../cassandra/CassandraDatacenterLiveTest.java  | 310 +++++++++
 ...assandraDatacenterRebindIntegrationTest.java |  99 +++
 .../cassandra/CassandraDatacenterTest.java      | 235 +++++++
 .../nosql/cassandra/CassandraFabricTest.java    | 186 +++++
 .../cassandra/CassandraNodeEc2LiveTest.java     |  51 ++
 .../cassandra/CassandraNodeIntegrationTest.java | 191 +++++
 .../nosql/cassandra/CassandraNodeLiveTest.java  |  75 ++
 .../cassandra/NonNegTokenGeneratorTest.java     | 116 ++++
 .../cassandra/PosNegTokenGeneratorTest.java     |  57 ++
 .../nosql/couchbase/CouchbaseOfflineTest.java   |  63 ++
 .../CouchbaseSyncGatewayEc2LiveTest.java        | 140 ++++
 .../nosql/couchdb/AbstractCouchDBNodeTest.java  |  60 ++
 .../nosql/couchdb/CouchDBClusterLiveTest.java   |  92 +++
 .../nosql/couchdb/CouchDBNodeEc2LiveTest.java   |  50 ++
 .../couchdb/CouchDBNodeIntegrationTest.java     |  67 ++
 .../nosql/couchdb/CouchDBNodeLiveTest.java      |  75 ++
 .../entity/nosql/couchdb/JcouchdbSupport.java   |  78 +++
 .../ElasticSearchClusterIntegrationTest.java    | 130 ++++
 .../ElasticSearchNodeIntegrationTest.java       | 113 +++
 .../nosql/mongodb/MongoDBEc2LiveTest.java       |  56 ++
 .../nosql/mongodb/MongoDBIntegrationTest.java   |  92 +++
 .../mongodb/MongoDBRebindIntegrationTest.java   |  61 ++
 .../mongodb/MongoDBReplicaSetEc2LiveTest.java   |  98 +++
 .../MongoDBReplicaSetIntegrationTest.java       | 208 ++++++
 .../mongodb/MongoDBRestartIntegrationTest.java  |  43 ++
 .../nosql/mongodb/MongoDBSoftLayerLiveTest.java |  57 ++
 .../entity/nosql/mongodb/MongoDBTestHelper.java | 126 ++++
 .../nosql/mongodb/ReplicaSetConfigTest.java     | 240 +++++++
 .../MongoDBConfigServerIntegrationTest.java     |  67 ++
 .../MongoDBShardedDeploymentEc2LiveTest.java    |  86 +++
 ...MongoDBShardedDeploymentIntegrationTest.java | 135 ++++
 .../entity/nosql/redis/JedisSupport.java        |  77 +++
 .../redis/RedisClusterIntegrationTest.java      | 112 +++
 .../entity/nosql/redis/RedisEc2LiveTest.java    |  67 ++
 .../nosql/redis/RedisIntegrationTest.java       | 120 ++++
 .../nosql/riak/RiakClusterEc2LiveTest.java      |  76 ++
 .../entity/nosql/riak/RiakNodeEc2LiveTest.java  |  52 ++
 .../riak/RiakNodeGoogleComputeLiveTest.java     |  64 ++
 .../nosql/riak/RiakNodeIntegrationTest.java     |  65 ++
 .../nosql/riak/RiakNodeSoftlayerLiveTest.java   |  46 ++
 .../nosql/solr/AbstractSolrServerTest.java      |  42 ++
 .../entity/nosql/solr/SolrJSupport.java         |  67 ++
 .../nosql/solr/SolrServerEc2LiveTest.java       |  67 ++
 .../nosql/solr/SolrServerIntegrationTest.java   |  85 +++
 .../entity/nosql/solr/SolrServerLiveTest.java   |  90 +++
 .../main/resources/brooklyn/default.catalog.bom |   2 +-
 .../src/test/resources/cassandra-blueprint.yaml |   2 +-
 .../resources/couchbase-cluster-singleNode.yaml |   2 +-
 .../src/test/resources/couchbase-cluster.yaml   |   2 +-
 .../src/test/resources/couchbase-node.yaml      |   2 +-
 .../couchbase-replication-w-pillowfight.yaml    |   6 +-
 .../src/test/resources/couchbase-w-loadgen.yaml |   4 +-
 .../test/resources/couchbase-w-pillowfight.yaml |   4 +-
 .../src/test/resources/mongo-blueprint.yaml     |   2 +-
 .../resources/mongo-client-single-server.yaml   |   4 +-
 .../src/test/resources/mongo-scripts.yaml       |   4 +-
 .../src/test/resources/mongo-sharded.yaml       |   4 +-
 .../mongo-single-server-blueprint.yaml          |   2 +-
 usage/launcher/src/test/resources/playing.yaml  |   2 +-
 .../ApplicationResourceIntegrationTest.java     |   4 +-
 .../ApplicationResourceIntegrationTest.java     |   2 +-
 .../rest/resources/CatalogResourceTest.java     |   6 +-
 349 files changed, 19879 insertions(+), 19793 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/docs/_extra/big_examples/nosql-cassandra/cassandra.include.md
----------------------------------------------------------------------
diff --git a/docs/_extra/big_examples/nosql-cassandra/cassandra.include.md b/docs/_extra/big_examples/nosql-cassandra/cassandra.include.md
index 56319f8..a4d1643 100644
--- a/docs/_extra/big_examples/nosql-cassandra/cassandra.include.md
+++ b/docs/_extra/big_examples/nosql-cassandra/cassandra.include.md
@@ -189,7 +189,7 @@ public class WideAreaCassandraCluster extends AbstractApplication {
         .configure(CassandraCluster.CLUSTER_NAME, "Brooklyn")
         .configure(CassandraCluster.INITIAL_SIZE, 2) // per location
         .configure(CassandraCluster.ENDPOINT_SNITCH_NAME, "brooklyn.entity.nosql.cassandra.customsnitch.MultiCloudSnitch")
-        .configure(CassandraNode.CUSTOM_SNITCH_JAR_URL, "classpath://brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar"));
+        .configure(CassandraNode.CUSTOM_SNITCH_JAR_URL, "classpath://org/apache/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar"));
   }
 }
 {% endhighlight %}
@@ -203,7 +203,7 @@ public class WideAreaCassandraCluster extends AbstractApplication {
         .configure(CassandraCluster.CLUSTER_NAME, "Brooklyn")
         .configure(CassandraCluster.INITIAL_SIZE, 2) // per location
         .configure(CassandraCluster.ENDPOINT_SNITCH_NAME, "brooklyn.entity.nosql.cassandra.customsnitch.MultiCloudSnitch")
-        .configure(CassandraNode.CUSTOM_SNITCH_JAR_URL, "classpath://brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar")
+        .configure(CassandraNode.CUSTOM_SNITCH_JAR_URL, "classpath://org/apache/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar")
         .configure(CassandraFabric.MEMBER_SPEC, EntitySpec.create(CassandraCluster.class)
             .configure(CassandraCluster.MEMBER_SPEC, EntitySpec.create(CassandraNode.class)
                 .policy(PolicySpec.create(ServiceFailureDetector.class))

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/docs/guide/ops/catalog/index.md
----------------------------------------------------------------------
diff --git a/docs/guide/ops/catalog/index.md b/docs/guide/ops/catalog/index.md
index d24e200..9a65448 100644
--- a/docs/guide/ops/catalog/index.md
+++ b/docs/guide/ops/catalog/index.md
@@ -141,7 +141,7 @@ brooklyn.catalog:
   id: datastore
   version: 1.0
   itemType: template
-  iconUrl: classpath://brooklyn/entity/nosql/riak/riak.png
+  iconUrl: classpath://org/apache/brooklyn/entity/nosql/riak/riak.png
   name: Datastore (Riak)
   description: Riak is an open-source NoSQL key-value data store.
   item:
@@ -157,7 +157,7 @@ This YAML will install three items:
 ~~~ yaml
 brooklyn.catalog:
   version: 1.1
-  iconUrl: classpath://brooklyn/entity/nosql/riak/riak.png
+  iconUrl: classpath://org/apache/brooklyn/entity/nosql/riak/riak.png
   description: Riak is an open-source NoSQL key-value data store.
   items:
     - id: riak-node

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/CumulusRDFApplication.java
----------------------------------------------------------------------
diff --git a/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/CumulusRDFApplication.java b/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/CumulusRDFApplication.java
index 5b521bd..b62c028 100644
--- a/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/CumulusRDFApplication.java
+++ b/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/CumulusRDFApplication.java
@@ -27,6 +27,10 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.brooklyn.catalog.Catalog;
 import org.apache.brooklyn.catalog.CatalogConfig;
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraDatacenter;
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraFabric;
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraNode;
+
 import brooklyn.config.ConfigKey;
 import brooklyn.entity.Effector;
 import brooklyn.entity.Entity;
@@ -41,9 +45,6 @@ import brooklyn.entity.effector.EffectorBody;
 import brooklyn.entity.effector.Effectors;
 import brooklyn.entity.java.UsesJava;
 import brooklyn.entity.java.UsesJmx;
-import brooklyn.entity.nosql.cassandra.CassandraDatacenter;
-import brooklyn.entity.nosql.cassandra.CassandraFabric;
-import brooklyn.entity.nosql.cassandra.CassandraNode;
 import brooklyn.entity.proxying.EntitySpec;
 import brooklyn.entity.software.SshEffectorTasks;
 import brooklyn.entity.trait.Startable;
@@ -138,8 +139,8 @@ public class CumulusRDFApplication extends AbstractApplication {
             cassandra = addChild(EntitySpec.create(CassandraFabric.class)
                     .configure(CassandraDatacenter.CLUSTER_NAME, "Brooklyn")
                     .configure(CassandraDatacenter.INITIAL_SIZE, getConfig(CASSANDRA_CLUSTER_SIZE)) // per location
-                    .configure(CassandraDatacenter.ENDPOINT_SNITCH_NAME, "brooklyn.entity.nosql.cassandra.customsnitch.MultiCloudSnitch")
-                    .configure(CassandraNode.CUSTOM_SNITCH_JAR_URL, "classpath://brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar")
+                    .configure(CassandraDatacenter.ENDPOINT_SNITCH_NAME, "org.apache.brooklyn.entity.nosql.cassandra.customsnitch.MultiCloudSnitch")
+                    .configure(CassandraNode.CUSTOM_SNITCH_JAR_URL, "classpath://org/apache/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar")
                     .configure(CassandraFabric.MEMBER_SPEC, clusterSpec));
         } else {
             cassandra = addChild(EntitySpec.create(clusterSpec)

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/HighAvailabilityCassandraCluster.java
----------------------------------------------------------------------
diff --git a/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/HighAvailabilityCassandraCluster.java b/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/HighAvailabilityCassandraCluster.java
index a380798..4328269 100644
--- a/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/HighAvailabilityCassandraCluster.java
+++ b/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/HighAvailabilityCassandraCluster.java
@@ -22,13 +22,14 @@ import java.util.List;
 
 import org.apache.brooklyn.catalog.Catalog;
 import org.apache.brooklyn.catalog.CatalogConfig;
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraDatacenter;
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraNode;
+
 import brooklyn.config.ConfigKey;
 import brooklyn.entity.basic.AbstractApplication;
 import brooklyn.entity.basic.ConfigKeys;
 import brooklyn.entity.basic.Entities;
 import brooklyn.entity.basic.StartableApplication;
-import brooklyn.entity.nosql.cassandra.CassandraDatacenter;
-import brooklyn.entity.nosql.cassandra.CassandraNode;
 import brooklyn.entity.proxying.EntitySpec;
 import brooklyn.launcher.BrooklynLauncher;
 import brooklyn.policy.EnricherSpec;

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/ResilientMongoDbApp.java
----------------------------------------------------------------------
diff --git a/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/ResilientMongoDbApp.java b/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/ResilientMongoDbApp.java
index 3824502..2af0a81 100644
--- a/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/ResilientMongoDbApp.java
+++ b/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/ResilientMongoDbApp.java
@@ -24,6 +24,9 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.brooklyn.catalog.Catalog;
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBReplicaSet;
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBServer;
+
 import brooklyn.enricher.Enrichers;
 import brooklyn.entity.Entity;
 import brooklyn.entity.basic.AbstractApplication;
@@ -31,8 +34,6 @@ import brooklyn.entity.basic.Entities;
 import brooklyn.entity.basic.SoftwareProcess;
 import brooklyn.entity.basic.StartableApplication;
 import brooklyn.entity.group.DynamicCluster;
-import brooklyn.entity.nosql.mongodb.MongoDBReplicaSet;
-import brooklyn.entity.nosql.mongodb.MongoDBServer;
 import brooklyn.entity.proxying.EntitySpec;
 import brooklyn.event.SensorEvent;
 import brooklyn.event.SensorEventListener;

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/RiakClusterExample.java
----------------------------------------------------------------------
diff --git a/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/RiakClusterExample.java b/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/RiakClusterExample.java
index 1d61a45..fe5edc9 100644
--- a/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/RiakClusterExample.java
+++ b/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/RiakClusterExample.java
@@ -22,13 +22,14 @@ import java.util.List;
 
 import org.apache.brooklyn.catalog.Catalog;
 import org.apache.brooklyn.catalog.CatalogConfig;
+import org.apache.brooklyn.entity.nosql.riak.RiakCluster;
+import org.apache.brooklyn.entity.nosql.riak.RiakNode;
+
 import brooklyn.config.ConfigKey;
 import brooklyn.entity.basic.AbstractApplication;
 import brooklyn.entity.basic.ConfigKeys;
 import brooklyn.entity.basic.Entities;
 import brooklyn.entity.basic.StartableApplication;
-import brooklyn.entity.nosql.riak.RiakCluster;
-import brooklyn.entity.nosql.riak.RiakNode;
 import brooklyn.entity.proxying.EntitySpec;
 import brooklyn.launcher.BrooklynLauncher;
 import brooklyn.policy.EnricherSpec;

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/SimpleCassandraCluster.java
----------------------------------------------------------------------
diff --git a/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/SimpleCassandraCluster.java b/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/SimpleCassandraCluster.java
index 2ba6ba3..fe9059f 100644
--- a/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/SimpleCassandraCluster.java
+++ b/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/SimpleCassandraCluster.java
@@ -20,10 +20,11 @@ package org.apache.brooklyn.demo;
 
 import java.util.List;
 
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraDatacenter;
+
 import brooklyn.entity.basic.AbstractApplication;
 import brooklyn.entity.basic.Entities;
 import brooklyn.entity.basic.StartableApplication;
-import brooklyn.entity.nosql.cassandra.CassandraDatacenter;
 import brooklyn.entity.proxying.EntitySpec;
 import brooklyn.launcher.BrooklynLauncher;
 import brooklyn.util.CommandLineUtil;

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/SimpleCouchDBCluster.java
----------------------------------------------------------------------
diff --git a/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/SimpleCouchDBCluster.java b/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/SimpleCouchDBCluster.java
index 0919c24..9fef1af 100644
--- a/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/SimpleCouchDBCluster.java
+++ b/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/SimpleCouchDBCluster.java
@@ -18,8 +18,9 @@
  */
 package org.apache.brooklyn.demo;
 
+import org.apache.brooklyn.entity.nosql.couchdb.CouchDBCluster;
+
 import brooklyn.entity.basic.ApplicationBuilder;
-import brooklyn.entity.nosql.couchdb.CouchDBCluster;
 import brooklyn.entity.proxying.EntitySpec;
 
 /** CouchDB cluster. */

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/SimpleMongoDBReplicaSet.java
----------------------------------------------------------------------
diff --git a/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/SimpleMongoDBReplicaSet.java b/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/SimpleMongoDBReplicaSet.java
index 249431c..a24cf4d 100644
--- a/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/SimpleMongoDBReplicaSet.java
+++ b/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/SimpleMongoDBReplicaSet.java
@@ -18,9 +18,10 @@
  */
 package org.apache.brooklyn.demo;
 
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBReplicaSet;
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBServer;
+
 import brooklyn.entity.basic.ApplicationBuilder;
-import brooklyn.entity.nosql.mongodb.MongoDBReplicaSet;
-import brooklyn.entity.nosql.mongodb.MongoDBServer;
 import brooklyn.entity.proxying.EntitySpec;
 
 public class SimpleMongoDBReplicaSet extends ApplicationBuilder {

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/SimpleRedisCluster.java
----------------------------------------------------------------------
diff --git a/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/SimpleRedisCluster.java b/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/SimpleRedisCluster.java
index 96268c3..27acc35 100644
--- a/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/SimpleRedisCluster.java
+++ b/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/SimpleRedisCluster.java
@@ -18,8 +18,9 @@
  */
 package org.apache.brooklyn.demo;
 
+import org.apache.brooklyn.entity.nosql.redis.RedisCluster;
+
 import brooklyn.entity.basic.ApplicationBuilder;
-import brooklyn.entity.nosql.redis.RedisCluster;
 import brooklyn.entity.proxying.EntitySpec;
 
 /** Redis cluster. */

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/WideAreaCassandraCluster.java
----------------------------------------------------------------------
diff --git a/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/WideAreaCassandraCluster.java b/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/WideAreaCassandraCluster.java
index 21156e8..6116d13 100644
--- a/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/WideAreaCassandraCluster.java
+++ b/examples/simple-nosql-cluster/src/main/java/org/apache/brooklyn/demo/WideAreaCassandraCluster.java
@@ -23,14 +23,15 @@ import java.util.List;
 
 import org.apache.brooklyn.catalog.Catalog;
 import org.apache.brooklyn.catalog.CatalogConfig;
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraDatacenter;
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraFabric;
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraNode;
+
 import brooklyn.config.ConfigKey;
 import brooklyn.entity.basic.AbstractApplication;
 import brooklyn.entity.basic.ConfigKeys;
 import brooklyn.entity.basic.Entities;
 import brooklyn.entity.basic.StartableApplication;
-import brooklyn.entity.nosql.cassandra.CassandraDatacenter;
-import brooklyn.entity.nosql.cassandra.CassandraFabric;
-import brooklyn.entity.nosql.cassandra.CassandraNode;
 import brooklyn.entity.proxying.EntitySpec;
 import brooklyn.launcher.BrooklynLauncher;
 import brooklyn.policy.EnricherSpec;
@@ -58,8 +59,8 @@ public class WideAreaCassandraCluster extends AbstractApplication {
         addChild(EntitySpec.create(CassandraFabric.class)
                 .configure(CassandraDatacenter.CLUSTER_NAME, "Brooklyn")
                 .configure(CassandraDatacenter.INITIAL_SIZE, getConfig(CASSANDRA_CLUSTER_SIZE)) // per location
-                .configure(CassandraDatacenter.ENDPOINT_SNITCH_NAME, "brooklyn.entity.nosql.cassandra.customsnitch.MultiCloudSnitch")
-                .configure(CassandraNode.CUSTOM_SNITCH_JAR_URL, "classpath://brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar")
+                .configure(CassandraDatacenter.ENDPOINT_SNITCH_NAME, "org.apache.brooklyn.entity.nosql.cassandra.customsnitch.MultiCloudSnitch")
+                .configure(CassandraNode.CUSTOM_SNITCH_JAR_URL, "classpath://org/apache/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar")
                 .configure(CassandraFabric.MEMBER_SPEC, EntitySpec.create(CassandraDatacenter.class)
                         .configure(CassandraDatacenter.MEMBER_SPEC, EntitySpec.create(CassandraNode.class)
                                 .enricher(EnricherSpec.create(ServiceFailureDetector.class))

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/examples/simple-nosql-cluster/src/main/resources/org/apache/brooklyn/demo/ha-cassandra-cluster.yaml
----------------------------------------------------------------------
diff --git a/examples/simple-nosql-cluster/src/main/resources/org/apache/brooklyn/demo/ha-cassandra-cluster.yaml b/examples/simple-nosql-cluster/src/main/resources/org/apache/brooklyn/demo/ha-cassandra-cluster.yaml
index 4a9ebc7..d1c6ab4 100644
--- a/examples/simple-nosql-cluster/src/main/resources/org/apache/brooklyn/demo/ha-cassandra-cluster.yaml
+++ b/examples/simple-nosql-cluster/src/main/resources/org/apache/brooklyn/demo/ha-cassandra-cluster.yaml
@@ -22,7 +22,7 @@ name: HA Cassandra cluster
 location: aws-ec2:us-east-1
 
 services:
-- type: brooklyn.entity.nosql.cassandra.CassandraDatacenter
+- type: org.apache.brooklyn.entity.nosql.cassandra.CassandraDatacenter
   name: Brooklyn
   brooklyn.config:
     initialSize: 6
@@ -36,7 +36,7 @@ services:
     snitchName: GossipingPropertyFileSnitch
     memberSpec:
       $brooklyn:entitySpec:
-        type: brooklyn.entity.nosql.cassandra.CassandraNode
+        type: org.apache.brooklyn.entity.nosql.cassandra.CassandraNode
         brookyn.policies:
         - type: brooklyn.policy.ha.ServiceRestarter
         brooklyn.enrichers:

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/examples/simple-nosql-cluster/src/main/resources/org/apache/brooklyn/demo/simple-cassandra-cluster.yaml
----------------------------------------------------------------------
diff --git a/examples/simple-nosql-cluster/src/main/resources/org/apache/brooklyn/demo/simple-cassandra-cluster.yaml b/examples/simple-nosql-cluster/src/main/resources/org/apache/brooklyn/demo/simple-cassandra-cluster.yaml
index ff7b48c..346ea78 100644
--- a/examples/simple-nosql-cluster/src/main/resources/org/apache/brooklyn/demo/simple-cassandra-cluster.yaml
+++ b/examples/simple-nosql-cluster/src/main/resources/org/apache/brooklyn/demo/simple-cassandra-cluster.yaml
@@ -22,7 +22,7 @@ name: Simple Cassandra cluster
 location: localhost
 
 services:
-- type: brooklyn.entity.nosql.cassandra.CassandraDatacenter
+- type: org.apache.brooklyn.entity.nosql.cassandra.CassandraDatacenter
   name: Brooklyn
   brooklyn.config:
     initialSize: 1

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/examples/simple-nosql-cluster/src/main/resources/org/apache/brooklyn/demo/wide-area-cassandra-cluster.yaml
----------------------------------------------------------------------
diff --git a/examples/simple-nosql-cluster/src/main/resources/org/apache/brooklyn/demo/wide-area-cassandra-cluster.yaml b/examples/simple-nosql-cluster/src/main/resources/org/apache/brooklyn/demo/wide-area-cassandra-cluster.yaml
index 3e44e39..0cf39da 100644
--- a/examples/simple-nosql-cluster/src/main/resources/org/apache/brooklyn/demo/wide-area-cassandra-cluster.yaml
+++ b/examples/simple-nosql-cluster/src/main/resources/org/apache/brooklyn/demo/wide-area-cassandra-cluster.yaml
@@ -24,15 +24,15 @@ locations:
 - rackspace-cloudservers-uk
 
 services:
-- type: brooklyn.entity.nosql.cassandra.CassandraFabric
+- type: org.apache.brooklyn.entity.nosql.cassandra.CassandraFabric
   name: Brooklyn
   brooklyn.config:
     initialSize: 2
-    snitchName: brooklyn.entity.nosql.cassandra.customsnitch.MultiCloudSnitch
-    customSnitchJarUrl: classpath://brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar
+    snitchName: org.apache.brooklyn.entity.nosql.cassandra.customsnitch.MultiCloudSnitch
+    customSnitchJarUrl: classpath://org/apache/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar
     memberSpec:
       $brooklyn:entitySpec:
-        type: brooklyn.entity.nosql.cassandra.CassandraNode
+        type: org.apache.brooklyn.entity.nosql.cassandra.CassandraNode
         brookyn.policies:
         - type: brooklyn.policy.ha.ServiceRestarter
         brooklyn.enrichers:

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/examples/simple-web-cluster/src/main/java/org/apache/brooklyn/demo/NodeJsTodoApplication.java
----------------------------------------------------------------------
diff --git a/examples/simple-web-cluster/src/main/java/org/apache/brooklyn/demo/NodeJsTodoApplication.java b/examples/simple-web-cluster/src/main/java/org/apache/brooklyn/demo/NodeJsTodoApplication.java
index 7ae1afd..993b549 100644
--- a/examples/simple-web-cluster/src/main/java/org/apache/brooklyn/demo/NodeJsTodoApplication.java
+++ b/examples/simple-web-cluster/src/main/java/org/apache/brooklyn/demo/NodeJsTodoApplication.java
@@ -20,11 +20,12 @@ package org.apache.brooklyn.demo;
 
 import static brooklyn.event.basic.DependentConfiguration.attributeWhenReady;
 import org.apache.brooklyn.catalog.Catalog;
+import org.apache.brooklyn.entity.nosql.redis.RedisStore;
+
 import brooklyn.entity.basic.AbstractApplication;
 import brooklyn.entity.basic.Attributes;
 import brooklyn.entity.basic.SoftwareProcess;
 import brooklyn.entity.basic.StartableApplication;
-import brooklyn.entity.nosql.redis.RedisStore;
 import brooklyn.entity.proxying.EntitySpec;
 import brooklyn.entity.trait.Startable;
 import brooklyn.entity.webapp.nodejs.NodeJsWebAppService;

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/examples/simple-web-cluster/src/main/resources/org/apache/brooklyn/demo/nodejs-riak-todo.yaml
----------------------------------------------------------------------
diff --git a/examples/simple-web-cluster/src/main/resources/org/apache/brooklyn/demo/nodejs-riak-todo.yaml b/examples/simple-web-cluster/src/main/resources/org/apache/brooklyn/demo/nodejs-riak-todo.yaml
index d963671..7cdf546 100644
--- a/examples/simple-web-cluster/src/main/resources/org/apache/brooklyn/demo/nodejs-riak-todo.yaml
+++ b/examples/simple-web-cluster/src/main/resources/org/apache/brooklyn/demo/nodejs-riak-todo.yaml
@@ -21,7 +21,7 @@ location:
   jclouds:aws-ec2:us-west-1:
     imageId: us-west-1/ami-c33cdd87
 services:
-- type: brooklyn.entity.nosql.riak.RiakCluster
+- type: org.apache.brooklyn.entity.nosql.riak.RiakCluster
   initialSize: 2
   id: mycluster
   brooklyn.config:

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/examples/simple-web-cluster/src/main/resources/org/apache/brooklyn/demo/nodejs-todo.yaml
----------------------------------------------------------------------
diff --git a/examples/simple-web-cluster/src/main/resources/org/apache/brooklyn/demo/nodejs-todo.yaml b/examples/simple-web-cluster/src/main/resources/org/apache/brooklyn/demo/nodejs-todo.yaml
index ea17556..82bc686 100644
--- a/examples/simple-web-cluster/src/main/resources/org/apache/brooklyn/demo/nodejs-todo.yaml
+++ b/examples/simple-web-cluster/src/main/resources/org/apache/brooklyn/demo/nodejs-todo.yaml
@@ -21,7 +21,7 @@ origin: "https://github.com/amirrajan/nodejs-todo/"
 locations:
 - jclouds:softlayer:ams01
 services:
-- type: brooklyn.entity.nosql.redis.RedisStore
+- type: org.apache.brooklyn.entity.nosql.redis.RedisStore
   id: redis
   name: "Redis"
 - type: brooklyn.entity.webapp.nodejs.NodeJsWebAppService

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/pom.xml
----------------------------------------------------------------------
diff --git a/software/nosql/pom.xml b/software/nosql/pom.xml
index 2c9bd22..8acbc95 100644
--- a/software/nosql/pom.xml
+++ b/software/nosql/pom.xml
@@ -255,23 +255,23 @@
                     the given components. These are files "without any degree of creativity" from the
                     perspective of the Brooklyn/Apache contribution.
                 -->
-                <exclude>src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-1.2.yaml</exclude>
-                <exclude>src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-2.0.yaml</exclude>
-                <exclude>src/main/resources/brooklyn/entity/nosql/cassandra/cassandra-rackdc.properties</exclude>
-                <exclude>src/main/resources/brooklyn/entity/nosql/couchdb/couch.ini</exclude>
-                <exclude>src/main/resources/brooklyn/entity/nosql/couchdb/couch.uri</exclude>
-                <exclude>src/main/resources/brooklyn/entity/nosql/mongodb/default.conf</exclude>
-                <exclude>src/main/resources/brooklyn/entity/nosql/mongodb/default-mongod.conf</exclude>
+                <exclude>src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-1.2.yaml</exclude>
+                <exclude>src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-2.0.yaml</exclude>
+                <exclude>src/main/resources/org/apache/brooklyn/entity/nosql/cassandra/cassandra-rackdc.properties</exclude>
+                <exclude>src/main/resources/org/apache/brooklyn/entity/nosql/couchdb/couch.ini</exclude>
+                <exclude>src/main/resources/org/apache/brooklyn/entity/nosql/couchdb/couch.uri</exclude>
+                <exclude>src/main/resources/org/apache/brooklyn/entity/nosql/mongodb/default.conf</exclude>
+                <exclude>src/main/resources/org/apache/brooklyn/entity/nosql/mongodb/default-mongod.conf</exclude>
                 <exclude>src/test/resources/test-mongodb.conf</exclude>
                 <exclude>src/test/resources/test-mongodb-configserver.conf</exclude>
                 <exclude>src/test/resources/test-mongodb-router.conf</exclude>
-                <exclude>src/main/resources/brooklyn/entity/nosql/redis/redis.conf</exclude>
-                <exclude>src/main/resources/brooklyn/entity/nosql/redis/slave.conf</exclude>
-                <exclude>src/main/resources/brooklyn/entity/nosql/riak/app.config</exclude>
-                <exclude>src/main/resources/brooklyn/entity/nosql/riak/vm.args</exclude>
-                <exclude>src/main/resources/brooklyn/entity/nosql/riak/riak.conf</exclude>
-                <exclude>src/main/resources/brooklyn/entity/nosql/riak/riak-mac.conf</exclude>
-                <exclude>src/main/resources/brooklyn/entity/nosql/solr/solr.xml</exclude>
+                <exclude>src/main/resources/org/apache/brooklyn/entity/nosql/redis/redis.conf</exclude>
+                <exclude>src/main/resources/org/apache/brooklyn/entity/nosql/redis/slave.conf</exclude>
+                <exclude>src/main/resources/org/apache/brooklyn/entity/nosql/riak/app.config</exclude>
+                <exclude>src/main/resources/org/apache/brooklyn/entity/nosql/riak/vm.args</exclude>
+                <exclude>src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak.conf</exclude>
+                <exclude>src/main/resources/org/apache/brooklyn/entity/nosql/riak/riak-mac.conf</exclude>
+                <exclude>src/main/resources/org/apache/brooklyn/entity/nosql/solr/solr.xml</exclude>
 
                 <!--
                     The source code for cassandra-multicloud-snitch.jar is in sandbox/cassandra-multicloud-snitch.

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraCluster.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraCluster.java b/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraCluster.java
deleted file mode 100644
index afd582a..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraCluster.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import brooklyn.entity.proxying.ImplementedBy;
-
-/**
- * @deprecated since 0.7.0; use {@link CassandraDatacenter} which is equivalent but has
- * a less ambiguous name; <em>Cluster</em> in Cassandra corresponds to what Brooklyn terms a <em>Fabric</em>.
- */
-@Deprecated
-@ImplementedBy(CassandraClusterImpl.class)
-public interface CassandraCluster extends CassandraDatacenter {
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraClusterImpl.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraClusterImpl.java b/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraClusterImpl.java
deleted file mode 100644
index 340a86a..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraClusterImpl.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-/**
- * @deprecated since 0.7.0; use {@link CassandraDatacenter} which is equivalent but has
- * a less ambiguous name; <em>Cluster</em> in Cassandra corresponds to what Brooklyn terms a <em>Fabric</em>.
- */
-@Deprecated
-public class CassandraClusterImpl extends CassandraDatacenterImpl implements CassandraCluster {
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraDatacenter.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraDatacenter.java b/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraDatacenter.java
deleted file mode 100644
index a012692..0000000
--- a/software/nosql/src/main/java/brooklyn/entity/nosql/cassandra/CassandraDatacenter.java
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import java.math.BigInteger;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.brooklyn.catalog.Catalog;
-import brooklyn.config.ConfigKey;
-import brooklyn.entity.Entity;
-import brooklyn.entity.annotation.Effector;
-import brooklyn.entity.basic.ConfigKeys;
-import brooklyn.entity.basic.MethodEffector;
-import brooklyn.entity.database.DatastoreMixins;
-import brooklyn.entity.effector.Effectors;
-import brooklyn.entity.group.DynamicCluster;
-import brooklyn.entity.nosql.cassandra.TokenGenerators.PosNeg63TokenGenerator;
-import brooklyn.entity.proxying.ImplementedBy;
-import brooklyn.event.AttributeSensor;
-import brooklyn.event.basic.BasicAttributeSensorAndConfigKey;
-import brooklyn.event.basic.Sensors;
-import brooklyn.util.flags.SetFromFlag;
-import brooklyn.util.time.Duration;
-
-import com.google.common.base.Supplier;
-import com.google.common.collect.Multimap;
-import com.google.common.reflect.TypeToken;
-
-/**
- * A group of {@link CassandraNode}s -- based on Brooklyn's {@link DynamicCluster} 
- * (though it is a "Datacenter" in Cassandra terms, where Cassandra's "cluster" corresponds
- * to a Brooklyn Fabric, cf {@link CassandraFabric}). 
- * The Datacenter can be resized, manually or by policy if required.
- * Tokens are selected intelligently.
- * <p>
- * Note that due to how Cassandra assumes ports are the same across a cluster,
- * it is <em>NOT</em> possible to deploy a cluster of size larger than 1 to localhost.
- * (Some exploratory work has been done to use different 127.0.0.x IP's for localhost,
- * and there is evidence this could be made to work.)
- */
-@Catalog(name="Apache Cassandra Datacenter Cluster", description="Cassandra is a highly scalable, eventually " +
-        "consistent, distributed, structured key-value store which provides a ColumnFamily-based data model " +
-        "richer than typical key/value systems", iconUrl="classpath:///cassandra-logo.jpeg")
-@ImplementedBy(CassandraDatacenterImpl.class)
-public interface CassandraDatacenter extends DynamicCluster, DatastoreMixins.HasDatastoreUrl, DatastoreMixins.CanExecuteScript {
-
-    // FIXME datacenter name -- also CASS_CLUSTER_NODES should be CASS_DC_NODES
-    @SetFromFlag("clusterName")
-    BasicAttributeSensorAndConfigKey<String> CLUSTER_NAME = new BasicAttributeSensorAndConfigKey<String>(String.class, "cassandra.cluster.name", "Name of the Cassandra cluster", "BrooklynCluster");
-
-    @SetFromFlag("snitchName")
-    ConfigKey<String> ENDPOINT_SNITCH_NAME = ConfigKeys.newStringConfigKey("cassandra.cluster.snitchName", "Type of the Cassandra snitch", "SimpleSnitch");
-
-    @SetFromFlag("seedSupplier")
-    @SuppressWarnings("serial")
-    ConfigKey<Supplier<Set<Entity>>> SEED_SUPPLIER = ConfigKeys.newConfigKey(new TypeToken<Supplier<Set<Entity>>>() { }, "cassandra.cluster.seedSupplier", "For determining the seed nodes", null);
-
-    @SuppressWarnings("serial")
-    @SetFromFlag("tokenGeneratorClass")
-    ConfigKey<Class<? extends TokenGenerator>> TOKEN_GENERATOR_CLASS = ConfigKeys.newConfigKey(
-        new TypeToken<Class<? extends TokenGenerator>>() {}, "cassandra.cluster.tokenGenerator.class", "For determining the tokens of nodes", 
-        PosNeg63TokenGenerator.class);
-
-    @SetFromFlag("tokenShift")
-    ConfigKey<BigInteger> TOKEN_SHIFT = ConfigKeys.newConfigKey(BigInteger.class, "cassandra.cluster.tokenShift", 
-        "Delta applied to all tokens generated for this Cassandra datacenter, "
-        + "useful when configuring multiple datacenters which should be shifted; "
-        + "if not set, a random shift is applied. (Pass 0 to prevent any shift.)", null);
-
-    ConfigKey<Boolean> USE_VNODES = ConfigKeys.newBooleanConfigKey(
-            "cassandra.cluster.useVnodes",
-            "Determines whether to use vnodes; if doing so, tokens will not be explicitly assigned to nodes in the cluster",
-            false);
-
-    /**
-     * num_tokens will automatically be reset to 1 for each node if {@link #USE_VNODES} is false. 
-     */
-    ConfigKey<Integer> NUM_TOKENS_PER_NODE = ConfigKeys.newIntegerConfigKey("cassandra.numTokensPerNode",
-            "Number of tokens per node; if using vnodes, should set this to a value like 256; will be overridden to 1 if USE_VNODES==false",
-            256);
-    
-    /**
-     * Additional time after the nodes in the cluster are up when starting
-     * before announcing the cluster as up.
-     * <p>
-     * Useful to ensure nodes have synchronized.
-     * <p>
-     * On 1.2.2 this could be as much as 120s when using 2 seed nodes,
-     * or just a few seconds with 1 seed node. On 1.2.9 it seems a few
-     * seconds is sufficient even with 2 seed nodes
-     */
-    @SetFromFlag("delayBeforeAdvertisingCluster")
-    ConfigKey<Duration> DELAY_BEFORE_ADVERTISING_CLUSTER = ConfigKeys.newConfigKey(Duration.class, "cassandra.cluster.delayBeforeAdvertisingCluster", "Delay after cluster is started before checking and advertising its availability", Duration.TEN_SECONDS);
-
-    @SuppressWarnings("serial")
-    AttributeSensor<Multimap<String,Entity>> DATACENTER_USAGE = Sensors.newSensor(new TypeToken<Multimap<String,Entity>>() { }, "cassandra.cluster.datacenterUsages", "Current set of datacenters in use, with nodes in each");
-
-    @SuppressWarnings("serial")
-    AttributeSensor<Set<String>> DATACENTERS = Sensors.newSensor(new TypeToken<Set<String>>() { }, "cassandra.cluster.datacenters", "Current set of datacenters in use");
-
-    AttributeSensor<Boolean> HAS_PUBLISHED_SEEDS = Sensors.newBooleanSensor("cassandra.cluster.seeds.hasPublished", "Whether we have published any seeds");
-
-    @SuppressWarnings("serial")
-    AttributeSensor<Set<Entity>> CURRENT_SEEDS = Sensors.newSensor(new TypeToken<Set<Entity>>() { }, "cassandra.cluster.seeds.current", "Current set of seeds to use to bootstrap the cluster");
-
-    AttributeSensor<String> HOSTNAME = Sensors.newStringSensor("cassandra.cluster.hostname", "Hostname to connect to cluster with");
-
-    @SuppressWarnings("serial")
-    AttributeSensor<List<String>> CASSANDRA_CLUSTER_NODES = Sensors.newSensor(new TypeToken<List<String>>() {},
-        "cassandra.cluster.nodes", "List of host:port of all active nodes in the cluster (thrift port, and public hostname/IP)");
-
-    AttributeSensor<Integer> THRIFT_PORT = Sensors.newIntegerSensor("cassandra.cluster.thrift.port", "Cassandra Thrift RPC port to connect to cluster with");
-
-    AttributeSensor<Long> FIRST_NODE_STARTED_TIME_UTC = Sensors.newLongSensor("cassandra.cluster.first.node.started.utc", "Time (UTC) when the first node was started");
-    @SuppressWarnings("serial")
-    AttributeSensor<List<Entity>> QUEUED_START_NODES = Sensors.newSensor(new TypeToken<List<Entity>>() {}, "cassandra.cluster.start.nodes.queued",
-        "Nodes queued for starting (for sequential start)");
-    
-    AttributeSensor<Integer> SCHEMA_VERSION_COUNT = Sensors.newIntegerSensor("cassandra.cluster.schema.versions.count",
-            "Number of different schema versions in the cluster; should be 1 for a healthy cluster, 0 when off; " +
-            "2 and above indicates a Schema Disagreement Error (and keyspace access may fail)");
-
-    AttributeSensor<Long> READ_PENDING = Sensors.newLongSensor("cassandra.cluster.read.pending", "Current pending ReadStage tasks");
-    AttributeSensor<Integer> READ_ACTIVE = Sensors.newIntegerSensor("cassandra.cluster.read.active", "Current active ReadStage tasks");
-    AttributeSensor<Long> WRITE_PENDING = Sensors.newLongSensor("cassandra.cluster.write.pending", "Current pending MutationStage tasks");
-    AttributeSensor<Integer> WRITE_ACTIVE = Sensors.newIntegerSensor("cassandra.cluster.write.active", "Current active MutationStage tasks");
-
-    AttributeSensor<Long> THRIFT_PORT_LATENCY_PER_NODE = Sensors.newLongSensor("cassandra.cluster.thrift.latency.perNode", "Latency for thrift port connection averaged over all nodes (ms)");
-    AttributeSensor<Double> READS_PER_SECOND_LAST_PER_NODE = Sensors.newDoubleSensor("cassandra.reads.perSec.last.perNode", "Reads/sec (last datapoint) averaged over all nodes");
-    AttributeSensor<Double> WRITES_PER_SECOND_LAST_PER_NODE = Sensors.newDoubleSensor("cassandra.write.perSec.last.perNode", "Writes/sec (last datapoint) averaged over all nodes");
-    AttributeSensor<Double> PROCESS_CPU_TIME_FRACTION_LAST_PER_NODE = Sensors.newDoubleSensor("cassandra.cluster.metrics.processCpuTime.fraction.perNode", "Fraction of CPU time used (percentage reported by JMX), averaged over all nodes");
-
-    AttributeSensor<Double> READS_PER_SECOND_IN_WINDOW_PER_NODE = Sensors.newDoubleSensor("cassandra.reads.perSec.windowed.perNode", "Reads/sec (over time window) averaged over all nodes");
-    AttributeSensor<Double> WRITES_PER_SECOND_IN_WINDOW_PER_NODE = Sensors.newDoubleSensor("cassandra.writes.perSec.windowed.perNode", "Writes/sec (over time window) averaged over all nodes");
-    AttributeSensor<Double> THRIFT_PORT_LATENCY_IN_WINDOW_PER_NODE = Sensors.newDoubleSensor("cassandra.thrift.latency.windowed.perNode", "Latency for thrift port (ms, over time window) averaged over all nodes");
-    AttributeSensor<Double> PROCESS_CPU_TIME_FRACTION_IN_WINDOW_PER_NODE = Sensors.newDoubleSensor("cassandra.cluster.metrics.processCpuTime.fraction.windowed", "Fraction of CPU time used (percentage, over time window), averaged over all nodes");
-
-    MethodEffector<Void> UPDATE = new MethodEffector<Void>(CassandraDatacenter.class, "update");
-
-    brooklyn.entity.Effector<String> EXECUTE_SCRIPT = Effectors.effector(DatastoreMixins.EXECUTE_SCRIPT)
-        .description("executes the given script contents using cassandra-cli")
-        .buildAbstract();
-
-    /**
-     * The default number of nodes used to seed the cluster.
-     * <p>
-     * Version 1.2.2 is buggy and requires a long delay for two seed nodes to reconcile;
-     * with 1.2.9 this seems fine, with just a few seconds' delay after starting.
-     *
-     * @see <a href="http://stackoverflow.com/questions/6770894/schemadisagreementexception/18639005">SchemaDisagreementException</a>
-     */
-    int DEFAULT_SEED_QUORUM = 2;
-
-    /**
-     * Can insert a delay after the first node comes up.
-     * <p>
-     * Reportedly not needed with 1.2.9, but we are still seeing some seed failures, so it has been re-introduced.
-     * (This does not seem to help with the bug in 1.2.2.)
-     */
-    Duration DELAY_AFTER_FIRST = Duration.ONE_MINUTE;
-
-    /**
-     * If set (i.e. non-null), this waits the indicated time after a successful launch of one node
-     * before starting the next. (If it is null, all nodes start simultaneously,
-     * possibly after the DELAY_AFTER_FIRST.)
-     * <p>
-     * When subsequent nodes start simultaneously, we occasionally see schema disagreement problems;
-     * if nodes start sequentially, we occasionally get "no sources for (tokenRange]" problems.
-     * Either way the node stops. Ideally this can be solved at the Cassandra level,
-     * but if not, we will have to introduce some restarts of the Cassandra nodes (which does seem
-     * to resolve the problems).
-     */
-    Duration DELAY_BETWEEN_STARTS = null;
-    
-    /**
-     * Whether to wait for the first node to start up.
-     * <p>
-     * Not sure whether this is needed or not. Needs testing in an environment where not all
-     * nodes are seed nodes: what happens if non-seed nodes start before the seed nodes?
-     */
-    boolean WAIT_FOR_FIRST = true;
-
-    @Effector(description="Updates the cluster members")
-    void update();
-
-    /**
-     * The name of the cluster.
-     */
-    String getClusterName();
-
-    Set<Entity> gatherPotentialSeeds();
-
-    Set<Entity> gatherPotentialRunningSeeds();
-
-    String executeScript(String commands);
-
-}

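For orientation, a minimal sketch of how the config keys in the removed interface
were typically set (the EntitySpec.create/configure calls mirror those in the tests
later in this commit; the concrete values are illustrative assumptions only):

    import java.math.BigInteger;
    import brooklyn.entity.proxying.EntitySpec;

    // Illustrative only: name the cluster, keep the default SimpleSnitch, and pass a
    // zero TOKEN_SHIFT (the javadoc above notes a random shift is applied when unset).
    EntitySpec<CassandraDatacenter> dcSpec = EntitySpec.create(CassandraDatacenter.class)
            .configure(CassandraDatacenter.CLUSTER_NAME, "BrooklynCluster")
            .configure(CassandraDatacenter.ENDPOINT_SNITCH_NAME, "SimpleSnitch")
            .configure(CassandraDatacenter.TOKEN_SHIFT, BigInteger.ZERO);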

[06/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraNodeEc2LiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraNodeEc2LiveTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraNodeEc2LiveTest.java
deleted file mode 100644
index 0131867..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraNodeEc2LiveTest.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.entity.AbstractEc2LiveTest;
-import brooklyn.entity.nosql.cassandra.AstyanaxSupport.AstyanaxSample;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.location.Location;
-import brooklyn.test.EntityTestUtils;
-
-import com.google.common.collect.ImmutableList;
-
-public class CassandraNodeEc2LiveTest extends AbstractEc2LiveTest {
-
-    private static final Logger log = LoggerFactory.getLogger(CassandraNodeEc2LiveTest.class);
-
-    @Override
-    protected void doTest(Location loc) throws Exception {
-        log.info("Testing Cassandra on {}", loc);
-
-        CassandraNode cassandra = app.createAndManageChild(EntitySpec.create(CassandraNode.class)
-                .configure("thriftPort", "9876+")
-                .configure("clusterName", "TestCluster"));
-        app.start(ImmutableList.of(loc));
-
-        EntityTestUtils.assertAttributeEqualsEventually(cassandra, CassandraNode.SERVICE_UP, true);
-
-        AstyanaxSample astyanax = new AstyanaxSample(cassandra);
-        astyanax.astyanaxTest();
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraNodeIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraNodeIntegrationTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraNodeIntegrationTest.java
deleted file mode 100644
index 4f780da..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraNodeIntegrationTest.java
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import static org.testng.Assert.assertEquals;
-import static org.testng.Assert.assertNotNull;
-import static org.testng.Assert.assertTrue;
-
-import java.util.List;
-import java.util.Map;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.nosql.cassandra.AstyanaxSupport.AstyanaxSample;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.event.basic.PortAttributeSensorAndConfigKey;
-import brooklyn.test.Asserts;
-import brooklyn.test.EntityTestUtils;
-import brooklyn.test.NetworkingTestUtils;
-import brooklyn.util.math.MathPredicates;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Maps;
-
-/**
- * Cassandra integration tests.
- *
- * Test the operation of the {@link CassandraNode} class.
- */
-public class CassandraNodeIntegrationTest extends AbstractCassandraNodeTest {
-
-    private static final Logger LOG = LoggerFactory.getLogger(CassandraNodeIntegrationTest.class);
-
-    public static void assertCassandraPortsAvailableEventually() {
-        Map<String, Integer> ports = getCassandraDefaultPorts();
-        NetworkingTestUtils.assertPortsAvailableEventually(ports);
-        LOG.info("Confirmed Cassandra ports are available: "+ports);
-    }
-    
-    public static Map<String, Integer> getCassandraDefaultPorts() {
-        List<PortAttributeSensorAndConfigKey> ports = ImmutableList.of(
-                CassandraNode.GOSSIP_PORT, 
-                CassandraNode.SSL_GOSSIP_PORT, 
-                CassandraNode.THRIFT_PORT, 
-                CassandraNode.NATIVE_TRANSPORT_PORT, 
-                CassandraNode.RMI_REGISTRY_PORT);
-        Map<String, Integer> result = Maps.newLinkedHashMap();
-        for (PortAttributeSensorAndConfigKey key : ports) {
-            result.put(key.getName(), key.getConfigKey().getDefaultValue().iterator().next());
-        }
-        return result;
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    @Override
-    public void setUp() throws Exception {
-        assertCassandraPortsAvailableEventually();
-        super.setUp();
-    }
-    
-    @AfterMethod(alwaysRun=true)
-    @Override
-    public void tearDown() throws Exception {
-        super.tearDown();
-        assertCassandraPortsAvailableEventually();
-    }
-    
-    /**
-     * Test that a node starts and sets SERVICE_UP correctly.
-     */
-    @Test(groups = "Integration")
-    public void canStartupAndShutdown() {
-        cassandra = app.createAndManageChild(EntitySpec.create(CassandraNode.class)
-                .configure("jmxPort", "11099+")
-                .configure("rmiRegistryPort", "19001+"));
-        app.start(ImmutableList.of(testLocation));
-
-        EntityTestUtils.assertAttributeEqualsEventually(cassandra, Startable.SERVICE_UP, true);
-        Entities.dumpInfo(app);
-
-        cassandra.stop();
-
-        EntityTestUtils.assertAttributeEqualsEventually(cassandra, Startable.SERVICE_UP, false);
-    }
-
-    /**
-     * Test that a keyspace and column family can be created and used with Astyanax client.
-     */
-    @Test(groups = "Integration")
-    public void testConnection() throws Exception {
-        cassandra = app.createAndManageChild(EntitySpec.create(CassandraNode.class)
-                .configure("jmxPort", "11099+")
-                .configure("rmiRegistryPort", "19001+")
-                .configure("thriftPort", "9876+"));
-        app.start(ImmutableList.of(testLocation));
-
-        EntityTestUtils.assertAttributeEqualsEventually(cassandra, Startable.SERVICE_UP, true);
-
-        AstyanaxSample astyanax = new AstyanaxSample(cassandra);
-        astyanax.astyanaxTest();
-    }
-    
-    /**
-     * Cassandra v2 needs Java >= 1.7. If you have Java 6 as the default locally, then you can use
-     * something like {@code .configure("shell.env", MutableMap.of("JAVA_HOME", "/Library/Java/JavaVirtualMachines/jdk1.7.0_51.jdk/Contents/Home"))}
-     */
-    @Test(groups = "Integration")
-    public void testCassandraVersion2() throws Exception {
-        // TODO In v2.0.10, the bin/cassandra script changed to add an additional check for JMX connectivity.
-        // This causes the cassandra script to hang for us (presumably due to the CLASSPATH/JVM_OPTS
-        // we're passing in for the JMX agent).
-        // See:
-        //  - https://issues.apache.org/jira/browse/CASSANDRA-7254
-        //  - https://github.com/apache/cassandra/blame/trunk/bin/cassandra#L211-216
-        
-        String version = "2.0.9";
-        String majorMinorVersion = "2.0";
-        
-        cassandra = app.createAndManageChild(EntitySpec.create(CassandraNode.class)
-                .configure(CassandraNode.SUGGESTED_VERSION, version)
-                .configure(CassandraNode.NUM_TOKENS_PER_NODE, 256)
-                .configure("jmxPort", "11099+")
-                .configure("rmiRegistryPort", "19001+"));
-        app.start(ImmutableList.of(testLocation));
-
-        EntityTestUtils.assertAttributeEqualsEventually(cassandra, Startable.SERVICE_UP, true);
-        Entities.dumpInfo(app);
-
-        AstyanaxSample astyanax = new AstyanaxSample(cassandra);
-        astyanax.astyanaxTest();
-
-        assertEquals(cassandra.getMajorMinorVersion(), majorMinorVersion);
-        
-        Asserts.succeedsEventually(new Runnable() {
-            @Override public void run() {
-                assertNotNull(cassandra.getAttribute(CassandraNode.TOKEN));
-                assertNotNull(cassandra.getAttribute(CassandraNode.TOKENS));
-                assertEquals(cassandra.getAttribute(CassandraNode.TOKENS).size(), 256, "tokens="+cassandra.getAttribute(CassandraNode.TOKENS));
-                
-                assertEquals(cassandra.getAttribute(CassandraNode.PEERS), (Integer)256);
-                assertEquals(cassandra.getAttribute(CassandraNode.LIVE_NODE_COUNT), (Integer)1);
-        
-                assertTrue(cassandra.getAttribute(CassandraNode.SERVICE_UP_JMX));
-                assertNotNull(cassandra.getAttribute(CassandraNode.THRIFT_PORT_LATENCY));
-        
-                assertNotNull(cassandra.getAttribute(CassandraNode.READ_PENDING));
-                assertNotNull(cassandra.getAttribute(CassandraNode.READ_ACTIVE));
-                EntityTestUtils.assertAttribute(cassandra, CassandraNode.READ_COMPLETED, MathPredicates.greaterThanOrEqual(1));
-                assertNotNull(cassandra.getAttribute(CassandraNode.WRITE_PENDING));
-                assertNotNull(cassandra.getAttribute(CassandraNode.WRITE_ACTIVE));
-                EntityTestUtils.assertAttribute(cassandra, CassandraNode.WRITE_COMPLETED, MathPredicates.greaterThanOrEqual(1));
-                
-                assertNotNull(cassandra.getAttribute(CassandraNode.READS_PER_SECOND_LAST));
-                assertNotNull(cassandra.getAttribute(CassandraNode.WRITES_PER_SECOND_LAST));
-        
-                assertNotNull(cassandra.getAttribute(CassandraNode.THRIFT_PORT_LATENCY_IN_WINDOW));
-                assertNotNull(cassandra.getAttribute(CassandraNode.READS_PER_SECOND_IN_WINDOW));
-                assertNotNull(cassandra.getAttribute(CassandraNode.WRITES_PER_SECOND_IN_WINDOW));
-                
-                // an example MXBean
-                EntityTestUtils.assertAttribute(cassandra, CassandraNode.MAX_HEAP_MEMORY, MathPredicates.greaterThanOrEqual(1));
-            }});
-
-        cassandra.stop();
-
-        EntityTestUtils.assertAttributeEqualsEventually(cassandra, Startable.SERVICE_UP, false);
-    }
-}

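One subtlety in getCassandraDefaultPorts above: the default value of a
PortAttributeSensorAndConfigKey is a port range rather than a single port, which is
why the test takes iterator().next(). A minimal sketch of the same idea, assuming
Brooklyn's PortRanges.fromString helper (the "9876+" range is illustrative, echoing
the thriftPort config used in these tests):

    import brooklyn.location.PortRange;
    import brooklyn.location.basic.PortRanges;

    PortRange range = PortRanges.fromString("9876+");  // "9876+" means 9876 and upwards
    int firstCandidate = range.iterator().next();      // 9876, the first (default) candidate
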
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraNodeLiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraNodeLiveTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraNodeLiveTest.java
deleted file mode 100644
index 4bf087f..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/CassandraNodeLiveTest.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import java.util.Map;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.nosql.cassandra.AstyanaxSupport.AstyanaxSample;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.test.EntityTestUtils;
-import brooklyn.util.collections.MutableMap;
-import brooklyn.util.text.Strings;
-
-import com.google.common.collect.ImmutableList;
-
-/**
- * Cassandra live tests.
- *
- * Test the operation of the {@link CassandraNode} class using the jclouds {@code rackspace-cloudservers-uk}
- * and {@code aws-ec2} providers, with different OS images. The tests use the {@link AstyanaxSupport#astyanaxTest()} method
- * to exercise the node, and will need to have {@code brooklyn.jclouds.provider.identity} and {@code .credential}
- * set, usually in the {@code .brooklyn/brooklyn.properties} file.
- */
-public class CassandraNodeLiveTest extends AbstractCassandraNodeTest {
-
-    private static final Logger log = LoggerFactory.getLogger(CassandraNodeLiveTest.class);
-
-    @DataProvider(name = "virtualMachineData")
-    public Object[][] provideVirtualMachineData() {
-        return new Object[][] { // ImageId, Provider, Region, Description (for logging)
-            new Object[] { "eu-west-1/ami-0307d674", "aws-ec2", "eu-west-1", "Ubuntu Server 14.04 LTS (HVM), SSD Volume Type" },
-            new Object[] { "LON/f9b690bf-88eb-43c2-99cf-391f2558732e", "rackspace-cloudservers-uk", "", "Ubuntu 12.04 LTS (Precise Pangolin)" }, 
-            new Object[] { "LON/a84b1592-6817-42da-a57c-3c13f3cfc1da", "rackspace-cloudservers-uk", "", "CentOS 6.5 (PVHVM)" }, 
-        };
-    }
-
-    @Test(groups = "Live", dataProvider = "virtualMachineData")
-    protected void testOperatingSystemProvider(String imageId, String provider, String region, String description) throws Exception {
-        log.info("Testing Cassandra on {}{} using {} ({})", new Object[] { provider, Strings.isNonEmpty(region) ? ":" + region : "", description, imageId });
-
-        Map<String, String> properties = MutableMap.of("imageId", imageId);
-        testLocation = app.getManagementContext().getLocationRegistry()
-                .resolve(provider + (Strings.isNonEmpty(region) ? ":" + region : ""), properties);
-
-        cassandra = app.createAndManageChild(EntitySpec.create(CassandraNode.class)
-                .configure("thriftPort", "9876+")
-                .configure("clusterName", "TestCluster"));
-        app.start(ImmutableList.of(testLocation));
-        EntityTestUtils.assertAttributeEqualsEventually(cassandra, CassandraNode.SERVICE_UP, true);
-
-        AstyanaxSample astyanax = new AstyanaxSample(cassandra);
-        astyanax.astyanaxTest();
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/NonNegTokenGeneratorTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/NonNegTokenGeneratorTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/NonNegTokenGeneratorTest.java
deleted file mode 100644
index a88fa4f..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/NonNegTokenGeneratorTest.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import static org.testng.Assert.assertEquals;
-
-import java.math.BigInteger;
-import java.util.List;
-
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.nosql.cassandra.TokenGenerators.AbstractTokenGenerator;
-import brooklyn.entity.nosql.cassandra.TokenGenerators.NonNeg127TokenGenerator;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-
-public class NonNegTokenGeneratorTest {
-
-    public static final BigInteger C4_1 = new BigInteger("42535295865117307932921825928971026432");
-    public static final BigInteger C4_2 = new BigInteger("85070591730234615865843651857942052864");
-    public static final BigInteger C4_3 = new BigInteger("127605887595351923798765477786913079296");
-
-    // TODO Expect this behaviour to change when we better support dynamically growing/shrinking.
-    // In particular, the expected behaviour for testReturnsNullWhenClusterSizeUnknown 
-    // and testReturnsNullWhenGrowingClusterUnknownAmount will change.
-
-    private AbstractTokenGenerator generator;
-
-    @BeforeMethod(alwaysRun=true)
-    public void setUp() throws Exception {
-        generator = new NonNeg127TokenGenerator();
-    }
-    
-    @Test
-    public void testGetTokenForReplacementNode() {
-        assertEquals(generator.getTokenForReplacementNode(BigInteger.ONE), BigInteger.ZERO);
-        assertEquals(generator.getTokenForReplacementNode(BigInteger.ZERO), generator.max());
-        assertEquals(generator.getTokenForReplacementNode(generator.max()), generator.max().subtract(BigInteger.ONE));
-    }
-    
-    @Test
-    public void testGeneratesInitialTokens() throws Exception {
-        List<BigInteger> tokens = Lists.newArrayList();
-        generator.growingCluster(4);
-        for (int i = 0; i < 4; i++) {
-            tokens.add(generator.newToken());
-        }
-        
-        assertEquals(tokens, ImmutableList.of(
-                BigInteger.ZERO, 
-                C4_1,
-                C4_2,
-                C4_3));
-    }
-    
-    // Expect behaviour to be changed to better choose tokens for growing clusters 
-    // (but eg need to take into account how busy each node is!)
-    @Test
-    public void testGeneratesTokensForGrowingCluster() throws Exception {
-        List<BigInteger> tokens = Lists.newArrayList();
-        generator.growingCluster(4);
-        for (int i = 0; i < 4; i++) {
-            tokens.add(generator.newToken());
-        }
-        generator.growingCluster(1);
-        assertEquals(generator.newToken(), C4_3.add(generator.max().add(BigInteger.ONE)).divide(BigInteger.valueOf(2)));
-        generator.growingCluster(2);
-        assertEquals(generator.newToken(), C4_1.divide(BigInteger.valueOf(2)));
-        assertEquals(generator.newToken(), C4_2.add(C4_1).divide(BigInteger.valueOf(2)));
-    }
-    
-    @Test
-    public void testGeneratesTokensForGrowingClusterWhenInitialSizeIsOne() throws Exception {
-        // initial size 1 has to do a special "average with ourself by half phase shift" computation
-        List<BigInteger> tokens = Lists.newArrayList();
-        generator.growingCluster(1);
-        tokens.add(generator.newToken());
-        
-        generator.growingCluster(1);
-        assertEquals(generator.newToken(), C4_2);
-        generator.growingCluster(2);
-        assertEquals(generator.newToken(), C4_3);
-        assertEquals(generator.newToken(), C4_1);
-    }
-    
-    @Test
-    public void testReturnsNullWhenClusterSizeUnknown() throws Exception {
-        assertEquals(generator.newToken(), null);
-    }
-    
-    @Test
-    public void testReturnsNullWhenGrowingClusterUnknownAmount() throws Exception {
-        generator.growingCluster(4);
-        for (int i = 0; i < 4; i++) {
-            generator.newToken();
-        }
-        assertEquals(generator.newToken(), null);
-    }
-}

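The C4_* constants above are simply the NonNeg127 token space divided evenly four
ways. A self-contained check, assuming that space is [0, 2^127):

    import java.math.BigInteger;

    public class TokenSpacingCheck {
        public static void main(String[] args) {
            BigInteger range = BigInteger.valueOf(2).pow(127);
            for (int i = 0; i < 4; i++) {
                // prints 0, C4_1, C4_2 and C4_3, as asserted in testGeneratesInitialTokens
                System.out.println(range.multiply(BigInteger.valueOf(i)).divide(BigInteger.valueOf(4)));
            }
        }
    }
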
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/PosNegTokenGeneratorTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/PosNegTokenGeneratorTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/PosNegTokenGeneratorTest.java
deleted file mode 100644
index 274d22f..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/cassandra/PosNegTokenGeneratorTest.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.cassandra;
-
-import static org.testng.Assert.assertEquals;
-
-import java.math.BigInteger;
-
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.nosql.cassandra.TokenGenerators.AbstractTokenGenerator;
-import brooklyn.entity.nosql.cassandra.TokenGenerators.PosNeg63TokenGenerator;
-
-public class PosNegTokenGeneratorTest {
-
-    // TODO Expect this behaviour to change when we better support dynamically growing/shrinking.
-    // In particular, the expected behaviour for testReturnsNullWhenClusterSizeUnknown 
-    // and testReturnsNullWhenGrowingClusterUnknownAmount will change.
-
-    private AbstractTokenGenerator generator;
-
-    @BeforeMethod(alwaysRun=true)
-    public void setUp() throws Exception {
-        generator = new PosNeg63TokenGenerator();
-    }
-    
-    @Test
-    public void testGetTokenForReplacementNode() {
-        assertEquals(generator.getTokenForReplacementNode(BigInteger.ONE), BigInteger.ZERO);
-        assertEquals(generator.getTokenForReplacementNode(BigInteger.ZERO), BigInteger.ONE.negate());
-        assertEquals(generator.getTokenForReplacementNode(generator.min()), generator.max());
-        assertEquals(generator.getTokenForReplacementNode(generator.max()), generator.max().subtract(BigInteger.ONE));
-    }
-    
-    @Test
-    public void testGeneratesInitialTokens() throws Exception {
-        generator.growingCluster(4);
-        assertEquals(generator.newToken(), generator.min());
-        assertEquals(generator.newToken(), generator.min().add(generator.range().divide(BigInteger.valueOf(4))));
-    }
-}

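The same even spacing applies to the PosNeg63 generator (the default
TOKEN_GENERATOR_CLASS in the CassandraDatacenter interface above), assuming it
covers the signed 64-bit range [-2^63, 2^63):

    import java.math.BigInteger;

    public class PosNeg63SpacingCheck {
        public static void main(String[] args) {
            BigInteger min = BigInteger.valueOf(2).pow(63).negate();
            BigInteger range = BigInteger.valueOf(2).pow(64);
            for (int i = 0; i < 4; i++) {
                // prints -2^63, -2^62, 0 and 2^62; the first two values match
                // the assertions in testGeneratesInitialTokens above
                System.out.println(min.add(range.multiply(BigInteger.valueOf(i)).divide(BigInteger.valueOf(4))));
            }
        }
    }
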
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/couchbase/CouchbaseOfflineTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/couchbase/CouchbaseOfflineTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/couchbase/CouchbaseOfflineTest.java
deleted file mode 100644
index e8815f9..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/couchbase/CouchbaseOfflineTest.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchbase;
-
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.basic.Entities;
-import brooklyn.location.basic.BasicOsDetails;
-import brooklyn.location.basic.BasicOsDetails.OsArchs;
-import brooklyn.management.internal.LocalManagementContext;
-import brooklyn.test.entity.LocalManagementContextForTests;
-
-public class CouchbaseOfflineTest {
-
-    private LocalManagementContext mgmt;
-
-    @BeforeMethod
-    public void setUp() {
-        mgmt = LocalManagementContextForTests.newInstance();
-    }
-    
-    @AfterMethod
-    public void tearDown() {
-        Entities.destroyAll(mgmt);
-    }
-    
-    @Test
-    public void testResolvingDownloadLinks() {
-        checkOsTag("linux", OsArchs.I386, "unknown", true, "centos6.x86.rpm");
-        checkOsTag("linux", OsArchs.I386, "unknown", false, "x86.rpm");
-        checkOsTag("rhel", OsArchs.X_86_64, "6", true, "centos6.x86_64.rpm");
-        checkOsTag("Ubuntu 14", OsArchs.X_86_64, "14.04", true, "ubuntu12.04_amd64.deb");
-        checkOsTag("Ubuntu 14", OsArchs.X_86_64, "14.04", false, "x86_64.deb");
-        checkOsTag("Debian 7up", OsArchs.I386, "7ish", true, "debian7_x86.deb");
-        Assert.assertEquals(new CouchbaseNodeSshDriver.DownloadLinkSegmentComputer(null, true, "test").getOsTag(), "centos6.x86_64.rpm");
-        Assert.assertEquals(new CouchbaseNodeSshDriver.DownloadLinkSegmentComputer(null, false, "test").getOsTag(), "x86_64.rpm");
-    }
-
-    protected void checkOsTag(String os, String arch, String version, boolean isV30, String expectedTag) {
-        Assert.assertEquals(new CouchbaseNodeSshDriver.DownloadLinkSegmentComputer(new BasicOsDetails(os, arch, version), isV30, "test").getOsTag(), expectedTag);
-    }
-    
-}

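For reference, the resolution exercised above in straight-line form (the arguments
mirror the test's own cases; the "example" context label is an arbitrary stand-in):

    import brooklyn.entity.nosql.couchbase.CouchbaseNodeSshDriver;
    import brooklyn.location.basic.BasicOsDetails;
    import brooklyn.location.basic.BasicOsDetails.OsArchs;

    // For v3.0+ downloads the tag embeds the distro; otherwise a generic tag is used.
    String v30Tag = new CouchbaseNodeSshDriver.DownloadLinkSegmentComputer(
            new BasicOsDetails("rhel", OsArchs.X_86_64, "6"), true, "example").getOsTag();
    // v30Tag is "centos6.x86_64.rpm"
    String fallbackTag = new CouchbaseNodeSshDriver.DownloadLinkSegmentComputer(
            null, false, "example").getOsTag();
    // fallbackTag is "x86_64.rpm" when no OS details are available
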
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayEc2LiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayEc2LiveTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayEc2LiveTest.java
deleted file mode 100644
index e0a5f3c..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayEc2LiveTest.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchbase;
-
-import java.util.List;
-import java.util.Map;
-
-import org.testng.annotations.Test;
-
-import brooklyn.entity.AbstractEc2LiveTest;
-import brooklyn.entity.group.DynamicCluster;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.location.Location;
-import brooklyn.test.EntityTestUtils;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-
-public class CouchbaseSyncGatewayEc2LiveTest extends AbstractEc2LiveTest {
-
-    @Override
-    protected void doTest(Location loc) throws Exception {
-        CouchbaseCluster cluster = app.createAndManageChild(EntitySpec.create(CouchbaseCluster.class)
-            .configure(CouchbaseNode.COUCHBASE_ADMIN_USERNAME, "Administrator")
-            .configure(CouchbaseNode.COUCHBASE_ADMIN_PASSWORD, "Password")
-            .configure(DynamicCluster.INITIAL_SIZE, 3)
-            .configure(CouchbaseCluster.CREATE_BUCKETS, (List<Map<String,Object>>)ImmutableList.of(
-                (Map<String,Object>)ImmutableMap.<String, Object>of(
-                    "bucket", "default",
-                    "bucket-ramsize", 100,
-                    "bucket-type", "couchbase",
-                    "bucket-port", 11211
-                ),
-                (Map<String,Object>)ImmutableMap.<String, Object>of(
-                    "bucket", "my_bucket",
-                    "bucket-ramsize", 100,
-                    "bucket-type", "couchbase",
-                    "bucket-port", 11223
-                ),
-                (Map<String,Object>)ImmutableMap.<String, Object>of(
-                    "bucket", "another",
-                    "bucket-ramsize", 100,
-                    "bucket-type", "couchbase",
-                    "bucket-port", 11224
-                ))
-            )
-        );
-        CouchbaseSyncGateway gateway = app.createAndManageChild(EntitySpec.create(CouchbaseSyncGateway.class)
-            .configure(CouchbaseSyncGateway.COUCHBASE_SERVER, cluster)
-            .configure(CouchbaseSyncGateway.COUCHBASE_SERVER_BUCKET, "my_bucket")
-        );
-        
-        app.start(ImmutableList.of(loc));
-        
-        EntityTestUtils.assertAttributeEqualsEventually(gateway, Startable.SERVICE_UP, true);
-    }
-    
-    
-    // Supported operating systems
-    @Test(groups = {"Live"})
-    @Override
-    public void test_Ubuntu_12_0() throws Exception {
-        super.test_Ubuntu_12_0();
-    }
-    
-    @Test(groups = {"Live"})
-    @Override
-    public void test_Red_Hat_Enterprise_Linux_6() throws Exception {
-        super.test_Red_Hat_Enterprise_Linux_6();
-    }
-    
-    @Test(groups = {"Live"})
-    @Override
-    public void test_CentOS_6_3() throws Exception {
-        super.test_CentOS_6_3();
-    }
-    
-    // Unsupported operating systems
-    
-    @Test(groups = {"Live"})
-    @Override
-    public void test_CentOS_5() throws Exception {
-        // Unsupported
-        // error: Failed dependencies:
-        //     libc.so.6(GLIBC_2.7)(64bit) is needed by couchbase-server-2.5.1-1083.x86_64
-        //        libcrypto.so.10()(64bit) is needed by couchbase-server-2.5.1-1083.x86_64
-        //        libreadline.so.6()(64bit) is needed by couchbase-server-2.5.1-1083.x86_64
-        //        libssl.so.10()(64bit) is needed by couchbase-server-2.5.1-1083.x86_64
-        //        libstdc++.so.6(GLIBCXX_3.4.10)(64bit) is needed by couchbase-server-2.5.1-1083.x86_64
-        //        libstdc++.so.6(GLIBCXX_3.4.11)(64bit) is needed by couchbase-server-2.5.1-1083.x86_64
-        //        libstdc++.so.6(GLIBCXX_3.4.9)(64bit) is needed by couchbase-server-2.5.1-1083.x86_64
-        //        libtinfo.so.5()(64bit) is needed by couchbase-server-2.5.1-1083.x86_64
-        //        openssl >= 1.0.0 is needed by couchbase-server-2.5.1-1083.x86_64
-        //        rpmlib(FileDigests) <= 4.6.0-1 is needed by couchbase-server-2.5.1-1083.x86_64
-        //        rpmlib(PayloadIsXz) <= 5.2-1 is needed by couchbase-server-2.5.1-1083.x86_64
-    }
-    
-    @Test(groups = {"Live"})
-    @Override
-    public void test_Debian_6() throws Exception {
-        // Unsupported
-    }
-    
-    @Test(groups = {"Live"})
-    @Override
-    public void test_Debian_7_2() throws Exception {
-        // Unsupported
-    }
-    
-    @Test(groups = {"Live"})
-    @Override
-    public void test_Ubuntu_10_0() throws Exception {
-        // Unsupported
-        // Installing cannot proceed since the package 'libssl1*' is missing. 
-        // Please install libssl1* and try again. 
-        //    $sudo apt-get install libssl1*
-        //
-        // Installing libssl1* doesn't fix the issue
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/AbstractCouchDBNodeTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/AbstractCouchDBNodeTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/AbstractCouchDBNodeTest.java
deleted file mode 100644
index a7c4c77..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/AbstractCouchDBNodeTest.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchdb;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-
-import brooklyn.entity.basic.ApplicationBuilder;
-import brooklyn.entity.basic.Entities;
-import brooklyn.location.Location;
-import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
-import brooklyn.test.entity.TestApplication;
-import brooklyn.util.internal.TimeExtras;
-
-/**
- * CouchDB test framework for integration and live tests.
- */
-public class AbstractCouchDBNodeTest {
-
-    private static final Logger log = LoggerFactory.getLogger(AbstractCouchDBNodeTest.class);
-
-    static {
-        TimeExtras.init();
-    }
-
-    protected TestApplication app;
-    protected Location testLocation;
-    protected CouchDBNode couchdb;
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        app = ApplicationBuilder.newManagedApp(TestApplication.class);
-        testLocation = new LocalhostMachineProvisioningLocation();
-        // testLocation = app.managementContext.locationRegistry.resolve("named:test");
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void shutdown() {
-        Entities.destroyAll(app.getManagementContext());
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/CouchDBClusterLiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/CouchDBClusterLiveTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/CouchDBClusterLiveTest.java
deleted file mode 100644
index c891e70..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/CouchDBClusterLiveTest.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchdb;
-
-import static org.testng.Assert.assertEquals;
-
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.basic.ApplicationBuilder;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.location.Location;
-import brooklyn.test.EntityTestUtils;
-import brooklyn.test.entity.TestApplication;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-
-/**
- * A live test of the {@link CouchDBCluster} entity.
- *
- * Tests that a two-node cluster can be started on Amazon EC2 and data written on one {@link CouchDBNode}
- * can be read from another, using the jcouchdb API.
- */
-public class CouchDBClusterLiveTest {
-
-    // private String provider = "rackspace-cloudservers-uk";
-    private String provider = "aws-ec2:eu-west-1";
-
-    protected TestApplication app;
-    protected Location testLocation;
-    protected CouchDBCluster cluster;
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() {
-        app = ApplicationBuilder.newManagedApp(TestApplication.class);
-        testLocation = app.getManagementContext().getLocationRegistry().resolve(provider);
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void shutdown() {
-        Entities.destroyAll(app.getManagementContext());
-    }
-
-    /**
- * Test that a two-node cluster starts up and allows access via the jcouchdb API through both nodes.
-     */
-    @Test(groups = "Live")
-    public void canStartupAndShutdown() throws Exception {
-        cluster = app.createAndManageChild(EntitySpec.create(CouchDBCluster.class)
-                .configure("initialSize", 2)
-                .configure("clusterName", "AmazonCluster"));
-        assertEquals(cluster.getCurrentSize().intValue(), 0);
-
-        app.start(ImmutableList.of(testLocation));
-
-        EntityTestUtils.assertAttributeEqualsEventually(cluster, CouchDBCluster.GROUP_SIZE, 2);
-        Entities.dumpInfo(app);
-
-        CouchDBNode first = (CouchDBNode) Iterables.get(cluster.getMembers(), 0);
-        CouchDBNode second = (CouchDBNode) Iterables.get(cluster.getMembers(), 1);
-
-        EntityTestUtils.assertAttributeEqualsEventually(first, Startable.SERVICE_UP, true);
-        EntityTestUtils.assertAttributeEqualsEventually(second, Startable.SERVICE_UP, true);
-
-        JcouchdbSupport jcouchdbFirst = new JcouchdbSupport(first);
-        JcouchdbSupport jcouchdbSecond = new JcouchdbSupport(second);
-        jcouchdbFirst.jcouchdbTest();
-        jcouchdbSecond.jcouchdbTest();
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/CouchDBNodeEc2LiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/CouchDBNodeEc2LiveTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/CouchDBNodeEc2LiveTest.java
deleted file mode 100644
index ca0383d..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/CouchDBNodeEc2LiveTest.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchdb;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import brooklyn.entity.AbstractEc2LiveTest;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.location.Location;
-import brooklyn.test.EntityTestUtils;
-
-import com.google.common.collect.ImmutableList;
-
-public class CouchDBNodeEc2LiveTest extends AbstractEc2LiveTest {
-
-    private static final Logger log = LoggerFactory.getLogger(CouchDBNodeEc2LiveTest.class);
-
-    @Override
-    protected void doTest(Location loc) throws Exception {
-        log.info("Testing CouchDB on {}", loc);
-
-        CouchDBNode couchdb = app.createAndManageChild(EntitySpec.create(CouchDBNode.class)
-                .configure("httpPort", "8000+"));
-        app.start(ImmutableList.of(loc));
-
-        EntityTestUtils.assertAttributeEqualsEventually(couchdb, Startable.SERVICE_UP, true);
-
-        JcouchdbSupport jcouchdb = new JcouchdbSupport(couchdb);
-        jcouchdb.jcouchdbTest();
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/CouchDBNodeIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/CouchDBNodeIntegrationTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/CouchDBNodeIntegrationTest.java
deleted file mode 100644
index f1fa31e..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/CouchDBNodeIntegrationTest.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchdb;
-
-import org.testng.annotations.Test;
-
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.test.EntityTestUtils;
-
-import com.google.common.collect.ImmutableList;
-
-/**
- * CouchDB integration tests.
- *
- * Test the operation of the {@link CouchDBNode} class.
- */
-public class CouchDBNodeIntegrationTest extends AbstractCouchDBNodeTest {
-
-    /**
-     * Test that a node starts and sets SERVICE_UP correctly.
-     */
-    @Test(groups = {"Integration", "WIP"})
-    public void canStartupAndShutdown() {
-        couchdb = app.createAndManageChild(EntitySpec.create(CouchDBNode.class)
-                .configure("httpPort", "8000+"));
-        app.start(ImmutableList.of(testLocation));
-
-        EntityTestUtils.assertAttributeEqualsEventually(couchdb, Startable.SERVICE_UP, true);
-
-        couchdb.stop();
-
-        EntityTestUtils.assertAttributeEquals(couchdb, Startable.SERVICE_UP, false);
-    }
-
-    /**
-     * Test that a node can be used with jcouchdb client.
-     */
-    @Test(groups = {"Integration", "WIP"})
-    public void testConnection() throws Exception {
-        couchdb = app.createAndManageChild(EntitySpec.create(CouchDBNode.class)
-                .configure("httpPort", "8000+"));
-        app.start(ImmutableList.of(testLocation));
-
-        EntityTestUtils.assertAttributeEqualsEventually(couchdb, Startable.SERVICE_UP, true);
-
-        JcouchdbSupport jcouchdb = new JcouchdbSupport(couchdb);
-        jcouchdb.jcouchdbTest();
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/CouchDBNodeLiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/CouchDBNodeLiveTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/CouchDBNodeLiveTest.java
deleted file mode 100644
index 3a3c4e2..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/CouchDBNodeLiveTest.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchdb;
-
-import java.util.Map;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.test.EntityTestUtils;
-import brooklyn.util.collections.MutableMap;
-import brooklyn.util.text.Strings;
-
-import com.google.common.collect.ImmutableList;
-
-/**
- * CouchDB live tests.
- *
- * Test the operation of the {@link CouchDBNode} class using the jclouds {@code rackspace-cloudservers-uk}
- * and {@code aws-ec2} providers, with different OS images. The tests use the {@link JcouchdbSupport#jcouchdbTest()} method
- * to exercise the node, and will need to have {@code brooklyn.jclouds.provider.identity} and {@code .credential}
- * set, usually in the {@code .brooklyn/brooklyn.properties} file.
- */
-public class CouchDBNodeLiveTest extends AbstractCouchDBNodeTest {
-
-    private static final Logger log = LoggerFactory.getLogger(CouchDBNodeLiveTest.class);
-
-    @DataProvider(name = "virtualMachineData")
-    public Object[][] provideVirtualMachineData() {
-        return new Object[][] { // ImageId, Provider, Region, Description (for logging)
-            new Object[] { "eu-west-1/ami-0307d674", "aws-ec2", "eu-west-1", "Ubuntu Server 14.04 LTS (HVM), SSD Volume Type" },
-            new Object[] { "LON/f9b690bf-88eb-43c2-99cf-391f2558732e", "rackspace-cloudservers-uk", "", "Ubuntu 12.04 LTS (Precise Pangolin)" }, 
-            new Object[] { "LON/a84b1592-6817-42da-a57c-3c13f3cfc1da", "rackspace-cloudservers-uk", "", "CentOS 6.5 (PVHVM)" }, 
-        };
-    }
-
-    @Test(groups = "Live", dataProvider = "virtualMachineData")
-    protected void testOperatingSystemProvider(String imageId, String provider, String region, String description) throws Exception {
-        log.info("Testing CouchDB on {}{} using {} ({})", new Object[] { provider, Strings.isNonEmpty(region) ? ":" + region : "", description, imageId });
-
-        Map<String, String> properties = MutableMap.of("imageId", imageId);
-        testLocation = app.getManagementContext().getLocationRegistry()
-                .resolve(provider + (Strings.isNonEmpty(region) ? ":" + region : ""), properties);
-
-        couchdb = app.createAndManageChild(EntitySpec.create(CouchDBNode.class)
-                .configure("httpPort", "12345+")
-                .configure("clusterName", "TestCluster"));
-        app.start(ImmutableList.of(testLocation));
-        EntityTestUtils.assertAttributeEqualsEventually(couchdb, Startable.SERVICE_UP, true);
-
-        JcouchdbSupport jcouchdb = new JcouchdbSupport(couchdb);
-        jcouchdb.jcouchdbTest();
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/JcouchdbSupport.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/JcouchdbSupport.java b/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/JcouchdbSupport.java
deleted file mode 100644
index 742e3b7..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/couchdb/JcouchdbSupport.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.couchdb;
-
-import static org.testng.Assert.assertEquals;
-import static org.testng.Assert.assertTrue;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.jcouchdb.db.Database;
-import org.jcouchdb.db.Server;
-import org.jcouchdb.db.ServerImpl;
-
-import brooklyn.entity.basic.Attributes;
-
-/**
- * CouchDB test framework for integration and live tests, using jcouchdb API.
- */
-public class JcouchdbSupport {
-
-    private CouchDBNode node;
-
-    public JcouchdbSupport(CouchDBNode node) {
-        this.node = node;
-    }
-
-    /**
-     * Exercise the {@link CouchDBNode} using the jcouchdb API.
-     */
-    public void jcouchdbTest() throws Exception {
-        Server server = new ServerImpl(node.getAttribute(Attributes.HOSTNAME), node.getHttpPort());
-        assertTrue(server.createDatabase("brooklyn"));
-
-        Database db = new Database(node.getAttribute(Attributes.HOSTNAME), node.getHttpPort(), "brooklyn");
-
-        // create a hash map document with two fields
-        Map<String,String> doc = new HashMap<String, String>();
-        doc.put("first", "one");
-        doc.put("second", "two");
-
-        // create the document in couchdb
-        int before = db.listDocuments(null, null).getTotalRows();
-        db.createDocument(doc);
-        int after = db.listDocuments(null, null).getTotalRows();
-
-        assertEquals(before + 1, after);
-    }
-
-    /**
-     * Write to a {@link CouchDBNode} using the jcouchdb API.
-     */
-    protected void writeData() throws Exception {
-    }
-
-    /**
-     * Read from a {@link CouchDBNode} using the jcouchdb API.
-     */
-    protected void readData() throws Exception {
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchClusterIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchClusterIntegrationTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchClusterIntegrationTest.java
deleted file mode 100644
index 232918a..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchClusterIntegrationTest.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.elasticsearch;
-
-import static org.testng.Assert.assertEquals;
-import static org.testng.Assert.assertTrue;
-
-import java.net.URI;
-import java.net.URISyntaxException;
-
-import org.apache.http.client.methods.HttpGet;
-import org.bouncycastle.util.Strings;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.BrooklynAppLiveTestSupport;
-import brooklyn.entity.Entity;
-import brooklyn.entity.basic.Attributes;
-import brooklyn.entity.group.DynamicCluster;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.event.feed.http.HttpValueFunctions;
-import brooklyn.location.Location;
-import brooklyn.test.Asserts;
-import brooklyn.test.EntityTestUtils;
-import brooklyn.util.http.HttpTool;
-import brooklyn.util.http.HttpToolResponse;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-
-public class ElasticSearchClusterIntegrationTest extends BrooklynAppLiveTestSupport {
-
-    // FIXME Exception in thread "main" java.lang.UnsupportedClassVersionError: org/elasticsearch/bootstrap/Elasticsearch : Unsupported major.minor version 51.0
-
-    private static final Logger LOG = LoggerFactory.getLogger(ElasticSearchClusterIntegrationTest.class);
-
-    protected Location testLocation;
-    protected ElasticSearchCluster elasticSearchCluster;
-
-    @BeforeMethod(alwaysRun = true)
-    @Override
-    public void setUp() throws Exception {
-        super.setUp();
-        testLocation = app.newLocalhostProvisioningLocation();
-    }
-
-    @Test(groups = {"Integration"})
-    public void testStartupAndShutdown() {
-        elasticSearchCluster = app.createAndManageChild(EntitySpec.create(ElasticSearchCluster.class)
-                .configure(DynamicCluster.INITIAL_SIZE, 3));
-        app.start(ImmutableList.of(testLocation));
-        
-        EntityTestUtils.assertAttributeEqualsEventually(elasticSearchCluster, Startable.SERVICE_UP, true);
-        
-        elasticSearchCluster.stop();
-        
-        EntityTestUtils.assertAttributeEqualsEventually(elasticSearchCluster, Startable.SERVICE_UP, false);
-    }
-    
-    @Test(groups = {"Integration"})
-    public void testPutAndGet() throws URISyntaxException {
-        elasticSearchCluster = app.createAndManageChild(EntitySpec.create(ElasticSearchCluster.class)
-                .configure(DynamicCluster.INITIAL_SIZE, 3));
-        app.start(ImmutableList.of(testLocation));
-        
-        EntityTestUtils.assertAttributeEqualsEventually(elasticSearchCluster, Startable.SERVICE_UP, true);
-        assertEquals(elasticSearchCluster.getMembers().size(), 3);
-        assertEquals(clusterDocumentCount(), 0);
-        
-        ElasticSearchNode anyNode = (ElasticSearchNode)elasticSearchCluster.getMembers().iterator().next();
-        
-        String document = "{\"foo\" : \"bar\",\"baz\" : \"quux\"}";
-        
-        String putBaseUri = "http://" + anyNode.getAttribute(Attributes.HOSTNAME) + ":" + anyNode.getAttribute(Attributes.HTTP_PORT);
-        
-        HttpToolResponse putResponse = HttpTool.httpPut(
-                HttpTool.httpClientBuilder()
-                    .port(anyNode.getAttribute(Attributes.HTTP_PORT))
-                    .build(), 
-                new URI(putBaseUri + "/mydocuments/docs/1"), 
-                ImmutableMap.<String, String>of(), 
-                Strings.toByteArray(document)); 
-        assertEquals(putResponse.getResponseCode(), 201);
-        
-        for (Entity entity : elasticSearchCluster.getMembers()) {
-            ElasticSearchNode node = (ElasticSearchNode)entity;
-            String getBaseUri = "http://" + node.getAttribute(Attributes.HOSTNAME) + ":" + node.getAttribute(Attributes.HTTP_PORT);
-            HttpToolResponse getResponse = HttpTool.execAndConsume(
-                    HttpTool.httpClientBuilder().build(),
-                    new HttpGet(getBaseUri + "/mydocuments/docs/1/_source"));
-            assertEquals(getResponse.getResponseCode(), 200);
-            assertEquals(HttpValueFunctions.jsonContents("foo", String.class).apply(getResponse), "bar");
-        }
-        Asserts.succeedsEventually(new Runnable() {
-            public void run() {
-                int count = clusterDocumentCount();
-                assertTrue(count >= 1, "count="+count);
-                LOG.debug("Document count is {}", count);
-            }});
-    }
-    
-    private int clusterDocumentCount() {
-        int result = 0;
-        for (Entity entity : elasticSearchCluster.getMembers()) {
-            result += entity.getAttribute(ElasticSearchNode.DOCUMENT_COUNT);
-        }
-        return result;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeIntegrationTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeIntegrationTest.java
deleted file mode 100644
index 73a2f97..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeIntegrationTest.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.elasticsearch;
-
-import static org.testng.Assert.assertEquals;
-
-import java.net.URI;
-import java.net.URISyntaxException;
-
-import org.apache.http.client.methods.HttpGet;
-import org.bouncycastle.util.Strings;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.basic.ApplicationBuilder;
-import brooklyn.entity.basic.Attributes;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.event.feed.http.HttpValueFunctions;
-import brooklyn.location.Location;
-import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
-import brooklyn.test.EntityTestUtils;
-import brooklyn.test.entity.TestApplication;
-import brooklyn.util.http.HttpTool;
-import brooklyn.util.http.HttpToolResponse;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-
-public class ElasticSearchNodeIntegrationTest {
-    
-    protected TestApplication app;
-    protected Location testLocation;
-    protected ElasticSearchNode elasticSearchNode;
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        app = ApplicationBuilder.newManagedApp(TestApplication.class);
-        testLocation = new LocalhostMachineProvisioningLocation();
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void shutdown() {
-        Entities.destroyAll(app.getManagementContext());
-    }
-    
-    @Test(groups = {"Integration"})
-    public void testStartupAndShutdown() {
-        elasticSearchNode = app.createAndManageChild(EntitySpec.create(ElasticSearchNode.class));
-        app.start(ImmutableList.of(testLocation));
-        
-        EntityTestUtils.assertAttributeEqualsEventually(elasticSearchNode, Startable.SERVICE_UP, true);
-        
-        elasticSearchNode.stop();
-        
-        EntityTestUtils.assertAttributeEqualsEventually(elasticSearchNode, Startable.SERVICE_UP, false);
-    }
-    
-    @Test(groups = {"Integration"})
-    public void testDocumentCount() throws URISyntaxException {
-        elasticSearchNode = app.createAndManageChild(EntitySpec.create(ElasticSearchNode.class));
-        app.start(ImmutableList.of(testLocation));
-        
-        EntityTestUtils.assertAttributeEqualsEventually(elasticSearchNode, Startable.SERVICE_UP, true);
-        
-        EntityTestUtils.assertAttributeEquals(elasticSearchNode, ElasticSearchNode.DOCUMENT_COUNT, 0);
-        
-        String baseUri = "http://" + elasticSearchNode.getAttribute(Attributes.HOSTNAME) + ":" + elasticSearchNode.getAttribute(Attributes.HTTP_PORT);
-        
-        HttpToolResponse pingResponse = HttpTool.execAndConsume(
-                HttpTool.httpClientBuilder().build(),
-                new HttpGet(baseUri));
-        assertEquals(pingResponse.getResponseCode(), 200);
-        
-        String document = "{\"foo\" : \"bar\",\"baz\" : \"quux\"}";
-        
-        HttpToolResponse putResponse = HttpTool.httpPut(
-                HttpTool.httpClientBuilder()
-                    .port(elasticSearchNode.getAttribute(Attributes.HTTP_PORT))
-                    .build(), 
-                new URI(baseUri + "/mydocuments/docs/1"), 
-                ImmutableMap.<String, String>of(), 
-                Strings.toByteArray(document)); 
-        assertEquals(putResponse.getResponseCode(), 201);
-        
-        HttpToolResponse getResponse = HttpTool.execAndConsume(
-                HttpTool.httpClientBuilder().build(),
-                new HttpGet(baseUri + "/mydocuments/docs/1/_source"));
-        assertEquals(getResponse.getResponseCode(), 200);
-        assertEquals(HttpValueFunctions.jsonContents("foo", String.class).apply(getResponse), "bar");
-        
-        EntityTestUtils.assertAttributeEqualsEventually(elasticSearchNode, ElasticSearchNode.DOCUMENT_COUNT, 1);
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBEc2LiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBEc2LiveTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBEc2LiveTest.java
deleted file mode 100644
index bd6b23f..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBEc2LiveTest.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import brooklyn.entity.AbstractEc2LiveTest;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.location.Location;
-import brooklyn.test.EntityTestUtils;
-import com.google.common.collect.ImmutableList;
-import com.mongodb.DBObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.annotations.Test;
-
-import static org.testng.Assert.assertEquals;
-
-public class MongoDBEc2LiveTest extends AbstractEc2LiveTest {
-
-    @SuppressWarnings("unused")
-    private static final Logger LOG = LoggerFactory.getLogger(MongoDBEc2LiveTest.class);
-
-    @Override
-    protected void doTest(Location loc) throws Exception {
-        MongoDBServer entity = app.createAndManageChild(EntitySpec.create(MongoDBServer.class)
-                .configure("mongodbConfTemplateUrl", "classpath:///test-mongodb.conf"));
-        app.start(ImmutableList.of(loc));
-
-        EntityTestUtils.assertAttributeEqualsEventually(entity, MongoDBServer.SERVICE_UP, true);
-
-        String id = MongoDBTestHelper.insert(entity, "hello", "world!");
-        DBObject docOut = MongoDBTestHelper.getById(entity, id);
-        assertEquals(docOut.get("hello"), "world!");
-    }
-
-    @Test(enabled=false)
-    public void testDummy() {} // Convince TestNG IDE integration that this really does have test methods
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBIntegrationTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBIntegrationTest.java
deleted file mode 100644
index 5884122..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBIntegrationTest.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import static org.testng.Assert.assertEquals;
-import static org.testng.Assert.assertFalse;
-
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.basic.ApplicationBuilder;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.trait.Startable;
-import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
-import brooklyn.test.EntityTestUtils;
-import brooklyn.test.entity.TestApplication;
-
-import com.google.common.collect.ImmutableList;
-import com.mongodb.DBObject;
-
-public class MongoDBIntegrationTest {
-
-    private TestApplication app;
-    private LocalhostMachineProvisioningLocation localhostProvisioningLocation;
-
-    @BeforeMethod(alwaysRun=true)
-    public void setUp() throws Exception {
-        localhostProvisioningLocation = new LocalhostMachineProvisioningLocation();
-        app = ApplicationBuilder.newManagedApp(TestApplication.class);
-    }
-
-    @AfterMethod(alwaysRun=true)
-    public void tearDown() throws Exception {
-        if (app != null) Entities.destroyAll(app.getManagementContext());
-    }
-
-    @Test(groups = "Integration")
-    public void testCanStartAndStop() throws Exception {
-        MongoDBServer entity = app.createAndManageChild(EntitySpec.create(MongoDBServer.class)
-                .configure("mongodbConfTemplateUrl", "classpath:///test-mongodb.conf"));
-        app.start(ImmutableList.of(localhostProvisioningLocation));
-
-        EntityTestUtils.assertAttributeEqualsEventually(entity, Startable.SERVICE_UP, true);
-        entity.stop();
-        assertFalse(entity.getAttribute(Startable.SERVICE_UP));
-    }
-
-    @Test(groups = "Integration", dependsOnMethods = { "testCanStartAndStop" })
-    public void testCanReadAndWrite() throws Exception {
-        MongoDBServer entity = app.createAndManageChild(EntitySpec.create(MongoDBServer.class)
-                .configure("mongodbConfTemplateUrl", "classpath:///test-mongodb.conf"));
-        app.start(ImmutableList.of(localhostProvisioningLocation));
-
-        String id = MongoDBTestHelper.insert(entity, "hello", "world!");
-        DBObject docOut = MongoDBTestHelper.getById(entity, id);
-        assertEquals(docOut.get("hello"), "world!");
-    }
-
-    @Test(groups = "Integration", dependsOnMethods = { "testCanStartAndStop" })
-    public void testPollInsertCountSensor() throws Exception {
-        MongoDBServer entity = app.createAndManageChild(EntitySpec.create(MongoDBServer.class)
-                .configure("mongodbConfTemplateUrl", "classpath:///test-mongodb.conf"));
-        app.start(ImmutableList.of(localhostProvisioningLocation));
-        EntityTestUtils.assertAttributeEqualsEventually(entity, Startable.SERVICE_UP, true);
-
-        EntityTestUtils.assertAttributeEventuallyNonNull(entity, MongoDBServer.OPCOUNTERS_INSERTS);
-        Long initialInserts = entity.getAttribute(MongoDBServer.OPCOUNTERS_INSERTS);
-        MongoDBTestHelper.insert(entity, "a", Boolean.TRUE);
-        MongoDBTestHelper.insert(entity, "b", Boolean.FALSE);
-        EntityTestUtils.assertAttributeEqualsEventually(entity, MongoDBServer.OPCOUNTERS_INSERTS, initialInserts + 2);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBRebindIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBRebindIntegrationTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBRebindIntegrationTest.java
deleted file mode 100644
index 4789624..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBRebindIntegrationTest.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.entity.rebind.RebindTestFixtureWithApp;
-import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
-import brooklyn.test.EntityTestUtils;
-
-import com.google.common.base.Predicates;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-
-public class MongoDBRebindIntegrationTest extends RebindTestFixtureWithApp {
-
-    private LocalhostMachineProvisioningLocation loc;
-    
-    @BeforeMethod(alwaysRun=true)
-    @Override
-    public void setUp() throws Exception {
-        super.setUp();
-        loc = origApp.newLocalhostProvisioningLocation();
-    }
-
-    @Test(groups = {"Integration"})
-    public void testRebindMongoDb() throws Exception {
-        MongoDBServer origEntity = origApp.createAndManageChild(EntitySpec.create(MongoDBServer.class)
-                .configure("mongodbConfTemplateUrl", "classpath:///test-mongodb.conf"));
-        origApp.start(ImmutableList.of(loc));
-        EntityTestUtils.assertAttributeEventuallyNonNull(origEntity, MongoDBServer.STATUS_BSON);
-
-        // rebind
-        rebind();
-        final MongoDBServer newEntity = (MongoDBServer) Iterables.find(newApp.getChildren(), Predicates.instanceOf(MongoDBServer.class));
-
-        // confirm effectors still work on entity
-        EntityTestUtils.assertAttributeEqualsEventually(newEntity, MongoDBServer.SERVICE_UP, true);
-        newEntity.stop();
-        EntityTestUtils.assertAttributeEqualsEventually(newEntity, MongoDBServer.SERVICE_UP, false);
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetEc2LiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetEc2LiveTest.java b/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetEc2LiveTest.java
deleted file mode 100644
index a1cf038..0000000
--- a/software/nosql/src/test/java/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetEc2LiveTest.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package brooklyn.entity.nosql.mongodb;
-
-import brooklyn.entity.AbstractEc2LiveTest;
-import brooklyn.entity.basic.Entities;
-import brooklyn.entity.group.DynamicCluster;
-import brooklyn.entity.proxying.EntitySpec;
-import brooklyn.location.Location;
-import brooklyn.test.Asserts;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.mongodb.DBObject;
-
-import groovy.time.TimeDuration;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.annotations.Test;
-
-import java.util.concurrent.Callable;
-
-import static org.testng.Assert.assertEquals;
-import static org.testng.Assert.assertNotNull;
-
-public class MongoDBReplicaSetEc2LiveTest extends AbstractEc2LiveTest {
-
-    @SuppressWarnings("unused")
-    private static final Logger LOG = LoggerFactory.getLogger(MongoDBReplicaSetEc2LiveTest.class);
-    private static final Integer REPLICA_SET_SIZE = 3;
-    private static final TimeDuration TIMEOUT = new TimeDuration(0, 0, 180, 0);
-
-    /**
- * Test that a three node replica set starts and allows access through all nodes.
-     */
-    @Override
-    protected void doTest(Location loc) throws Exception {
-        final MongoDBReplicaSet replicaSet = app.createAndManageChild(EntitySpec.create(MongoDBReplicaSet.class)
-                .configure(DynamicCluster.INITIAL_SIZE, REPLICA_SET_SIZE)
-                .configure("replicaSetName", "mongodb-live-test-replica-set")
-                .configure("memberSpec", EntitySpec.create(MongoDBServer.class)
-                        .configure("mongodbConfTemplateUrl", "classpath:///test-mongodb.conf")
-                        .configure("port", "27017+")));
-
-        assertEquals(replicaSet.getCurrentSize().intValue(), 0);
-
-        app.start(ImmutableList.of(loc));
-
-        Asserts.succeedsEventually(ImmutableMap.of("timeout", TIMEOUT), new Callable<Boolean>() {
-            @Override
-            public Boolean call() {
-                assertEquals(replicaSet.getCurrentSize(), REPLICA_SET_SIZE);
-                assertNotNull(replicaSet.getPrimary());
-                assertEquals(replicaSet.getSecondaries().size(), REPLICA_SET_SIZE-1);
-                return true;
-            }
-        });
-
-        Entities.dumpInfo(app);
-
-        // Test inserting a document and reading from secondaries
-        final String documentId = MongoDBTestHelper.insert(replicaSet.getPrimary(), "meaning-of-life", 42);
-        Asserts.succeedsEventually(ImmutableMap.of("timeout", TIMEOUT), new Callable<Boolean>() {
-            @Override
-            public Boolean call() throws Exception {
-                assertEquals(replicaSet.getCurrentSize().intValue(), 3);
-                for (MongoDBServer secondary : replicaSet.getSecondaries()) {
-                    DBObject docOut = MongoDBTestHelper.getById(secondary, documentId);
-                    assertEquals(docOut.get("meaning-of-life"), 42);
-                }
-                return true;
-            }
-        });
-
-    }
-
-    @Test(enabled=false)
-    public void testDummy() {} // Convince TestNG IDE integration that this really does have test methods
-}


[03/26] incubator-brooklyn git commit: [BROOKLYN-162] Renaming of the NoSQL packages

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeLiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeLiveTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeLiveTest.java
new file mode 100644
index 0000000..c098d13
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/CassandraNodeLiveTest.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import java.util.Map;
+
+import org.apache.brooklyn.entity.nosql.cassandra.CassandraNode;
+import org.apache.brooklyn.entity.nosql.cassandra.AstyanaxSupport.AstyanaxSample;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.annotations.DataProvider;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.test.EntityTestUtils;
+import brooklyn.util.collections.MutableMap;
+import brooklyn.util.text.Strings;
+
+import com.google.common.collect.ImmutableList;
+
+/**
+ * Cassandra live tests.
+ *
+ * Test the operation of the {@link CassandraNode} class using the jclouds {@code rackspace-cloudservers-uk}
+ * and {@code aws-ec2} providers, with different OS images. The tests use the {@link AstyanaxSupport.AstyanaxSample#astyanaxTest()} method
+ * to exercise the node, and will need to have {@code brooklyn.jclouds.provider.identity} and {@code .credential}
+ * set, usually in the {@code .brooklyn/brooklyn.properties} file.
+ */
+public class CassandraNodeLiveTest extends AbstractCassandraNodeTest {
+
+    private static final Logger log = LoggerFactory.getLogger(CassandraNodeLiveTest.class);
+
+    @DataProvider(name = "virtualMachineData")
+    public Object[][] provideVirtualMachineData() {
+        return new Object[][] { // ImageId, Provider, Region, Description (for logging)
+            new Object[] { "eu-west-1/ami-0307d674", "aws-ec2", "eu-west-1", "Ubuntu Server 14.04 LTS (HVM), SSD Volume Type" },
+            new Object[] { "LON/f9b690bf-88eb-43c2-99cf-391f2558732e", "rackspace-cloudservers-uk", "", "Ubuntu 12.04 LTS (Precise Pangolin)" }, 
+            new Object[] { "LON/a84b1592-6817-42da-a57c-3c13f3cfc1da", "rackspace-cloudservers-uk", "", "CentOS 6.5 (PVHVM)" }, 
+        };
+    }
+
+    @Test(groups = "Live", dataProvider = "virtualMachineData")
+    protected void testOperatingSystemProvider(String imageId, String provider, String region, String description) throws Exception {
+        log.info("Testing Cassandra on {}{} using {} ({})", new Object[] { provider, Strings.isNonEmpty(region) ? ":" + region : "", description, imageId });
+
+        Map<String, String> properties = MutableMap.of("imageId", imageId);
+        testLocation = app.getManagementContext().getLocationRegistry()
+                .resolve(provider + (Strings.isNonEmpty(region) ? ":" + region : ""), properties);
+
+        cassandra = app.createAndManageChild(EntitySpec.create(CassandraNode.class)
+                .configure("thriftPort", "9876+")
+                .configure("clusterName", "TestCluster"));
+        app.start(ImmutableList.of(testLocation));
+        EntityTestUtils.assertAttributeEqualsEventually(cassandra, CassandraNode.SERVICE_UP, true);
+
+        AstyanaxSample astyanax = new AstyanaxSample(cassandra);
+        astyanax.astyanaxTest();
+    }
+}

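Note on the Live test above: the jclouds credentials it expects follow the brooklyn.jclouds.<provider>.identity / .credential pattern named in the class javadoc. A minimal sketch of the two .brooklyn/brooklyn.properties entries, assuming the aws-ec2 provider (the identity shown is AWS's documented example key, not a real credential):

    brooklyn.jclouds.aws-ec2.identity=AKIAIOSFODNN7EXAMPLE
    brooklyn.jclouds.aws-ec2.credential=<your-secret-access-key>
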
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/NonNegTokenGeneratorTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/NonNegTokenGeneratorTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/NonNegTokenGeneratorTest.java
new file mode 100644
index 0000000..281ae6d
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/NonNegTokenGeneratorTest.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import static org.testng.Assert.assertEquals;
+
+import java.math.BigInteger;
+import java.util.List;
+
+import org.apache.brooklyn.entity.nosql.cassandra.TokenGenerators.AbstractTokenGenerator;
+import org.apache.brooklyn.entity.nosql.cassandra.TokenGenerators.NonNeg127TokenGenerator;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+
+public class NonNegTokenGeneratorTest {
+
+    public static final BigInteger C4_1 = new BigInteger("42535295865117307932921825928971026432");
+    public static final BigInteger C4_2 = new BigInteger("85070591730234615865843651857942052864");
+    public static final BigInteger C4_3 = new BigInteger("127605887595351923798765477786913079296");
+
+    // TODO Expect this behaviour to change when we better support dynamically growing/shrinking.
+    // In particular, the expected behaviour for testReturnsNullWhenClusterSizeUnknown 
+    // and testReturnsNullWhenGrowingClusterUnknownAmount will change.
+
+    private AbstractTokenGenerator generator;
+
+    @BeforeMethod(alwaysRun=true)
+    public void setUp() throws Exception {
+        generator = new NonNeg127TokenGenerator();
+    }
+    
+    @Test
+    public void testGetTokenForReplacementNode() {
+        assertEquals(generator.getTokenForReplacementNode(BigInteger.ONE), BigInteger.ZERO);
+        assertEquals(generator.getTokenForReplacementNode(BigInteger.ZERO), generator.max());
+        assertEquals(generator.getTokenForReplacementNode(generator.max()), generator.max().subtract(BigInteger.ONE));
+    }
+    
+    @Test
+    public void testGeneratesInitialTokens() throws Exception {
+        List<BigInteger> tokens = Lists.newArrayList();
+        generator.growingCluster(4);
+        for (int i = 0; i < 4; i++) {
+            tokens.add(generator.newToken());
+        }
+        
+        assertEquals(tokens, ImmutableList.of(
+                BigInteger.ZERO, 
+                C4_1,
+                C4_2,
+                C4_3));
+    }
+    
+    // Expect behaviour to be changed to better choose tokens for growing clusters 
+    // (but eg need to take into account how busy each node is!)
+    @Test
+    public void testGeneratesTokensForGrowingCluster() throws Exception {
+        List<BigInteger> tokens = Lists.newArrayList();
+        generator.growingCluster(4);
+        for (int i = 0; i < 4; i++) {
+            tokens.add(generator.newToken());
+        }
+        generator.growingCluster(1);
+        assertEquals(generator.newToken(), C4_3.add(generator.max().add(BigInteger.ONE)).divide(BigInteger.valueOf(2)));
+        generator.growingCluster(2);
+        assertEquals(generator.newToken(), C4_1.divide(BigInteger.valueOf(2)));
+        assertEquals(generator.newToken(), C4_2.add(C4_1).divide(BigInteger.valueOf(2)));
+    }
+    
+    @Test
+    public void testGeneratesTokensForGrowingClusterWhenInitialSizeIsOne() throws Exception {
+        // initial size 1 has to do a special "average with ourself by half phase shift" computation
+        List<BigInteger> tokens = Lists.newArrayList();
+        generator.growingCluster(1);
+        tokens.add(generator.newToken());
+        
+        generator.growingCluster(1);
+        assertEquals(generator.newToken(), C4_2);
+        generator.growingCluster(2);
+        assertEquals(generator.newToken(), C4_3);
+        assertEquals(generator.newToken(), C4_1);
+    }
+    
+    @Test
+    public void testReturnsNullWhenClusterSizeUnknown() throws Exception {
+        assertEquals(generator.newToken(), null);
+    }
+    
+    @Test
+    public void testReturnsNullWhenGrowingClusterUnknownAmount() throws Exception {
+        generator.growingCluster(4);
+        for (int i = 0; i < 4; i++) {
+            generator.newToken();
+        }
+        assertEquals(generator.newToken(), null);
+    }
+}

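Note on the expected constants above: C4_1, C4_2 and C4_3 are the evenly spaced quarter points of the token ring, i.e. k * 2^127 / 4 for k = 1..3. A minimal self-contained sketch that reproduces the three values, assuming the NonNeg127TokenGenerator ring spans tokens 0 through 2^127 - 1 (consistent with the assertions above):

    import java.math.BigInteger;

    public class QuarterPoints {
        public static void main(String[] args) {
            // 2^127 tokens on the non-negative ring: 0 .. 2^127 - 1
            BigInteger ringSize = BigInteger.ONE.shiftLeft(127);
            BigInteger quarter = ringSize.divide(BigInteger.valueOf(4)); // == C4_1
            for (int k = 1; k <= 3; k++) {
                // prints C4_1, C4_2, C4_3 in turn
                System.out.println(quarter.multiply(BigInteger.valueOf(k)));
            }
        }
    }
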
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/PosNegTokenGeneratorTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/PosNegTokenGeneratorTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/PosNegTokenGeneratorTest.java
new file mode 100644
index 0000000..e353f41
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/cassandra/PosNegTokenGeneratorTest.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.cassandra;
+
+import static org.testng.Assert.assertEquals;
+
+import java.math.BigInteger;
+
+import org.apache.brooklyn.entity.nosql.cassandra.TokenGenerators.AbstractTokenGenerator;
+import org.apache.brooklyn.entity.nosql.cassandra.TokenGenerators.PosNeg63TokenGenerator;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+public class PosNegTokenGeneratorTest {
+
+    // TODO Expect this behaviour to change when we better support dynamically growing/shrinking.
+    // In particular, the expected behaviour for testReturnsNullWhenClusterSizeUnknown 
+    // and testReturnsNullWhenGrowingClusterUnknownAmount will change.
+
+    private AbstractTokenGenerator generator;
+
+    @BeforeMethod(alwaysRun=true)
+    public void setUp() throws Exception {
+        generator = new PosNeg63TokenGenerator();
+    }
+    
+    @Test
+    public void testGetTokenForReplacementNode() {
+        assertEquals(generator.getTokenForReplacementNode(BigInteger.ONE), BigInteger.ZERO);
+        assertEquals(generator.getTokenForReplacementNode(BigInteger.ZERO), BigInteger.ONE.negate());
+        assertEquals(generator.getTokenForReplacementNode(generator.min()), generator.max());
+        assertEquals(generator.getTokenForReplacementNode(generator.max()), generator.max().subtract(BigInteger.ONE));
+    }
+    
+    @Test
+    public void testGeneratesInitialTokens() throws Exception {
+        generator.growingCluster(4);
+        assertEquals(generator.newToken(), generator.min());
+        assertEquals(generator.newToken(), generator.min().add(generator.range().divide(BigInteger.valueOf(4))));
+    }
+}

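Note on testGetTokenForReplacementNode in both token generator tests: the assertions are consistent with a single rule, namely that the replacement token is the one immediately preceding the replaced token, wrapping below the ring minimum back to the maximum. A minimal sketch restating that rule (inferred from the assertions, not taken from the generators' source):

    // replacement(t) = t - 1, wrapping below min back to max
    static BigInteger tokenForReplacement(BigInteger t, BigInteger min, BigInteger max) {
        return t.equals(min) ? max : t.subtract(BigInteger.ONE);
    }

With min = 0, max = 2^127 - 1 this matches the NonNeg127 assertions (replacement of ZERO is max); with min = -2^63, max = 2^63 - 1 it matches the PosNeg63 assertions (replacement of ZERO is -1, replacement of min is max).
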
http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseOfflineTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseOfflineTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseOfflineTest.java
new file mode 100644
index 0000000..de1d49a
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseOfflineTest.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchbase;
+
+import org.apache.brooklyn.entity.nosql.couchbase.CouchbaseNodeSshDriver;
+import org.testng.Assert;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.basic.Entities;
+import brooklyn.location.basic.BasicOsDetails;
+import brooklyn.location.basic.BasicOsDetails.OsArchs;
+import brooklyn.management.internal.LocalManagementContext;
+import brooklyn.test.entity.LocalManagementContextForTests;
+
+public class CouchbaseOfflineTest {
+
+    private LocalManagementContext mgmt;
+
+    @BeforeMethod
+    public void setUp() {
+        mgmt = LocalManagementContextForTests.newInstance();
+    }
+    
+    @AfterMethod
+    public void tearDown() {
+        Entities.destroyAll(mgmt);
+    }
+    
+    @Test
+    public void testResolvingDownloadLinks() {
+        checkOsTag("linux", OsArchs.I386, "unknown", true, "centos6.x86.rpm");
+        checkOsTag("linux", OsArchs.I386, "unknown", false, "x86.rpm");
+        checkOsTag("rhel", OsArchs.X_86_64, "6", true, "centos6.x86_64.rpm");
+        checkOsTag("Ubuntu 14", OsArchs.X_86_64, "14.04", true, "ubuntu12.04_amd64.deb");
+        checkOsTag("Ubuntu 14", OsArchs.X_86_64, "14.04", false, "x86_64.deb");
+        checkOsTag("Debian 7up", OsArchs.I386, "7ish", true, "debian7_x86.deb");
+        Assert.assertEquals(new CouchbaseNodeSshDriver.DownloadLinkSegmentComputer(null, true, "test").getOsTag(), "centos6.x86_64.rpm");
+        Assert.assertEquals(new CouchbaseNodeSshDriver.DownloadLinkSegmentComputer(null, false, "test").getOsTag(), "x86_64.rpm");
+    }
+
+    protected void checkOsTag(String os, String arch, String version, boolean isV30, String expectedTag) {
+        Assert.assertEquals(new CouchbaseNodeSshDriver.DownloadLinkSegmentComputer(new BasicOsDetails(os, arch, version), isV30, "test").getOsTag(), expectedTag);
+    }
+    
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayEc2LiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayEc2LiveTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayEc2LiveTest.java
new file mode 100644
index 0000000..89297be
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchbase/CouchbaseSyncGatewayEc2LiveTest.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchbase;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.brooklyn.entity.nosql.couchbase.CouchbaseCluster;
+import org.apache.brooklyn.entity.nosql.couchbase.CouchbaseNode;
+import org.apache.brooklyn.entity.nosql.couchbase.CouchbaseSyncGateway;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.AbstractEc2LiveTest;
+import brooklyn.entity.group.DynamicCluster;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.location.Location;
+import brooklyn.test.EntityTestUtils;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+
+public class CouchbaseSyncGatewayEc2LiveTest extends AbstractEc2LiveTest {
+
+    @Override
+    protected void doTest(Location loc) throws Exception {
+        CouchbaseCluster cluster = app.createAndManageChild(EntitySpec.create(CouchbaseCluster.class)
+            .configure(CouchbaseNode.COUCHBASE_ADMIN_USERNAME, "Administrator")
+            .configure(CouchbaseNode.COUCHBASE_ADMIN_PASSWORD, "Password")
+            .configure(DynamicCluster.INITIAL_SIZE, 3)
+            .configure(CouchbaseCluster.CREATE_BUCKETS, (List<Map<String,Object>>)ImmutableList.of(
+                (Map<String,Object>)ImmutableMap.<String, Object>of(
+                    "bucket", "default",
+                    "bucket-ramsize", 100,
+                    "bucket-type", "couchbase",
+                    "bucket-port", 11211
+                ),
+                (Map<String,Object>)ImmutableMap.<String, Object>of(
+                    "bucket", "my_bucket",
+                    "bucket-ramsize", 100,
+                    "bucket-type", "couchbase",
+                    "bucket-port", 11223
+                ),
+                (Map<String,Object>)ImmutableMap.<String, Object>of(
+                    "bucket", "another",
+                    "bucket-ramsize", 100,
+                    "bucket-type", "couchbase",
+                    "bucket-port", 11224
+                ))
+            )
+        );
+        CouchbaseSyncGateway gateway = app.createAndManageChild(EntitySpec.create(CouchbaseSyncGateway.class)
+            .configure(CouchbaseSyncGateway.COUCHBASE_SERVER, cluster)
+            .configure(CouchbaseSyncGateway.COUCHBASE_SERVER_BUCKET, "my_bucket")
+        );
+        
+        app.start(ImmutableList.of(loc));
+        
+        EntityTestUtils.assertAttributeEqualsEventually(gateway, Startable.SERVICE_UP, true);
+    }
+    
+    
+    // Supported operating systems
+    @Test(groups = {"Live"})
+    @Override
+    public void test_Ubuntu_12_0() throws Exception {
+        super.test_Ubuntu_12_0();
+    }
+    
+    @Test(groups = {"Live"})
+    @Override
+    public void test_Red_Hat_Enterprise_Linux_6() throws Exception {
+        super.test_Red_Hat_Enterprise_Linux_6();
+    }
+    
+    @Test(groups = {"Live"})
+    @Override
+    public void test_CentOS_6_3() throws Exception {
+        super.test_CentOS_6_3();
+    }
+    
+    // Unsupported operating systems
+    
+    @Test(groups = {"Live"})
+    @Override
+    public void test_CentOS_5() throws Exception {
+        // Unsupported
+        // error: Failed dependencies:
+        //     libc.so.6(GLIBC_2.7)(64bit) is needed by couchbase-server-2.5.1-1083.x86_64
+        //        libcrypto.so.10()(64bit) is needed by couchbase-server-2.5.1-1083.x86_64
+        //        libreadline.so.6()(64bit) is needed by couchbase-server-2.5.1-1083.x86_64
+        //        libssl.so.10()(64bit) is needed by couchbase-server-2.5.1-1083.x86_64
+        //        libstdc++.so.6(GLIBCXX_3.4.10)(64bit) is needed by couchbase-server-2.5.1-1083.x86_64
+        //        libstdc++.so.6(GLIBCXX_3.4.11)(64bit) is needed by couchbase-server-2.5.1-1083.x86_64
+        //        libstdc++.so.6(GLIBCXX_3.4.9)(64bit) is needed by couchbase-server-2.5.1-1083.x86_64
+        //        libtinfo.so.5()(64bit) is needed by couchbase-server-2.5.1-1083.x86_64
+        //        openssl >= 1.0.0 is needed by couchbase-server-2.5.1-1083.x86_64
+        //        rpmlib(FileDigests) <= 4.6.0-1 is needed by couchbase-server-2.5.1-1083.x86_64
+        //        rpmlib(PayloadIsXz) <= 5.2-1 is needed by couchbase-server-2.5.1-1083.x86_64
+    }
+    
+    @Test(groups = {"Live"})
+    @Override
+    public void test_Debian_6() throws Exception {
+        // Unsupported
+    }
+    
+    @Test(groups = {"Live"})
+    @Override
+    public void test_Debian_7_2() throws Exception {
+        // Unsupported
+    }
+    
+    @Test(groups = {"Live"})
+    @Override
+    public void test_Ubuntu_10_0() throws Exception {
+        // Unsupported
+        // Installing cannot proceed since the package 'libssl1*' is missing. 
+        // Please install libssl1* and try again. 
+        //    $sudo apt-get install libssl1*
+        //
+        // Installing libssl1* doesn't fix the issue
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/AbstractCouchDBNodeTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/AbstractCouchDBNodeTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/AbstractCouchDBNodeTest.java
new file mode 100644
index 0000000..d7bfa6b
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/AbstractCouchDBNodeTest.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchdb;
+
+import org.apache.brooklyn.entity.nosql.couchdb.CouchDBNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.BeforeMethod;
+
+import brooklyn.entity.basic.ApplicationBuilder;
+import brooklyn.entity.basic.Entities;
+import brooklyn.location.Location;
+import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
+import brooklyn.test.entity.TestApplication;
+import brooklyn.util.internal.TimeExtras;
+
+/**
+ * CouchDB test framework for integration and live tests.
+ */
+public class AbstractCouchDBNodeTest {
+
+    private static final Logger log = LoggerFactory.getLogger(AbstractCouchDBNodeTest.class);
+
+    static {
+        TimeExtras.init();
+    }
+
+    protected TestApplication app;
+    protected Location testLocation;
+    protected CouchDBNode couchdb;
+
+    @BeforeMethod(alwaysRun = true)
+    public void setup() throws Exception {
+        app = ApplicationBuilder.newManagedApp(TestApplication.class);
+        testLocation = new LocalhostMachineProvisioningLocation();
+        // testLocation = app.managementContext.locationRegistry.resolve("named:test");
+    }
+
+    @AfterMethod(alwaysRun = true)
+    public void shutdown() {
+        Entities.destroyAll(app.getManagementContext());
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBClusterLiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBClusterLiveTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBClusterLiveTest.java
new file mode 100644
index 0000000..0fd7796
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBClusterLiveTest.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchdb;
+
+import static org.testng.Assert.assertEquals;
+
+import org.apache.brooklyn.entity.nosql.couchdb.CouchDBCluster;
+import org.apache.brooklyn.entity.nosql.couchdb.CouchDBNode;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.basic.ApplicationBuilder;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.location.Location;
+import brooklyn.test.EntityTestUtils;
+import brooklyn.test.entity.TestApplication;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+
+/**
+ * A live test of the {@link CouchDBCluster} entity.
+ *
+ * Tests that a two node cluster can be started on Amazon EC2 and data written on one {@link CouchDBNode}
+ * can be read from another, using the jcouchdb API.
+ */
+public class CouchDBClusterLiveTest {
+
+    // private String provider = "rackspace-cloudservers-uk";
+    private String provider = "aws-ec2:eu-west-1";
+
+    protected TestApplication app;
+    protected Location testLocation;
+    protected CouchDBCluster cluster;
+
+    @BeforeMethod(alwaysRun = true)
+    public void setup() {
+        app = ApplicationBuilder.newManagedApp(TestApplication.class);
+        testLocation = app.getManagementContext().getLocationRegistry().resolve(provider);
+    }
+
+    @AfterMethod(alwaysRun = true)
+    public void shutdown() {
+        Entities.destroyAll(app.getManagementContext());
+    }
+
+    /**
+     * Test that a two node cluster starts up and allows access via the jcouchdb API through both nodes.
+     */
+    @Test(groups = "Live")
+    public void canStartupAndShutdown() throws Exception {
+        cluster = app.createAndManageChild(EntitySpec.create(CouchDBCluster.class)
+                .configure("initialSize", 2)
+                .configure("clusterName", "AmazonCluster"));
+        assertEquals(cluster.getCurrentSize().intValue(), 0);
+
+        app.start(ImmutableList.of(testLocation));
+
+        EntityTestUtils.assertAttributeEqualsEventually(cluster, CouchDBCluster.GROUP_SIZE, 2);
+        Entities.dumpInfo(app);
+
+        CouchDBNode first = (CouchDBNode) Iterables.get(cluster.getMembers(), 0);
+        CouchDBNode second = (CouchDBNode) Iterables.get(cluster.getMembers(), 1);
+
+        EntityTestUtils.assertAttributeEqualsEventually(first, Startable.SERVICE_UP, true);
+        EntityTestUtils.assertAttributeEqualsEventually(second, Startable.SERVICE_UP, true);
+
+        JcouchdbSupport jcouchdbFirst = new JcouchdbSupport(first);
+        JcouchdbSupport jcouchdbSecond = new JcouchdbSupport(second);
+        jcouchdbFirst.jcouchdbTest();
+        jcouchdbSecond.jcouchdbTest();
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeEc2LiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeEc2LiveTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeEc2LiveTest.java
new file mode 100644
index 0000000..1838354
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeEc2LiveTest.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchdb;
+
+import org.apache.brooklyn.entity.nosql.couchdb.CouchDBNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import brooklyn.entity.AbstractEc2LiveTest;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.location.Location;
+import brooklyn.test.EntityTestUtils;
+
+import com.google.common.collect.ImmutableList;
+
+public class CouchDBNodeEc2LiveTest extends AbstractEc2LiveTest {
+
+    private static final Logger log = LoggerFactory.getLogger(CouchDBNodeEc2LiveTest.class);
+
+    @Override
+    protected void doTest(Location loc) throws Exception {
+        log.info("Testing Cassandra on {}", loc);
+
+        CouchDBNode couchdb = app.createAndManageChild(EntitySpec.create(CouchDBNode.class)
+                .configure("httpPort", "8000+"));
+        app.start(ImmutableList.of(loc));
+
+        EntityTestUtils.assertAttributeEqualsEventually(couchdb, Startable.SERVICE_UP, true);
+
+        JcouchdbSupport jcouchdb = new JcouchdbSupport(couchdb);
+        jcouchdb.jcouchdbTest();
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeIntegrationTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeIntegrationTest.java
new file mode 100644
index 0000000..0f8cbd2
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeIntegrationTest.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchdb;
+
+import org.apache.brooklyn.entity.nosql.couchdb.CouchDBNode;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.test.EntityTestUtils;
+
+import com.google.common.collect.ImmutableList;
+
+/**
+ * CouchDB integration tests.
+ *
+ * Test the operation of the {@link CouchDBNode} class.
+ */
+public class CouchDBNodeIntegrationTest extends AbstractCouchDBNodeTest {
+
+    /**
+     * Test that a node starts and sets SERVICE_UP correctly.
+     */
+    @Test(groups = {"Integration", "WIP"})
+    public void canStartupAndShutdown() {
+        couchdb = app.createAndManageChild(EntitySpec.create(CouchDBNode.class)
+                .configure("httpPort", "8000+"));
+        app.start(ImmutableList.of(testLocation));
+
+        EntityTestUtils.assertAttributeEqualsEventually(couchdb, Startable.SERVICE_UP, true);
+
+        couchdb.stop();
+
+        EntityTestUtils.assertAttributeEquals(couchdb, Startable.SERVICE_UP, false);
+    }
+
+    /**
+     * Test that a node can be used with jcouchdb client.
+     */
+    @Test(groups = {"Integration", "WIP"})
+    public void testConnection() throws Exception {
+        couchdb = app.createAndManageChild(EntitySpec.create(CouchDBNode.class)
+                .configure("httpPort", "8000+"));
+        app.start(ImmutableList.of(testLocation));
+
+        EntityTestUtils.assertAttributeEqualsEventually(couchdb, Startable.SERVICE_UP, true);
+
+        JcouchdbSupport jcouchdb = new JcouchdbSupport(couchdb);
+        jcouchdb.jcouchdbTest();
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeLiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeLiveTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeLiveTest.java
new file mode 100644
index 0000000..05ce053
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/CouchDBNodeLiveTest.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchdb;
+
+import java.util.Map;
+
+import org.apache.brooklyn.entity.nosql.couchdb.CouchDBNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.annotations.DataProvider;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.test.EntityTestUtils;
+import brooklyn.util.collections.MutableMap;
+import brooklyn.util.text.Strings;
+
+import com.google.common.collect.ImmutableList;
+
+/**
+ * CouchDB live tests.
+ *
+ * Test the operation of the {@link CouchDBNode} class using the jclouds {@code rackspace-cloudservers-uk}
+ * and {@code aws-ec2} providers, with different OS images. The tests use the {@link JcouchdbSupport#jcouchdbTest()} method
+ * to exercise the node, and will need to have {@code brooklyn.jclouds.provider.identity} and {@code .credential}
+ * set, usually in the {@code .brooklyn/brooklyn.properties} file.
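+ *
+ * <p>For example, a minimal sketch of the relevant {@code brooklyn.properties} entries
+ * (placeholder values; the key names follow the per-provider pattern described above):
+ * <pre>
+ * brooklyn.jclouds.aws-ec2.identity = AKIA...
+ * brooklyn.jclouds.aws-ec2.credential = ...
+ * brooklyn.jclouds.rackspace-cloudservers-uk.identity = ...
+ * brooklyn.jclouds.rackspace-cloudservers-uk.credential = ...
+ * </pre>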
+ */
+public class CouchDBNodeLiveTest extends AbstractCouchDBNodeTest {
+
+    private static final Logger log = LoggerFactory.getLogger(CouchDBNodeLiveTest.class);
+
+    @DataProvider(name = "virtualMachineData")
+    public Object[][] provideVirtualMachineData() {
+        return new Object[][] { // ImageId, Provider, Region, Description (for logging)
+            new Object[] { "eu-west-1/ami-0307d674", "aws-ec2", "eu-west-1", "Ubuntu Server 14.04 LTS (HVM), SSD Volume Type" },
+            new Object[] { "LON/f9b690bf-88eb-43c2-99cf-391f2558732e", "rackspace-cloudservers-uk", "", "Ubuntu 12.04 LTS (Precise Pangolin)" }, 
+            new Object[] { "LON/a84b1592-6817-42da-a57c-3c13f3cfc1da", "rackspace-cloudservers-uk", "", "CentOS 6.5 (PVHVM)" }, 
+        };
+    }
+
+    @Test(groups = "Live", dataProvider = "virtualMachineData")
+    protected void testOperatingSystemProvider(String imageId, String provider, String region, String description) throws Exception {
+        log.info("Testing CouchDB on {}{} using {} ({})", new Object[] { provider, Strings.isNonEmpty(region) ? ":" + region : "", description, imageId });
+
+        Map<String, String> properties = MutableMap.of("imageId", imageId);
+        testLocation = app.getManagementContext().getLocationRegistry()
+                .resolve(provider + (Strings.isNonEmpty(region) ? ":" + region : ""), properties);
+
+        couchdb = app.createAndManageChild(EntitySpec.create(CouchDBNode.class)
+                .configure("httpPort", "12345+")
+                .configure("clusterName", "TestCluster"));
+        app.start(ImmutableList.of(testLocation));
+        EntityTestUtils.assertAttributeEqualsEventually(couchdb, Startable.SERVICE_UP, true);
+
+        JcouchdbSupport jcouchdb = new JcouchdbSupport(couchdb);
+        jcouchdb.jcouchdbTest();
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/JcouchdbSupport.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/JcouchdbSupport.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/JcouchdbSupport.java
new file mode 100644
index 0000000..d1c7cf6
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/couchdb/JcouchdbSupport.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.couchdb;
+
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertTrue;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.brooklyn.entity.nosql.couchdb.CouchDBNode;
+import org.jcouchdb.db.Database;
+import org.jcouchdb.db.Server;
+import org.jcouchdb.db.ServerImpl;
+
+import brooklyn.entity.basic.Attributes;
+
+/**
+ * CouchDB test framework for integration and live tests, using jcouchdb API.
+ */
+public class JcouchdbSupport {
+
+    private CouchDBNode node;
+
+    public JcouchdbSupport(CouchDBNode node) {
+        this.node = node;
+    }
+
+    /**
+     * Exercise the {@link CouchDBNode} using the jcouchdb API.
+     */
+    public void jcouchdbTest() throws Exception {
+        Server server = new ServerImpl(node.getAttribute(Attributes.HOSTNAME), node.getHttpPort());
+        assertTrue(server.createDatabase("brooklyn"));
+
+        Database db = new Database(node.getAttribute(Attributes.HOSTNAME), node.getHttpPort(), "brooklyn");
+
+        // create a hash map document with two fields
+        Map<String,String> doc = new HashMap<String, String>();
+        doc.put("first", "one");
+        doc.put("second", "two");
+
+        // create the document in couchdb
+        int before = db.listDocuments(null, null).getTotalRows();
+        db.createDocument(doc);
+        int after = db.listDocuments(null, null).getTotalRows();
+
+        assertEquals(after, before + 1);
+    }
+
+    /**
+     * Write to a {@link CouchDBNode} using the jcouchdb API.
+     */
+    protected void writeData() throws Exception {
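+        // Not yet implemented: basic document creation is currently covered inline by jcouchdbTest().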
+    }
+
+    /**
+     * Read from a {@link CouchDBNode} using the jcouchdb API.
+     */
+    protected void readData() throws Exception {
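+        // Not yet implemented: basic document listing is currently covered inline by jcouchdbTest().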
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchClusterIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchClusterIntegrationTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchClusterIntegrationTest.java
new file mode 100644
index 0000000..9f794a1
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchClusterIntegrationTest.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.elasticsearch;
+
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertTrue;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.brooklyn.entity.nosql.elasticsearch.ElasticSearchCluster;
+import org.apache.brooklyn.entity.nosql.elasticsearch.ElasticSearchNode;
+import org.apache.http.client.methods.HttpGet;
+import org.bouncycastle.util.Strings;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.BrooklynAppLiveTestSupport;
+import brooklyn.entity.Entity;
+import brooklyn.entity.basic.Attributes;
+import brooklyn.entity.group.DynamicCluster;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.event.feed.http.HttpValueFunctions;
+import brooklyn.location.Location;
+import brooklyn.test.Asserts;
+import brooklyn.test.EntityTestUtils;
+import brooklyn.util.http.HttpTool;
+import brooklyn.util.http.HttpToolResponse;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+
+public class ElasticSearchClusterIntegrationTest extends BrooklynAppLiveTestSupport {
+
+    // FIXME Exception in thread "main" java.lang.UnsupportedClassVersionError: org/elasticsearch/bootstrap/Elasticsearch : Unsupported major.minor version 51.0
+    // (class file version 51.0 means the Elasticsearch binary requires Java 7; run these tests on a Java 7 or later JVM)
+
+    private static final Logger LOG = LoggerFactory.getLogger(ElasticSearchClusterIntegrationTest.class);
+
+    protected Location testLocation;
+    protected ElasticSearchCluster elasticSearchCluster;
+
+    @BeforeMethod(alwaysRun = true)
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        testLocation = app.newLocalhostProvisioningLocation();
+    }
+
+    @Test(groups = {"Integration"})
+    public void testStartupAndShutdown() {
+        elasticSearchCluster = app.createAndManageChild(EntitySpec.create(ElasticSearchCluster.class)
+                .configure(DynamicCluster.INITIAL_SIZE, 3));
+        app.start(ImmutableList.of(testLocation));
+        
+        EntityTestUtils.assertAttributeEqualsEventually(elasticSearchCluster, Startable.SERVICE_UP, true);
+        
+        elasticSearchCluster.stop();
+        
+        EntityTestUtils.assertAttributeEqualsEventually(elasticSearchCluster, Startable.SERVICE_UP, false);
+    }
+    
+    @Test(groups = {"Integration"})
+    public void testPutAndGet() throws URISyntaxException {
+        elasticSearchCluster = app.createAndManageChild(EntitySpec.create(ElasticSearchCluster.class)
+                .configure(DynamicCluster.INITIAL_SIZE, 3));
+        app.start(ImmutableList.of(testLocation));
+        
+        EntityTestUtils.assertAttributeEqualsEventually(elasticSearchCluster, Startable.SERVICE_UP, true);
+        assertEquals(elasticSearchCluster.getMembers().size(), 3);
+        assertEquals(clusterDocumentCount(), 0);
+        
+        ElasticSearchNode anyNode = (ElasticSearchNode)elasticSearchCluster.getMembers().iterator().next();
+        
+        String document = "{\"foo\" : \"bar\",\"baz\" : \"quux\"}";
+        
+        String putBaseUri = "http://" + anyNode.getAttribute(Attributes.HOSTNAME) + ":" + anyNode.getAttribute(Attributes.HTTP_PORT);
+        
+        HttpToolResponse putResponse = HttpTool.httpPut(
+                HttpTool.httpClientBuilder()
+                    .port(anyNode.getAttribute(Attributes.HTTP_PORT))
+                    .build(), 
+                new URI(putBaseUri + "/mydocuments/docs/1"), 
+                ImmutableMap.<String, String>of(), 
+                Strings.toByteArray(document)); 
+        assertEquals(putResponse.getResponseCode(), 201);
+        
+        for (Entity entity : elasticSearchCluster.getMembers()) {
+            ElasticSearchNode node = (ElasticSearchNode)entity;
+            String getBaseUri = "http://" + node.getAttribute(Attributes.HOSTNAME) + ":" + node.getAttribute(Attributes.HTTP_PORT);
+            HttpToolResponse getResponse = HttpTool.execAndConsume(
+                    HttpTool.httpClientBuilder().build(),
+                    new HttpGet(getBaseUri + "/mydocuments/docs/1/_source"));
+            assertEquals(getResponse.getResponseCode(), 200);
+            assertEquals(HttpValueFunctions.jsonContents("foo", String.class).apply(getResponse), "bar");
+        }
+        Asserts.succeedsEventually(new Runnable() {
+            public void run() {
+                int count = clusterDocumentCount();
+                assertTrue(count >= 1, "count="+count);
+                LOG.debug("Document count is {}", count);
+            }});
+    }
+    
+    private int clusterDocumentCount() {
+        int result = 0;
+        for (Entity entity : elasticSearchCluster.getMembers()) {
+            Integer count = entity.getAttribute(ElasticSearchNode.DOCUMENT_COUNT);
+            if (count != null) result += count; // guard: the sensor may not be published yet, and unboxing null would NPE
+        }
+        return result;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeIntegrationTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeIntegrationTest.java
new file mode 100644
index 0000000..c0c140d
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/elasticsearch/ElasticSearchNodeIntegrationTest.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.elasticsearch;
+
+import static org.testng.Assert.assertEquals;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.brooklyn.entity.nosql.elasticsearch.ElasticSearchNode;
+import org.apache.http.client.methods.HttpGet;
+import org.bouncycastle.util.Strings;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.basic.ApplicationBuilder;
+import brooklyn.entity.basic.Attributes;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.event.feed.http.HttpValueFunctions;
+import brooklyn.location.Location;
+import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
+import brooklyn.test.EntityTestUtils;
+import brooklyn.test.entity.TestApplication;
+import brooklyn.util.http.HttpTool;
+import brooklyn.util.http.HttpToolResponse;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+
+public class ElasticSearchNodeIntegrationTest {
+    
+    protected TestApplication app;
+    protected Location testLocation;
+    protected ElasticSearchNode elasticSearchNode;
+
+    @BeforeMethod(alwaysRun = true)
+    public void setup() throws Exception {
+        app = ApplicationBuilder.newManagedApp(TestApplication.class);
+        testLocation = new LocalhostMachineProvisioningLocation();
+    }
+
+    @AfterMethod(alwaysRun = true)
+    public void shutdown() {
+        Entities.destroyAll(app.getManagementContext());
+    }
+    
+    @Test(groups = {"Integration"})
+    public void testStartupAndShutdown() {
+        elasticSearchNode = app.createAndManageChild(EntitySpec.create(ElasticSearchNode.class));
+        app.start(ImmutableList.of(testLocation));
+        
+        EntityTestUtils.assertAttributeEqualsEventually(elasticSearchNode, Startable.SERVICE_UP, true);
+        
+        elasticSearchNode.stop();
+        
+        EntityTestUtils.assertAttributeEqualsEventually(elasticSearchNode, Startable.SERVICE_UP, false);
+    }
+    
+    @Test(groups = {"Integration"})
+    public void testDocumentCount() throws URISyntaxException {
+        elasticSearchNode = app.createAndManageChild(EntitySpec.create(ElasticSearchNode.class));
+        app.start(ImmutableList.of(testLocation));
+        
+        EntityTestUtils.assertAttributeEqualsEventually(elasticSearchNode, Startable.SERVICE_UP, true);
+        
+        EntityTestUtils.assertAttributeEquals(elasticSearchNode, ElasticSearchNode.DOCUMENT_COUNT, 0);
+        
+        String baseUri = "http://" + elasticSearchNode.getAttribute(Attributes.HOSTNAME) + ":" + elasticSearchNode.getAttribute(Attributes.HTTP_PORT);
+        
+        HttpToolResponse pingResponse = HttpTool.execAndConsume(
+                HttpTool.httpClientBuilder().build(),
+                new HttpGet(baseUri));
+        assertEquals(pingResponse.getResponseCode(), 200);
+        
+        String document = "{\"foo\" : \"bar\",\"baz\" : \"quux\"}";
+        
+        HttpToolResponse putResponse = HttpTool.httpPut(
+                HttpTool.httpClientBuilder()
+                    .port(elasticSearchNode.getAttribute(Attributes.HTTP_PORT))
+                    .build(), 
+                new URI(baseUri + "/mydocuments/docs/1"), 
+                ImmutableMap.<String, String>of(), 
+                Strings.toByteArray(document)); 
+        assertEquals(putResponse.getResponseCode(), 201);
+        
+        HttpToolResponse getResponse = HttpTool.execAndConsume(
+                HttpTool.httpClientBuilder().build(),
+                new HttpGet(baseUri + "/mydocuments/docs/1/_source"));
+        assertEquals(getResponse.getResponseCode(), 200);
+        assertEquals(HttpValueFunctions.jsonContents("foo", String.class).apply(getResponse), "bar");
+        
+        EntityTestUtils.assertAttributeEqualsEventually(elasticSearchNode, ElasticSearchNode.DOCUMENT_COUNT, 1);
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBEc2LiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBEc2LiveTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBEc2LiveTest.java
new file mode 100644
index 0000000..413f0c9
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBEc2LiveTest.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import brooklyn.entity.AbstractEc2LiveTest;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.location.Location;
+import brooklyn.test.EntityTestUtils;
+import com.google.common.collect.ImmutableList;
+import com.mongodb.DBObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.annotations.Test;
+
+import static org.testng.Assert.assertEquals;
+
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBServer;
+
+public class MongoDBEc2LiveTest extends AbstractEc2LiveTest {
+
+    @SuppressWarnings("unused")
+    private static final Logger LOG = LoggerFactory.getLogger(MongoDBEc2LiveTest.class);
+
+    @Override
+    protected void doTest(Location loc) throws Exception {
+        MongoDBServer entity = app.createAndManageChild(EntitySpec.create(MongoDBServer.class)
+                .configure("mongodbConfTemplateUrl", "classpath:///test-mongodb.conf"));
+        app.start(ImmutableList.of(loc));
+
+        EntityTestUtils.assertAttributeEqualsEventually(entity, MongoDBServer.SERVICE_UP, true);
+
+        String id = MongoDBTestHelper.insert(entity, "hello", "world!");
+        DBObject docOut = MongoDBTestHelper.getById(entity, id);
+        assertEquals(docOut.get("hello"), "world!");
+    }
+
+    @Test(enabled=false)
+    public void testDummy() {} // Convince TestNG IDE integration that this really does have test methods
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBIntegrationTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBIntegrationTest.java
new file mode 100644
index 0000000..eb9c362
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBIntegrationTest.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertFalse;
+
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBServer;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.basic.ApplicationBuilder;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
+import brooklyn.test.EntityTestUtils;
+import brooklyn.test.entity.TestApplication;
+
+import com.google.common.collect.ImmutableList;
+import com.mongodb.DBObject;
+
+public class MongoDBIntegrationTest {
+
+    private TestApplication app;
+    private LocalhostMachineProvisioningLocation localhostProvisioningLocation;
+
+    @BeforeMethod(alwaysRun=true)
+    public void setUp() throws Exception {
+        localhostProvisioningLocation = new LocalhostMachineProvisioningLocation();
+        app = ApplicationBuilder.newManagedApp(TestApplication.class);
+    }
+
+    @AfterMethod(alwaysRun=true)
+    public void tearDown() throws Exception {
+        if (app != null) Entities.destroyAll(app.getManagementContext());
+    }
+
+    @Test(groups = "Integration")
+    public void testCanStartAndStop() throws Exception {
+        MongoDBServer entity = app.createAndManageChild(EntitySpec.create(MongoDBServer.class)
+                .configure("mongodbConfTemplateUrl", "classpath:///test-mongodb.conf"));
+        app.start(ImmutableList.of(localhostProvisioningLocation));
+
+        EntityTestUtils.assertAttributeEqualsEventually(entity, Startable.SERVICE_UP, true);
+        entity.stop();
+        assertFalse(entity.getAttribute(Startable.SERVICE_UP));
+    }
+
+    @Test(groups = "Integration", dependsOnMethods = { "testCanStartAndStop" })
+    public void testCanReadAndWrite() throws Exception {
+        MongoDBServer entity = app.createAndManageChild(EntitySpec.create(MongoDBServer.class)
+                .configure("mongodbConfTemplateUrl", "classpath:///test-mongodb.conf"));
+        app.start(ImmutableList.of(localhostProvisioningLocation));
+
+        String id = MongoDBTestHelper.insert(entity, "hello", "world!");
+        DBObject docOut = MongoDBTestHelper.getById(entity, id);
+        assertEquals(docOut.get("hello"), "world!");
+    }
+
+    @Test(groups = "Integration", dependsOnMethods = { "testCanStartAndStop" })
+    public void testPollInsertCountSensor() throws Exception {
+        MongoDBServer entity = app.createAndManageChild(EntitySpec.create(MongoDBServer.class)
+                .configure("mongodbConfTemplateUrl", "classpath:///test-mongodb.conf"));
+        app.start(ImmutableList.of(localhostProvisioningLocation));
+        EntityTestUtils.assertAttributeEqualsEventually(entity, Startable.SERVICE_UP, true);
+
+        EntityTestUtils.assertAttributeEventuallyNonNull(entity, MongoDBServer.OPCOUNTERS_INSERTS);
+        Long initialInserts = entity.getAttribute(MongoDBServer.OPCOUNTERS_INSERTS);
+        MongoDBTestHelper.insert(entity, "a", Boolean.TRUE);
+        MongoDBTestHelper.insert(entity, "b", Boolean.FALSE);
+        EntityTestUtils.assertAttributeEqualsEventually(entity, MongoDBServer.OPCOUNTERS_INSERTS, initialInserts + 2);
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBRebindIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBRebindIntegrationTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBRebindIntegrationTest.java
new file mode 100644
index 0000000..b7c7739
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBRebindIntegrationTest.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBServer;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.rebind.RebindTestFixtureWithApp;
+import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
+import brooklyn.test.EntityTestUtils;
+
+import com.google.common.base.Predicates;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+
+public class MongoDBRebindIntegrationTest extends RebindTestFixtureWithApp {
+
+    private LocalhostMachineProvisioningLocation loc;
+    
+    @BeforeMethod(alwaysRun=true)
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        loc = origApp.newLocalhostProvisioningLocation();
+    }
+
+    @Test(groups = {"Integration"})
+    public void testRebindMongoDb() throws Exception {
+        MongoDBServer origEntity = origApp.createAndManageChild(EntitySpec.create(MongoDBServer.class)
+                .configure("mongodbConfTemplateUrl", "classpath:///test-mongodb.conf"));
+        origApp.start(ImmutableList.of(loc));
+        EntityTestUtils.assertAttributeEventuallyNonNull(origEntity, MongoDBServer.STATUS_BSON);
+
+        // rebind
+        rebind();
+        final MongoDBServer newEntity = (MongoDBServer) Iterables.find(newApp.getChildren(), Predicates.instanceOf(MongoDBServer.class));
+
+        // confirm effectors still work on entity
+        EntityTestUtils.assertAttributeEqualsEventually(newEntity, MongoDBServer.SERVICE_UP, true);
+        newEntity.stop();
+        EntityTestUtils.assertAttributeEqualsEventually(newEntity, MongoDBServer.SERVICE_UP, false);
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetEc2LiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetEc2LiveTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetEc2LiveTest.java
new file mode 100644
index 0000000..051a698
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetEc2LiveTest.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import brooklyn.entity.AbstractEc2LiveTest;
+import brooklyn.entity.basic.Entities;
+import brooklyn.entity.group.DynamicCluster;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.location.Location;
+import brooklyn.test.Asserts;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.mongodb.DBObject;
+
+import groovy.time.TimeDuration;
+
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBReplicaSet;
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBServer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.annotations.Test;
+
+import java.util.concurrent.Callable;
+
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertNotNull;
+
+public class MongoDBReplicaSetEc2LiveTest extends AbstractEc2LiveTest {
+
+    @SuppressWarnings("unused")
+    private static final Logger LOG = LoggerFactory.getLogger(MongoDBReplicaSetEc2LiveTest.class);
+    private static final Integer REPLICA_SET_SIZE = 3;
+    private static final TimeDuration TIMEOUT = new TimeDuration(0, 0, 180, 0);
+
+    /**
+     * Test that a three node replica set starts and allows access through all nodes.
+     */
+    @Override
+    protected void doTest(Location loc) throws Exception {
+        final MongoDBReplicaSet replicaSet = app.createAndManageChild(EntitySpec.create(MongoDBReplicaSet.class)
+                .configure(DynamicCluster.INITIAL_SIZE, REPLICA_SET_SIZE)
+                .configure("replicaSetName", "mongodb-live-test-replica-set")
+                .configure("memberSpec", EntitySpec.create(MongoDBServer.class)
+                        .configure("mongodbConfTemplateUrl", "classpath:///test-mongodb.conf")
+                        .configure("port", "27017+")));
+
+        assertEquals(replicaSet.getCurrentSize().intValue(), 0);
+
+        app.start(ImmutableList.of(loc));
+
+        Asserts.succeedsEventually(ImmutableMap.of("timeout", TIMEOUT), new Callable<Boolean>() {
+            @Override
+            public Boolean call() {
+                assertEquals(replicaSet.getCurrentSize(), REPLICA_SET_SIZE);
+                assertNotNull(replicaSet.getPrimary());
+                assertEquals(replicaSet.getSecondaries().size(), REPLICA_SET_SIZE-1);
+                return true;
+            }
+        });
+
+        Entities.dumpInfo(app);
+
+        // Test inserting a document and reading from secondaries
+        final String documentId = MongoDBTestHelper.insert(replicaSet.getPrimary(), "meaning-of-life", 42);
+        Asserts.succeedsEventually(ImmutableMap.of("timeout", TIMEOUT), new Callable<Boolean>() {
+            @Override
+            public Boolean call() throws Exception {
+                assertEquals(replicaSet.getCurrentSize().intValue(), 3);
+                for (MongoDBServer secondary : replicaSet.getSecondaries()) {
+                    DBObject docOut = MongoDBTestHelper.getById(secondary, documentId);
+                    assertEquals(docOut.get("meaning-of-life"), 42);
+                }
+                return true;
+            }
+        });
+
+    }
+
+    @Test(enabled=false)
+    public void testDummy() {} // Convince TestNG IDE integration that this really does have test methods
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetIntegrationTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetIntegrationTest.java
new file mode 100644
index 0000000..56ed82f
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBReplicaSetIntegrationTest.java
@@ -0,0 +1,208 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertFalse;
+import static org.testng.Assert.assertNotEquals;
+import static org.testng.Assert.assertNotNull;
+
+import java.util.Collection;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBReplicaSet;
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBServer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.BrooklynAppLiveTestSupport;
+import brooklyn.entity.Entity;
+import brooklyn.entity.group.DynamicCluster;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.entity.trait.Startable;
+import brooklyn.location.basic.LocalhostMachineProvisioningLocation;
+import brooklyn.test.Asserts;
+import brooklyn.util.time.Duration;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.mongodb.DBObject;
+
+public class MongoDBReplicaSetIntegrationTest extends BrooklynAppLiveTestSupport {
+
+    @SuppressWarnings("unused")
+    private static final Logger log = LoggerFactory.getLogger(MongoDBReplicaSetIntegrationTest.class);
+    
+    private Collection<LocalhostMachineProvisioningLocation> locs;
+
+    // Replica sets can take a while to start
+    private static final Duration TIMEOUT = Duration.of(3, TimeUnit.MINUTES);
+
+    @BeforeMethod(alwaysRun=true)
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        locs = ImmutableList.of(app.newLocalhostProvisioningLocation());
+    }
+
+    /**
+     * Creates and starts a replica set, asserts it reaches the given size
+     * and that the primary and secondaries are non-null.
+     */
+    private MongoDBReplicaSet makeAndStartReplicaSet(final Integer size, final String testDescription) {
+        // Sets secondaryPreferred so we can read from slaves.
+        final MongoDBReplicaSet replicaSet = app.createAndManageChild(EntitySpec.create(MongoDBReplicaSet.class)
+                .configure(DynamicCluster.INITIAL_SIZE, size)
+                .configure("replicaSetName", "test-rs-"+testDescription)
+                .configure("memberSpec", EntitySpec.create(MongoDBServer.class)
+                        .configure("mongodbConfTemplateUrl", "classpath:///test-mongodb.conf")
+                        .configure("port", "27017+")));
+        app.start(locs);
+
+        Asserts.succeedsEventually(ImmutableMap.of("timeout", TIMEOUT), new Runnable() {
+            @Override
+            public void run() {
+                assertEquals(replicaSet.getCurrentSize(), size);
+                assertNotNull(replicaSet.getPrimary(), "replica set has no primary");
+                assertEquals(replicaSet.getPrimary().getReplicaSet().getName(), "test-rs-"+testDescription+replicaSet.getId());
+                assertEquals(replicaSet.getSecondaries().size(), size-1);
+            }
+        });
+        return replicaSet;
+    }
+
+    @Test(groups = "Integration")
+    public void testCanStartAndStopAReplicaSet() {
+        final MongoDBReplicaSet replicaSet = makeAndStartReplicaSet(3, "can-start-and-stop");
+        replicaSet.stop();
+        assertFalse(replicaSet.getAttribute(Startable.SERVICE_UP));
+    }
+
+    @Test(groups = "Integration")
+    public void testWriteToMasterAndReadFromSecondary() {
+        final MongoDBReplicaSet replicaSet = makeAndStartReplicaSet(3, "master-write-secondary-read");
+
+        // Test we can read a document written to the primary from all secondaries
+        final String documentId = MongoDBTestHelper.insert(replicaSet.getPrimary(), "meaning-of-life", 42);
+        Asserts.succeedsEventually(ImmutableMap.of("timeout", TIMEOUT), new Runnable() {
+            @Override
+            public void run() {
+                assertEquals(replicaSet.getCurrentSize().intValue(), 3);
+                for (MongoDBServer secondary : replicaSet.getSecondaries()) {
+                    DBObject docOut = MongoDBTestHelper.getById(secondary, documentId);
+                    assertEquals(docOut.get("meaning-of-life"), 42);
+                }
+            }
+        });
+    }
+
+    @Test(groups = "Integration")
+    public void testCanResizeAndReadFromNewInstances() {
+        final MongoDBReplicaSet replicaSet = makeAndStartReplicaSet(3, "resize-and-read-from-secondaries");
+
+        // Test we can read a document written to the primary from all secondaries
+        final String documentId = MongoDBTestHelper.insert(replicaSet.getPrimary(), "meaning-of-life", 42);
+        Asserts.succeedsEventually(ImmutableMap.of("timeout", TIMEOUT), new Runnable() {
+            @Override
+            public void run() {
+                assertEquals(replicaSet.getCurrentSize().intValue(), 3);
+                for (MongoDBServer secondary : replicaSet.getSecondaries()) {
+                    DBObject docOut = MongoDBTestHelper.getById(secondary, documentId);
+                    assertEquals(docOut.get("meaning-of-life"), 42);
+                }
+            }
+        });
+
+        // Resize and confirm new members get data
+        replicaSet.resize(5);
+        Asserts.succeedsEventually(ImmutableMap.of("timeout", TIMEOUT), new Runnable() {
+            @Override
+            public void run() {
+                assertEquals(replicaSet.getCurrentSize().intValue(), 5);
+                Collection<MongoDBServer> secondaries = replicaSet.getSecondaries();
+                assertEquals(secondaries.size(), 4);
+                for (MongoDBServer secondary : secondaries) {
+                    DBObject docOut = MongoDBTestHelper.getById(secondary, documentId);
+                    assertEquals(docOut.get("meaning-of-life"), 42);
+                }
+            }
+        });
+
+    }
+
+    @Test(groups = "Integration")
+    public void testResizeToEvenNumberOfMembers() {
+        final MongoDBReplicaSet replicaSet = makeAndStartReplicaSet(3, "resize-even-ignored");
+        assertEquals(replicaSet.getCurrentSize().intValue(), 3);
+        replicaSet.resize(4);
+        Asserts.succeedsEventually(new Runnable() {
+            @Override
+            public void run() {
+                assertEquals(replicaSet.getCurrentSize().intValue(), 4);
+            }
+        });
+    }
+
+    /**
+     * Test replacing the primary succeeds. More interesting than replacing a secondary
+     * because the removal of a primary must happen _through_ the primary. The flow is:
+     *  - Brooklyn removes the server from the set and stops it
+     *  - The remaining members of the set elect a new primary
+     *  - We remove the original primary from the new primary.
+     */
+    @Test(groups = "Integration")
+    public void testReplacePrimary() {
+        final MongoDBReplicaSet replicaSet = makeAndStartReplicaSet(3, "replace-primary");
+        final MongoDBServer replaced = replicaSet.getPrimary();
+        replicaSet.replaceMember(replaced.getId());
+        Asserts.succeedsEventually(ImmutableMap.of("timeout", TIMEOUT), new Runnable() {
+            @Override
+            public void run() {
+                assertEquals(replicaSet.getCurrentSize().intValue(), 3);
+                for (Entity member : replicaSet.getMembers()) {
+                    assertNotEquals(member.getId(), replaced.getId());
+                }
+                assertNotNull(replicaSet.getPrimary());
+                assertNotEquals(replicaSet.getPrimary().getId(), replaced.getId(), "Expected a new primary to have been elected");
+            }
+        });
+    }
+
+    @Test(groups = "Integration")
+    public void testRemovePrimary() {
+        final MongoDBReplicaSet replicaSet = makeAndStartReplicaSet(3, "remove-primary");
+        final MongoDBServer removed = replicaSet.getPrimary();
+
+        replicaSet.removeMember(removed);
+        removed.stop();
+        Asserts.succeedsEventually(ImmutableMap.of("timeout", TIMEOUT), new Runnable() {
+            @Override
+            public void run() {
+                assertEquals(replicaSet.getCurrentSize().intValue(), 2);
+                for (Entity member : replicaSet.getMembers()) {
+                    assertNotEquals(member.getId(), removed.getId());
+                }
+                assertNotNull(replicaSet.getPrimary());
+                assertNotEquals(replicaSet.getPrimary().getId(), removed.getId(), "Expected a new primary to have been elected");
+            }
+        });
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBRestartIntegrationTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBRestartIntegrationTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBRestartIntegrationTest.java
new file mode 100644
index 0000000..3ef722a
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBRestartIntegrationTest.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBServer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.annotations.Test;
+
+import brooklyn.entity.basic.AbstractSoftwareProcessRestartIntegrationTest;
+import brooklyn.entity.basic.SoftwareProcess;
+import brooklyn.entity.proxying.EntitySpec;
+
+/**
+ * Tests restart of the software *process* (as opposed to the VM).
+ */
+@Test(groups="Integration")
+public class MongoDBRestartIntegrationTest extends AbstractSoftwareProcessRestartIntegrationTest {
+    
+    @SuppressWarnings("unused")
+    private static final Logger LOG = LoggerFactory.getLogger(MongoDBRestartIntegrationTest.class);
+
+    @Override
+    protected EntitySpec<? extends SoftwareProcess> newEntitySpec() {
+        return EntitySpec.create(MongoDBServer.class);
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-brooklyn/blob/d5cf5285/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBSoftLayerLiveTest.java
----------------------------------------------------------------------
diff --git a/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBSoftLayerLiveTest.java b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBSoftLayerLiveTest.java
new file mode 100644
index 0000000..de6e597
--- /dev/null
+++ b/software/nosql/src/test/java/org/apache/brooklyn/entity/nosql/mongodb/MongoDBSoftLayerLiveTest.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.brooklyn.entity.nosql.mongodb;
+
+import static org.testng.Assert.assertEquals;
+
+import org.apache.brooklyn.entity.nosql.mongodb.MongoDBServer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.annotations.Test;
+
+import com.google.common.collect.ImmutableList;
+import com.mongodb.DBObject;
+
+import brooklyn.entity.AbstractSoftlayerLiveTest;
+import brooklyn.entity.proxying.EntitySpec;
+import brooklyn.location.Location;
+import brooklyn.test.EntityTestUtils;
+
+public class MongoDBSoftLayerLiveTest extends AbstractSoftlayerLiveTest {
+
+    @SuppressWarnings("unused")
+    private static final Logger LOG = LoggerFactory.getLogger(MongoDBSoftLayerLiveTest.class);
+
+    @Override
+    protected void doTest(Location loc) throws Exception {
+        MongoDBServer entity = app.createAndManageChild(EntitySpec.create(MongoDBServer.class)
+                .configure("mongodbConfTemplateUrl", "classpath:///test-mongodb.conf"));
+        app.start(ImmutableList.of(loc));
+
+        EntityTestUtils.assertAttributeEqualsEventually(entity, MongoDBServer.SERVICE_UP, true);
+
+        String id = MongoDBTestHelper.insert(entity, "hello", "world!");
+        DBObject docOut = MongoDBTestHelper.getById(entity, id);
+        assertEquals(docOut.get("hello"), "world!");
+    }
+
+    @Test(enabled=false)
+    public void testDummy() {} // Convince TestNG IDE integration that this really does have test methods
+
+}