Posted to commits@usergrid.apache.org by sn...@apache.org on 2016/02/04 18:57:30 UTC

[06/50] [abbrv] usergrid git commit: Move and re-organize the AWS cluster deployment along with updates to the Usergrid artifact and dependencies specified.

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/dist/init_instance/install_elasticsearch.sh
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/dist/init_instance/install_elasticsearch.sh b/stack/awscluster/src/main/dist/init_instance/install_elasticsearch.sh
deleted file mode 100644
index b488320..0000000
--- a/stack/awscluster/src/main/dist/init_instance/install_elasticsearch.sh
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/bin/bash
-
-#
-#  Licensed to the Apache Software Foundation (ASF) under one or more
-#   contributor license agreements.  The ASF licenses this file to You
-#  under the Apache License, Version 2.0 (the "License"); you may not
-#  use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.  For additional information regarding
-#  copyright in this work, please see the NOTICE file in the top level
-#  directory of this distribution.
-#
-
-
-pushd /etc/apt/sources.list.d
-
-# Install and stop ElasticSearch
-cat >> elasticsearch.sources.list << EOF
-deb http://packages.elasticsearch.org/elasticsearch/1.4/debian stable main
-EOF
-apt-get update
-apt-get --force-yes -y install elasticsearch=1.4.2
-/etc/init.d/elasticsearch stop
-
-mkdir -p /mnt/data/elasticsearch
-chown elasticsearch /mnt/data/elasticsearch
-
-mkdir -p /mnt/log/elasticsearch
-chown elasticsearch /mnt/log/elasticsearch
-
-# Configure ElasticSearch
-
-
-echo "vm.swappiness = 0" >> /etc/sysctl.conf
-sysctl -p
-
-# No need to do this, elasticsearch nodes are also cassandra nodes
-
-cd /usr/share/usergrid/scripts
-
-#If we're the master, register ourselves and move on; if we're not, register and then wait for the master to come up
-if [ "$ES_MASTER" = "true" ]; then
-    groovy registry_register.groovy elasticsearch_master
-else
-    groovy registry_register.groovy elasticsearch
-    groovy wait_for_instances.groovy elasticsearch_master 1
-fi
-
-
-# leave room for Cassandra: use about one half of RAM for heap
-case `(curl http://169.254.169.254/latest/meta-data/instance-type)` in
-'c3.large' )
-    # total of 3.75g
-    export ES_HEAP_SIZE=1920m
-;;
-'c3.xlarge' )
-    # total of 7.5g
-    export ES_HEAP_SIZE=3840m
-;;
-'c3.2xlarge' )
-    # total of 15g
-    export ES_HEAP_SIZE=7680m
-;;
-'c3.4xlarge' )
-    # total of 30g
-    export ES_HEAP_SIZE=15g
-esac
-
-
-
-
-cat >> /etc/default/elasticsearch << EOF
-ES_HEAP_SIZE=${ES_HEAP_SIZE}
-MAX_OPEN_FILES=65535
-MAX_MAP_COUNT=262144
-MAX_LOCKED_MEMORY=unlimited
-JAVA_HOME=/usr/lib/jvm/jdk1.7.0
-ES_HEAP_NEWSIZE=4g
-ES_JAVA_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:SurvivorRatio=4 -Xloggc:/mnt/raid/elasticsearch/jvm"
-EOF
-
-#Allow unlimited locked memory so Elasticsearch can lock its heap (mlockall)
-ulimit -l unlimited
-
-cat >> /etc/security/limits.conf << EOF
-elasticsearch - nofile 65535
-elasticsearch - memlock unlimited
-EOF
-
-
-cd /usr/share/usergrid/scripts
-groovy ./configure_elasticsearch.groovy > /etc/elasticsearch/elasticsearch.yml
-
-update-rc.d elasticsearch defaults 95 10
-
-pushd /usr/share/elasticsearch/bin
-
-#Install kopf
-
-./plugin --install lmenezes/elasticsearch-kopf/1.2
-
-#Install HQ, paramedic, and the AWS cloud plugin
-
-./plugin -install royrusso/elasticsearch-HQ
-
-./plugin -install karmi/elasticsearch-paramedic
-
-./plugin -install elasticsearch/elasticsearch-cloud-aws/2.4.1
-
-popd
-
-
-# Go!
-/etc/init.d/elasticsearch start
-
-popd
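
The case statement above pins ES_HEAP_SIZE to roughly half of each c3 instance type's RAM, leaving the rest for Cassandra and the page cache. A generic version of the same rule (a sketch only, not part of the original script; it assumes /proc/meminfo is readable and caps the heap below the compressed-oops boundary) would be:

    # Sketch: set ES_HEAP_SIZE to ~half of system RAM, capped at 31g
    total_kb=$(awk '/^MemTotal:/ {print $2}' /proc/meminfo)
    half_mb=$(( total_kb / 2 / 1024 ))
    if [ "${half_mb}" -gt 31744 ]; then half_mb=31744; fi
    export ES_HEAP_SIZE="${half_mb}m"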

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/dist/init_instance/install_opscenter_agent.sh
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/dist/init_instance/install_opscenter_agent.sh b/stack/awscluster/src/main/dist/init_instance/install_opscenter_agent.sh
deleted file mode 100644
index a5679e4..0000000
--- a/stack/awscluster/src/main/dist/init_instance/install_opscenter_agent.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash
-# 
-#  Licensed to the Apache Software Foundation (ASF) under one or more
-#   contributor license agreements.  The ASF licenses this file to You
-#  under the Apache License, Version 2.0 (the "License"); you may not
-#  use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-# 
-#      http://www.apache.org/licenses/LICENSE-2.0
-# 
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.  For additional information regarding
-#  copyright in this work, please see the NOTICE file in the top level
-#  directory of this distribution.
-#
-
-
-# Install the DataStax OpsCenter agent
-echo "deb http://debian.datastax.com/community stable main" | sudo tee -a /etc/apt/sources.list.d/datastax.community.list
-
-curl -L https://debian.datastax.com/debian/repo_key | sudo apt-key add -
-
-sudo apt-get update
-sudo apt-get -y install datastax-agent
-
-
-cd /usr/share/usergrid/scripts
-
-#Wait for the opscenter node to come up
-groovy wait_for_instances.groovy opscenter 1
-
-#Generate the agent address.yaml pointing at the opscenter node
-
-groovy configure_opscenter_agent.groovy > /var/lib/datastax-agent/conf/address.yaml
-
-sudo service datastax-agent stop
-sudo service datastax-agent start
-
-
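
The script above waits for the opscenter node via the registry and then writes the agent's address.yaml. A reachability check before restarting the agent is sketched below; the STOMP port 61620 is an assumption (the OpsCenter default), not something configured in this diff.

    # Sketch: confirm the OpsCenter host in address.yaml answers on the STOMP port
    OPSCENTER_HOST=$(awk '/^stomp_interface:/ {print $2}' /var/lib/datastax-agent/conf/address.yaml)
    until nc -z "${OPSCENTER_HOST}" 61620; do
        echo "waiting for OpsCenter at ${OPSCENTER_HOST}:61620"
        sleep 5
    done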

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/dist/init_instance/install_oraclejdk.sh
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/dist/init_instance/install_oraclejdk.sh b/stack/awscluster/src/main/dist/init_instance/install_oraclejdk.sh
deleted file mode 100644
index 414215d..0000000
--- a/stack/awscluster/src/main/dist/init_instance/install_oraclejdk.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/bash
-
-# 
-#  Licensed to the Apache Software Foundation (ASF) under one or more
-#   contributor license agreements.  The ASF licenses this file to You
-#  under the Apache License, Version 2.0 (the "License"); you may not
-#  use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-# 
-#      http://www.apache.org/licenses/LICENSE-2.0
-# 
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.  For additional information regarding
-#  copyright in this work, please see the NOTICE file in the top level
-#  directory of this distribution.
-#
-
-pushd /tmp
-
-# Build environment for scripts
-. /etc/profile.d/aws-credentials.sh
-. /etc/profile.d/usergrid-env.sh
-
-echo ${RELEASE_BUCKET}
-
-# Get JDK from the release bucket
-s3cmd --config=/etc/s3cfg get s3://${RELEASE_BUCKET}/jdk-7u60-linux-x64.gz
-
-# Install it as they do here: 
-# http://askubuntu.com/questions/56104/how-can-i-install-sun-oracles-proprietary-java-6-7-jre-or-jdk
-tar -xvf jdk-7u60-linux-x64.gz
-mkdir -p /usr/lib/jvm
-mv ./jdk1.7.0_60 /usr/lib/jvm/jdk1.7.0
-
-update-alternatives --install "/usr/bin/java" "java" "/usr/lib/jvm/jdk1.7.0/bin/java" 2000
-update-alternatives --install "/usr/bin/javac" "javac" "/usr/lib/jvm/jdk1.7.0/bin/javac" 2000
-update-alternatives --install "/usr/bin/javaws" "javaws" "/usr/lib/jvm/jdk1.7.0/bin/javaws" 2000
-
-chmod a+x /usr/bin/java 
-chmod a+x /usr/bin/javac 
-chmod a+x /usr/bin/javaws
-chown -R root:root /usr/lib/jvm/jdk1.7.0
-
-sudo rm /usr/lib/jvm/default-java
-sudo ln -s /usr/lib/jvm/jdk1.7.0 /usr/lib/jvm/default-java
-
-#Install the JNA for cassandra to use
-
-sudo apt-get install libjna-java
-
-
-popd
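
update-alternatives above registers the Oracle JDK with priority 2000, which normally makes it the automatic choice; if another JDK with a higher priority is present it will still win. A small sketch (assuming the /usr/lib/jvm/jdk1.7.0 path created above) to pin and verify the selection:

    # Sketch: force the Oracle JDK alternative and confirm the version
    update-alternatives --set java /usr/lib/jvm/jdk1.7.0/bin/java
    update-alternatives --set javac /usr/lib/jvm/jdk1.7.0/bin/javac
    java -version 2>&1 | grep -q '1\.7\.0_60' || echo "WARNING: unexpected java version"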

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/dist/init_instance/install_yourkit.sh
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/dist/init_instance/install_yourkit.sh b/stack/awscluster/src/main/dist/init_instance/install_yourkit.sh
deleted file mode 100644
index 2bcfcd3..0000000
--- a/stack/awscluster/src/main/dist/init_instance/install_yourkit.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-# 
-#  Licensed to the Apache Software Foundation (ASF) under one or more
-#   contributor license agreements.  The ASF licenses this file to You
-#  under the Apache License, Version 2.0 (the "License"); you may not
-#  use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-# 
-#      http://www.apache.org/licenses/LICENSE-2.0
-# 
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.  For additional information regarding
-#  copyright in this work, please see the NOTICE file in the top level
-#  directory of this distribution.
-#
-
-
-# Optional, install yourkit remote profiler
-
-if [[ $YOURKIT = "true" ]]; then
-
-mkdir -p /mnt/yourkit
-cd /mnt/yourkit
-s3cmd --config=/etc/s3cfg get s3://${RELEASE_BUCKET}/yjp-2014-build-14114.zip
-unzip /mnt/yourkit/yjp-2014-build-14114.zip
-
-mkdir -p /mnt/yourkitreports
-
-chown -R tomcat7.tomcat7 /mnt/yourkitreports
-
-cat >> /etc/default/tomcat7 << EOF
-JAVA_OPTS="\${JAVA_OPTS} -agentpath:/mnt/yourkit/yjp-2014-build-14114/bin/linux-x86-64/libyjpagent.so=port=10001,logdir=/mnt/yourkitreports,dir=/mnt/yourkitreports,onexit=snapshot"
-EOF
-
-fi
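
When YOURKIT=true the agentpath option above makes Tomcat expose the YourKit profiler on port 10001. A smoke test along these lines (illustrative only, not part of the original script) can confirm the agent loaded after Tomcat is restarted:

    # Sketch: check the YourKit agent port once Tomcat is back up
    if [[ $YOURKIT = "true" ]]; then
        nc -z localhost 10001 && echo "YourKit agent listening on 10001" \
            || echo "YourKit agent port 10001 not open"
    fi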

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/dist/init_instance/update_keyspaces.sh
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/dist/init_instance/update_keyspaces.sh b/stack/awscluster/src/main/dist/init_instance/update_keyspaces.sh
deleted file mode 100644
index 95ec658..0000000
--- a/stack/awscluster/src/main/dist/init_instance/update_keyspaces.sh
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/bin/bash
-#
-#  Licensed to the Apache Software Foundation (ASF) under one or more
-#   contributor license agreements.  The ASF licenses this file to You
-#  under the Apache License, Version 2.0 (the "License"); you may not
-#  use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.  For additional information regarding
-#  copyright in this work, please see the NOTICE file in the top level
-#  directory of this distribution.
-#
-
-#Install keyspaces
-# Build environment for scripts
-. /etc/profile.d/aws-credentials.sh
-. /etc/profile.d/usergrid-env.sh
-
-pushd /etc/apt/sources.list.d
-
-#Run the Cassandra CQL to update the keyspaces.  Note: the replication factor
-# settings on the keyspaces only work for the us-east region
-
-
-#Install cassandra so we have the cli
-curl -L http://debian.datastax.com/debian/repo_key | apt-key add -
-
-sudo cat >> /etc/apt/sources.list.d/cassandra.sources.list << EOF
-deb http://debian.datastax.com/community stable main
-EOF
-
-apt-get update
-apt-get -y --force-yes install libcap2 cassandra=1.2.19
-/etc/init.d/cassandra stop
-
-#Get the first instance of cassandra
-cd /usr/share/usergrid/scripts
-CASSHOST=$(groovy get_first_instance.groovy cassandra)
-
-#We have to wait for cass to actually start before we can run our CQL.   Sleep 5 seconds between retries
-while ! echo exit | nc ${CASSHOST} 9160; do sleep 5; done
-
-#We have to remove the -1 from the end, since the Ec2Snitch datacenter names for us-east and us-west don't include the -1
-CASS_REGION=${EC2_REGION%-1}
-
-#Update the keyspace replication and run the cql
-sed -i.bak "s/KEYSPACE_REGION/${CASS_REGION}/g" /usr/share/usergrid/cql/update_locks.cql
-
-sed -i.bak "s/REPLICATION_FACTOR/${CASSANDRA_REPLICATION_FACTOR}/g" /usr/share/usergrid/cql/update_locks.cql
-
-
-/usr/bin/cassandra-cli -h ${CASSHOST} -f  /usr/share/usergrid/cql/update_locks.cql
-
-
-#Update the keyspace region and run the cql
-sed -i.bak "s/KEYSPACE_REGION/${CASS_REGION}/g" /usr/share/usergrid/cql/update_usergrid.cql
-sed -i.bak "s/REPLICATION_FACTOR/${CASSANDRA_REPLICATION_FACTOR}/g" /usr/share/usergrid/cql/update_usergrid.cql
-
-/usr/bin/cassandra-cli -h ${CASSHOST} -f  /usr/share/usergrid/cql/update_usergrid.cql
-
-
-#Update the keyspace region and run the cql
-sed -i.bak "s/KEYSPACE_REGION/${CASS_REGION}/g" /usr/share/usergrid/cql/update_usergrid_applications.cql
-sed -i.bak "s/REPLICATION_FACTOR/${CASSANDRA_REPLICATION_FACTOR}/g" /usr/share/usergrid/cql/update_usergrid_applications.cql
-
-/usr/bin/cassandra-cli -h ${CASSHOST} -f  /usr/share/usergrid/cql/update_usergrid_applications.cql
-
-
-popd
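
The nc loop above blocks until Cassandra's Thrift port answers, but it will spin forever if the node never comes up. A bounded variant of the same wait (a sketch; the function name and retry count are illustrative):

    # Sketch: wait for host:port with a limited number of retries
    wait_for_port() {
        local host=$1 port=$2 retries=${3:-60}
        until echo exit | nc "${host}" "${port}"; do
            retries=$(( retries - 1 ))
            if [ "${retries}" -le 0 ]; then
                echo "timed out waiting for ${host}:${port}"
                return 1
            fi
            sleep 5
        done
    }
    wait_for_port "${CASSHOST}" 9160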

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/dist/lib/log4j.properties
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/dist/lib/log4j.properties b/stack/awscluster/src/main/dist/lib/log4j.properties
deleted file mode 100644
index 013563b..0000000
--- a/stack/awscluster/src/main/dist/lib/log4j.properties
+++ /dev/null
@@ -1,53 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# for production, you should probably set the root to INFO
-# and the pattern to %c instead of %l.  (%l is slower.)
-
-# output messages into a rolling log file as well as stdout
-log4j.rootLogger=INFO,stdout
-
-# stdout
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-#log4j.appender.stdout.layout=org.apache.log4j.SimpleLayout
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%d %p (%t) %c{1} - %m%n
-
-log4j.logger.org.apache.usergrid=INFO
-
-log4j.logger.org.apache.usergrid.persistence.cassandra.DB=WARN, stdout
-log4j.logger.org.apache.usergrid.persistence.cassandra.BATCH=WARN, stdout
-log4j.logger.org.apache.usergrid.persistence.cassandra.EntityManagerFactoryImpl=WARN, stdout
-log4j.logger.org.apache.usergrid.persistence.cassandra.DaoUtils=WARN, stdout
-log4j.logger.org.apache.usergrid.persistence.cassandra.EntityManagerImpl=WARN, stdout
-log4j.logger.org.apache.usergrid.persistence.cassandra.ConnectionRefImpl=WARN, stdout
-log4j.logger.me.prettyprint.cassandra.hector.TimingLogger=WARN, stdout
-log4j.logger.org.apache.usergrid.rest.security.AllowAjaxFilter=WARN, stdout
-log4j.logger.me.prettyprint.hector.api.beans.AbstractComposite=ERROR, stdout
-
-#log4j.logger.org.apache.usergrid.persistence=INFO
-#log4j.logger.org.apache.usergrid.corepersistence=DEBUG
-#log4j.logger.com.netflix.hystrix=DEBUG
-#log4j.logger.org.antlr=DEBUG
-
-#log4j.logger.org.apache.usergrid.persistence.CollectionIT=DEBUG
-#log4j.logger.org.apache.usergrid.persistence.index=DEBUG
-#log4j.logger.org.apache.usergrid.persistence.collection=DEBUG
-#log4j.logger.org.elasticsearch=DEBUG
-log4j.logger.org.apache.usergrid.rest.filters.MeteringFilter=ERROR
-
-#log4j.logger.org.apache.cassandra.service.StorageProxy=DEBUG, stdout
-

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/dist/update.sh
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/dist/update.sh b/stack/awscluster/src/main/dist/update.sh
deleted file mode 100644
index d3c399f..0000000
--- a/stack/awscluster/src/main/dist/update.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-
-#  Licensed to the Apache Software Foundation (ASF) under one or more
-#   contributor license agreements.  The ASF licenses this file to You
-#  under the Apache License, Version 2.0 (the "License"); you may not
-#  use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.  For additional information regarding
-#  copyright in this work, please see the NOTICE file in the top level
-#  directory of this distribution.
-#
-
-sudo mkdir __tmpupdate__
-pushd __tmpupdate__
-
-    sudo s3cmd --config=/etc/s3cfg get s3://${RELEASE_BUCKET}/ROOT.war
-    # sudo tar xzvf awscluster-1.0-SNAPSHOT-any.tar.gz
-    sudo /etc/init.d/tomcat7 stop
-    sudo cp -r ROOT.war /var/lib/tomcat7/webapps
-
-    pushd /usr/share/usergrid/scripts
-        sudo groovy configure_portal_new.groovy > /var/lib/tomcat7/webapps/portal/config.js
-    popd
-
-    sudo /etc/init.d/tomcat7 start
-
-popd
-sudo rm -rf __tmpupdate__
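
update.sh swaps in the new ROOT.war and restarts Tomcat but does not verify the deployment. A post-deploy check against the /status path (the same path configure_usergrid.groovy points usergrid.redirect_root at; the localhost:8080 address is an assumption) could look like:

    # Sketch: poll the local Usergrid status endpoint until the new webapp responds
    for i in $(seq 1 60); do
        if curl -sf http://localhost:8080/status > /dev/null; then
            echo "Usergrid is up"
            break
        fi
        sleep 5
    done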

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/dist/webapps/dummy.txt
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/dist/webapps/dummy.txt b/stack/awscluster/src/main/dist/webapps/dummy.txt
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/groovy/NodeRegistry.groovy
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/groovy/NodeRegistry.groovy b/stack/awscluster/src/main/groovy/NodeRegistry.groovy
deleted file mode 100644
index 2cd70ef..0000000
--- a/stack/awscluster/src/main/groovy/NodeRegistry.groovy
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  The ASF licenses this file to You
- * under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.  For additional information regarding
- * copyright in this work, please see the NOTICE file in the top level
- * directory of this distribution.
- */
-
-/**
- * A utility class that searches EC2 instance tags for the node type provided and returns the matching public hostnames as a list
- */
-import com.amazonaws.auth.BasicAWSCredentials
-import com.amazonaws.regions.Region
-import com.amazonaws.regions.Regions
-import com.amazonaws.services.ec2.AmazonEC2Client
-import com.amazonaws.services.ec2.model.*
-
-class NodeRegistry {
-
-
-    public static final String TAG_PREFIX = "tag:"
-    //taken from aws
-    public static final String STACK_NAME = "usergrid:stack-name";
-    public static final String NODE_TYPE = "usergrid:node_type";
-    public static final String SEARCH_INSTANCE_STATE = "instance-state-name";
-    public static final String SEARCH_STACK_NAME = TAG_PREFIX + STACK_NAME
-    public static final String SEARCH_NODE_TYPE = TAG_PREFIX + NODE_TYPE
-
-    private String accessKey = (String) System.getenv().get("AWS_ACCESS_KEY")
-    private String secretKey = (String) System.getenv().get("AWS_SECRET_KEY")
-    private String stackName = (String) System.getenv().get("STACK_NAME")
-    private String instanceId = (String) System.getenv().get("EC2_INSTANCE_ID");
-    private String region = (String) System.getenv().get("EC2_REGION");
-    private String domain = stackName
-
-    private BasicAWSCredentials creds;
-    private AmazonEC2Client ec2Client;
-
-
-    NodeRegistry() {
-
-        if (region == null) {
-            throw new IllegalArgumentException("EC2_REGION must be defined")
-        }
-
-        if (instanceId == null) {
-            throw new IllegalArgumentException("EC2_INSTANCE_ID must be defined")
-        }
-
-        if (stackName == null) {
-            throw new IllegalArgumentException("STACK_NAME must be defined")
-        }
-
-        if (accessKey == null) {
-            throw new IllegalArgumentException("AWS_ACCESS_KEY must be defined")
-        }
-
-        if (secretKey == null) {
-            throw new IllegalArgumentException("AWS_SECRET_KEY must be defined")
-        }
-
-        creds = new BasicAWSCredentials(accessKey, secretKey)
-        ec2Client = new AmazonEC2Client(creds)
-        def regionEnum = Regions.fromName(region);
-        ec2Client.setRegion(Region.getRegion(regionEnum))
-
-
-    }
-
-    /**
-     * Search for the node type and return the hostnames of running instances that match it within this stack
-     * @param nodeType
-     */
-    def searchNode(def nodeType) {
-
-
-        def stackNameFilter = new Filter(SEARCH_STACK_NAME).withValues(stackName)
-        def nodeTypeFilter = new Filter(SEARCH_NODE_TYPE).withValues(nodeType)
-        def instanceState = new Filter(SEARCH_INSTANCE_STATE).withValues(InstanceStateName.Running.toString());
-
-        //sort by created date
-        def servers = new TreeSet<ServerEntry>();
-
-
-        def token = null
-
-
-
-        while (true) {
-
-            def describeRequest = new DescribeInstancesRequest().withFilters(stackNameFilter, nodeTypeFilter, instanceState)
-
-            if (token != null) {
-                describeRequest.withNextToken(token);
-            }
-
-
-            def nodes = ec2Client.describeInstances(describeRequest)
-
-            for (reservation in nodes.getReservations()) {
-
-                for (instance in reservation.getInstances()) {
-                    servers.add(new ServerEntry(instance.launchTime, instance.publicDnsName));
-                }
-
-            }
-
-            //nothing to do, exit the loop
-            if (nodes.nextToken == null) {
-                break;
-            }
-
-            token = nodes.nextToken;
-
-        }
-
-
-
-
-        return createResults(servers);
-    }
-
-    def createResults(def servers) {
-
-        def results = [];
-
-        for (server in servers) {
-            results.add(server.publicIp)
-        }
-
-        return results;
-    }
-
-    /**
-     * Register this node by tagging the instance with its node type and the stack name
-     */
-    def addNode(def nodeType) {
-
-        //add the node type
-        def tagRequest = new CreateTagsRequest().withTags(new Tag(NODE_TYPE, nodeType), new Tag(STACK_NAME, stackName)).withResources(instanceId)
-
-
-
-        ec2Client.createTags(tagRequest)
-
-
-    }
-
-    /**
-     * Wait until the specified number of servers of the given type are available
-     * @param nodeType
-     * @param numberOfServers
-     */
-    def waitUntilAvailable(def nodeType, def numberOfServers){
-
-        while (true) {
-            try {
-                def selectResult = searchNode(nodeType)
-
-                def count = selectResult.size();
-
-                if (count >= numberOfServers) {
-                    println("count = ${count}, total number of servers is ${numberOfServers}.  Breaking")
-                    break
-                }
-
-                println("Found ${count} nodes but need at least ${numberOfServers}.  Waiting...")
-            } catch (Exception e) {
-                println "ERROR waiting for ${nodeType} ${e.getMessage()}, will continue waiting"
-            }
-            Thread.sleep(2000)
-        }
-    }
-
-
-    class ServerEntry implements Comparable<ServerEntry> {
-        private final Date launchDate;
-        private final String publicIp;
-
-        ServerEntry(final Date launchDate, final String publicIp) {
-            this.launchDate = launchDate
-            this.publicIp = publicIp
-        }
-
-        @Override
-        int compareTo(final ServerEntry o) {
-
-            int compare = launchDate.compareTo(o.launchDate)
-
-            if(compare == 0){
-                compare =  publicIp.compareTo(o.publicIp);
-            }
-
-            return compare
-        }
-
-        boolean equals(final o) {
-            if (this.is(o)) return true
-            if (getClass() != o.class) return false
-
-            final ServerEntry that = (ServerEntry) o
-
-            if (launchDate != that.launchDate) return false
-            if (publicIp != that.publicIp) return false
-
-            return true
-        }
-
-        int hashCode() {
-            int result
-            result = launchDate.hashCode()
-            result = 31 * result + publicIp.hashCode()
-            return result
-        }
-    }
-
-}
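
NodeRegistry stores cluster membership directly in EC2: addNode tags the instance with usergrid:node_type and usergrid:stack-name, and searchNode lists running instances carrying those tags, sorted by launch time. The same lookup can be reproduced for debugging with the AWS CLI (assuming the CLI is installed and credentialed; not part of this deployment):

    # Sketch: the query NodeRegistry.searchNode('cassandra') performs, via the AWS CLI
    aws ec2 describe-instances --region "${EC2_REGION}" \
        --filters "Name=tag:usergrid:stack-name,Values=${STACK_NAME}" \
                  "Name=tag:usergrid:node_type,Values=cassandra" \
                  "Name=instance-state-name,Values=running" \
        --query 'Reservations[].Instances[].PublicDnsName' --output text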

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/groovy/configure_cassandra.groovy
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/groovy/configure_cassandra.groovy b/stack/awscluster/src/main/groovy/configure_cassandra.groovy
deleted file mode 100644
index 946e801..0000000
--- a/stack/awscluster/src/main/groovy/configure_cassandra.groovy
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  The ASF licenses this file to You
- * under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.  For additional information regarding
- * copyright in this work, please see the NOTICE file in the top level
- * directory of this distribution.
- */
-
-
-// configure_cassandra.groovy 
-// 
-// Emits Cassandra config file based on environment and the Cassandra node 
-// registry (EC2 instance tags read via NodeRegistry)
-//
-import com.amazonaws.auth.*
-import com.amazonaws.services.simpledb.*
-import com.amazonaws.services.simpledb.model.*
-
-
-String hostName  = (String)System.getenv().get("PUBLIC_HOSTNAME")
-String clusterName  = (String)System.getenv().get("CASSANDRA_CLUSTER_NAME")
-
-
-// build seed list by listing all Cassandra nodes found in SimpleDB domain with our stackName
-
-NodeRegistry registry = new NodeRegistry();
-
-def selectResult = registry.searchNode('cassandra')
-def seeds = ""
-def sep = ""
-for (host in selectResult) {
-    seeds = "${seeds}${sep}${host}"
-    sep = ","
-}
-
-
-def cassandraConfig = """
-
-
-cluster_name: '${clusterName}'
-listen_address: ${hostName}
-seed_provider:
-    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
-      parameters:
-          - seeds: "${seeds}"
-auto_bootstrap: false 
-num_tokens: 256
-hinted_handoff_enabled: true
-hinted_handoff_throttle_in_kb: 1024
-max_hints_delivery_threads: 2
-authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
-authorizer: org.apache.cassandra.auth.AllowAllAuthorizer
-partitioner: org.apache.cassandra.dht.Murmur3Partitioner
-data_file_directories:
-    - /mnt/data/cassandra/data
-commitlog_directory: /mnt/data/cassandra/commitlog
-disk_failure_policy: stop
-key_cache_size_in_mb: 2048
-key_cache_save_period: 14400
-row_cache_size_in_mb: 2048
-row_cache_save_period: 14400
-row_cache_provider: SerializingCacheProvider
-saved_caches_directory: /mnt/data/cassandra/saved_caches
-commitlog_sync: periodic
-commitlog_sync_period_in_ms: 10000
-commitlog_segment_size_in_mb: 32
-flush_largest_memtables_at: 0.75
-reduce_cache_sizes_at: 0.85
-reduce_cache_capacity_to: 0.6
-concurrent_reads: 32
-concurrent_writes: 32
-memtable_flush_queue_size: 4
-trickle_fsync: false
-trickle_fsync_interval_in_kb: 10240
-storage_port: 7000
-ssl_storage_port: 7001
-rpc_address: 0.0.0.0
-start_native_transport: false
-native_transport_port: 9042
-start_rpc: true
-rpc_port: 9160
-rpc_keepalive: true
-rpc_server_type: sync
-thrift_framed_transport_size_in_mb: 15
-thrift_max_message_length_in_mb: 16
-incremental_backups: false
-snapshot_before_compaction: false
-auto_snapshot: true
-column_index_size_in_kb: 64
-in_memory_compaction_limit_in_mb: 64
-multithreaded_compaction: false
-compaction_throughput_mb_per_sec: 16
-compaction_preheat_key_cache: true
-read_request_timeout_in_ms: 10000
-range_request_timeout_in_ms: 10000
-write_request_timeout_in_ms: 10000
-truncate_request_timeout_in_ms: 60000
-request_timeout_in_ms: 10000
-cross_node_timeout: false
-endpoint_snitch: Ec2Snitch
-dynamic_snitch_update_interval_in_ms: 100
-dynamic_snitch_reset_interval_in_ms: 600000
-dynamic_snitch_badness_threshold: 0.1
-request_scheduler: org.apache.cassandra.scheduler.NoScheduler
-index_interval: 128
-server_encryption_options:
-    internode_encryption: none
-    keystore: conf/.keystore
-    keystore_password: cassandra
-    truststore: conf/.truststore
-    truststore_password: cassandra
-client_encryption_options:
-    enabled: false
-    keystore: conf/.keystore
-    keystore_password: cassandra
-internode_compression: all
-"""
-
-println cassandraConfig
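
The script only prints the YAML; the node's install script is expected to redirect it into place, mirroring how configure_elasticsearch.groovy is used in install_elasticsearch.sh. The cassandra install script is not part of this diff, so the exact invocation below is an assumption:

    # Sketch: how the generated config is typically installed (assumed invocation)
    cd /usr/share/usergrid/scripts
    groovy configure_cassandra.groovy > /etc/cassandra/cassandra.yaml
    /etc/init.d/cassandra start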

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/groovy/configure_elasticsearch.groovy
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/groovy/configure_elasticsearch.groovy b/stack/awscluster/src/main/groovy/configure_elasticsearch.groovy
deleted file mode 100644
index 173e4e6..0000000
--- a/stack/awscluster/src/main/groovy/configure_elasticsearch.groovy
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  The ASF licenses this file to You
- * under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.  For additional information regarding
- * copyright in this work, please see the NOTICE file in the top level
- * directory of this distribution.
- */
-
-
-
-//
-// configure_elasticsearch.groovy
-//
-// Emits Elasticsearch config file based on environment and Elasticsearch node
-// registry (EC2 instance tags read via NodeRegistry)
-//
-
-
-String accessKey = (String)System.getenv().get("AWS_ACCESS_KEY")
-String secretKey = (String)System.getenv().get("AWS_SECRET_KEY")
-
-String hostName  = (String)System.getenv().get("PUBLIC_HOSTNAME")
-def clusterName  = (String)System.getenv().get("ES_CLUSTER_NAME")
-
-def isMaster = ((String)System.getenv().get("ES_MASTER")).toBoolean()
-
-int esNumServers = ((String)System.getenv().get("ES_NUM_SERVERS")).toInteger()
-///int quorum = esNumServers/2+1;
-
-//TODO get this from the number of master nodes
-int quorum = 1
-
-NodeRegistry registry = new NodeRegistry();
-
-// build seed list by listing all Elasticsearch nodes found in SimpleDB domain with our stackName
-def selectResult = registry.searchNode('elasticsearch_master')
-def esnodes = ""
-def sep = ""
-for (hostname in selectResult) {
-   esnodes = "${esnodes}${sep}\"${hostname}\""
-   sep = ","
-}
-
-
-def nodeData = !isMaster
-def nodeMaster = isMaster
-
-
-
-def elasticSearchConfig = """
-cluster.name: ${clusterName}
-discovery.zen.minimum_master_nodes: ${quorum}
-discovery.zen.ping.multicast.enabled: false
-discovery.zen.ping.unicast.hosts: [${esnodes}]
-node:
-    name: ${hostName}
-network:
-    host: ${hostName}
-path:
-    logs: /mnt/log/elasticsearch
-    data: /mnt/data/elasticsearch
-
-#Set the logging level to INFO by default
-es.logger.level: INFO
-
-#Set our threadpool size.  Our bulk pool and search pools are quite large.  We may want to turn these down if we
-#overload the system
-#
-# Temporarily removing.  We don't know better :)
-# http://www.elasticsearch.org/guide/en/elasticsearch/guide/current/_don_8217_t_touch_these_settings.html#_threadpools
-#
-threadpool:
-    index:
-        type: fixed
-        size: 160
-        queue_size: 1000
-    bulk:
-        type: fixed
-        size: 160
-        queue_size: 1000
-    search:
-        size: 320
-        type: fixed
-        queue_size: 1000
-
-action.auto_create_index: false
-
-action.disable_delete_all_indices: true
-
-#################################
-# Operational settings taken from a loggly blog here.  Tweak and work as required
-# https://www.loggly.com/blog/nine-tips-configuring-elasticsearch-for-high-performance/
-#################################
-
-#Set the mlock all to better utilize system resources
-bootstrap.mlockall: true
-
-#Only cache 25% of our available memory
-indices.fielddata.cache.size: 25%
-
-#If you haven't used it in 10 minutes, evict it from the cache
-#indices.fielddata.cache.expire: 10m
-
-#Only allow rebalancing of 2 shards at a time
-cluster.routing.allocation.cluster_concurrent_rebalance: 2
-
-#Re-shard when our disks start getting full
-cluster.routing.allocation.disk.threshold_enabled: true
-cluster.routing.allocation.disk.watermark.low: .97
-cluster.routing.allocation.disk.watermark.high: .99
-
-#Set streaming high water marks so reboots don't kill our service
-cluster.routing.allocation.node_concurrent_recoveries: 40
-cluster.routing.allocation.node_initial_primaries_recoveries: 40
-indices.recovery.concurrent_streams: 16
-indices.recovery.max_bytes_per_sec: 300mb
-
-
-##############################
-# Master or data node options
-#############################
-
-node.data: ${nodeData}
-node.master: ${nodeMaster}
-
-
-###############
-# Logging options
-# We want to turn on logging for slow queries and executions, so
-###############
-
-index.search.slowlog.threshold.query.warn: 10s
-index.search.slowlog.threshold.query.info: 5s
-index.search.slowlog.threshold.query.debug: 2s
-index.search.slowlog.threshold.query.trace: 500ms
-
-index.search.slowlog.threshold.fetch.warn: 1s
-index.search.slowlog.threshold.fetch.info: 800ms
-index.search.slowlog.threshold.fetch.debug: 500ms
-index.search.slowlog.threshold.fetch.trace: 200ms
-
-
-index.indexing.slowlog.threshold.index.warn: 10s
-index.indexing.slowlog.threshold.index.info: 5s
-index.indexing.slowlog.threshold.index.debug: 2s
-index.indexing.slowlog.threshold.index.trace: 500ms
-
-########
-# AWS PLUGIN
-##########
-
-cloud.aws.access_key: ${accessKey}
-cloud.aws.secret_key: ${secretKey}
-
-
-
-"""
-
-println elasticSearchConfig
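
The commented-out quorum line shows the intended minimum_master_nodes calculation: a majority of master-eligible nodes. With the hard-coded value of 1, a cluster with several masters can split-brain. The majority formula, assuming ES_NUM_SERVERS counts master-eligible nodes, is simply:

    # Sketch: majority quorum for discovery.zen.minimum_master_nodes
    # e.g. ES_NUM_SERVERS=3 -> quorum of 2
    QUORUM=$(( ES_NUM_SERVERS / 2 + 1 ))
    echo "discovery.zen.minimum_master_nodes: ${QUORUM}"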

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/groovy/configure_opscenter_agent.groovy
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/groovy/configure_opscenter_agent.groovy b/stack/awscluster/src/main/groovy/configure_opscenter_agent.groovy
deleted file mode 100644
index dfde6e0..0000000
--- a/stack/awscluster/src/main/groovy/configure_opscenter_agent.groovy
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  The ASF licenses this file to You
- * under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.  For additional information regarding
- * copyright in this work, please see the NOTICE file in the top level
- * directory of this distribution.
- */
-
-
-// configure_opscenter_agent.groovy 
-// 
-// Emits the DataStax agent address.yaml (stomp_interface) based on the 
-// opscenter node found in the node registry
-//
-import com.amazonaws.auth.*
-import com.amazonaws.services.simpledb.*
-import com.amazonaws.services.simpledb.model.*
-
-String accessKey = (String)System.getenv().get("AWS_ACCESS_KEY")
-String secretKey = (String)System.getenv().get("AWS_SECRET_KEY")
-String stackName = (String)System.getenv().get("STACK_NAME")
-
-String domain    = stackName
-
-
-NodeRegistry registry = new NodeRegistry();
-
-// build seed list by listing all Cassandra nodes found in SimpleDB domain with our stackName
-def selectResult = registry.searchNode('opscenter')
-
-def opsCenterNode = selectResult[0]
-
-
-def clientconfig = """
-
-
-stomp_interface: ${opsCenterNode}
-"""
-
-println clientconfig

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/groovy/configure_opscenter_cassandra.groovy
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/groovy/configure_opscenter_cassandra.groovy b/stack/awscluster/src/main/groovy/configure_opscenter_cassandra.groovy
deleted file mode 100644
index b9239a1..0000000
--- a/stack/awscluster/src/main/groovy/configure_opscenter_cassandra.groovy
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- *
- *  * Licensed to the Apache Software Foundation (ASF) under one
- *  * or more contributor license agreements.  See the NOTICE file
- *  * distributed with this work for additional information
- *  * regarding copyright ownership.  The ASF licenses this file
- *  * to you under the Apache License, Version 2.0 (the
- *  * "License"); you may not use this file except in compliance
- *  * with the License.  You may obtain a copy of the License at
- *  *
- *  *    http://www.apache.org/licenses/LICENSE-2.0
- *  *
- *  * Unless required by applicable law or agreed to in writing,
- *  * software distributed under the License is distributed on an
- *  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- *  * KIND, either express or implied.  See the License for the
- *  * specific language governing permissions and limitations
- *  * under the License.
- *
- */
-
-
-// configure_opscenter_cassandra.groovy 
-// 
-// Emits the Cassandra config file for the single-node Cassandra instance 
-// backing OpsCenter, seeded with this host only
-//
-import com.amazonaws.auth.*
-import com.amazonaws.services.simpledb.*
-import com.amazonaws.services.simpledb.model.*
-
-
-String hostName  = (String)System.getenv().get("PUBLIC_HOSTNAME")
-// build seed list by listing all Cassandra nodes found in SimpleDB domain with our stackName
-
-
-
-def cassandraConfig = """
-
-
-cluster_name: 'opscenter'
-listen_address: ${hostName}
-seed_provider:
-    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
-      parameters:
-          - seeds: "${hostName}"
-auto_bootstrap: false 
-num_tokens: 256
-hinted_handoff_enabled: true
-hinted_handoff_throttle_in_kb: 1024
-max_hints_delivery_threads: 2
-authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
-authorizer: org.apache.cassandra.auth.AllowAllAuthorizer
-partitioner: org.apache.cassandra.dht.Murmur3Partitioner
-data_file_directories:
-    - /mnt/data/cassandra/data
-commitlog_directory: /mnt/data/cassandra/commitlog
-disk_failure_policy: stop
-key_cache_size_in_mb: 2048
-key_cache_save_period: 14400
-row_cache_size_in_mb: 2048
-row_cache_save_period: 14400
-row_cache_provider: SerializingCacheProvider
-saved_caches_directory: /mnt/data/cassandra/saved_caches
-commitlog_sync: periodic
-commitlog_sync_period_in_ms: 10000
-commitlog_segment_size_in_mb: 32
-flush_largest_memtables_at: 0.75
-reduce_cache_sizes_at: 0.85
-reduce_cache_capacity_to: 0.6
-concurrent_reads: 32
-concurrent_writes: 32
-memtable_flush_queue_size: 4
-trickle_fsync: false
-trickle_fsync_interval_in_kb: 10240
-storage_port: 7000
-ssl_storage_port: 7001
-rpc_address: 0.0.0.0
-start_native_transport: false
-native_transport_port: 9042
-start_rpc: true
-rpc_port: 9160
-rpc_keepalive: true
-rpc_server_type: sync
-thrift_framed_transport_size_in_mb: 15
-thrift_max_message_length_in_mb: 16
-incremental_backups: false
-snapshot_before_compaction: false
-auto_snapshot: true
-column_index_size_in_kb: 64
-in_memory_compaction_limit_in_mb: 64
-multithreaded_compaction: false
-compaction_throughput_mb_per_sec: 16
-compaction_preheat_key_cache: true
-read_request_timeout_in_ms: 10000
-range_request_timeout_in_ms: 10000
-write_request_timeout_in_ms: 10000
-truncate_request_timeout_in_ms: 60000
-request_timeout_in_ms: 10000
-cross_node_timeout: false
-endpoint_snitch: Ec2Snitch
-dynamic_snitch_update_interval_in_ms: 100
-dynamic_snitch_reset_interval_in_ms: 600000
-dynamic_snitch_badness_threshold: 0.1
-request_scheduler: org.apache.cassandra.scheduler.NoScheduler
-index_interval: 128
-server_encryption_options:
-    internode_encryption: none
-    keystore: conf/.keystore
-    keystore_password: cassandra
-    truststore: conf/.truststore
-    truststore_password: cassandra
-client_encryption_options:
-    enabled: false
-    keystore: conf/.keystore
-    keystore_password: cassandra
-internode_compression: all
-"""
-
-println cassandraConfig

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/groovy/configure_opscenter_usergrid.groovy
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/groovy/configure_opscenter_usergrid.groovy b/stack/awscluster/src/main/groovy/configure_opscenter_usergrid.groovy
deleted file mode 100644
index df4dcae..0000000
--- a/stack/awscluster/src/main/groovy/configure_opscenter_usergrid.groovy
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- *
- *  * Licensed to the Apache Software Foundation (ASF) under one
- *  * or more contributor license agreements.  See the NOTICE file
- *  * distributed with this work for additional information
- *  * regarding copyright ownership.  The ASF licenses this file
- *  * to you under the Apache License, Version 2.0 (the
- *  * "License"); you may not use this file except in compliance
- *  * with the License.  You may obtain a copy of the License at
- *  *
- *  *    http://www.apache.org/licenses/LICENSE-2.0
- *  *
- *  * Unless required by applicable law or agreed to in writing,
- *  * software distributed under the License is distributed on an
- *  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- *  * KIND, either express or implied.  See the License for the
- *  * specific language governing permissions and limitations
- *  * under the License.
- *
- */
-
-
-// configure_opscenter_usergrid.groovy 
-// 
-// Emits the OpsCenter cluster configuration pointing at the Cassandra 
-// seed nodes found in the node registry
-//
-
-
-String hostName  = (String)System.getenv().get("PUBLIC_HOSTNAME")
-// build seed list by listing all Cassandra nodes found in SimpleDB domain with our stackName
-
-NodeRegistry registry = new NodeRegistry();
-
-def selectResult = registry.searchNode('cassandra')
-def seeds = ""
-def sep = ""
-for (host in selectResult) {
-    seeds = "${seeds}${sep}${host}"
-    sep = ","
-}
-
-
-//We need to point to at least 1 node in the cassandra cluster so that we can bootstrap monitoring
-def usergridConfig = """
-
-[cassandra]
-seed_hosts = ${seeds}
-
-#TODO, this doesn't seem to work, I think opscenter is broken.  Try this again at a later time and remove opscenter exclusion below
-#[storage_cassandra]
-#seed_hosts = ${hostName}
-#api_port = 9160
-
-
-"""
-
-println usergridConfig

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/groovy/configure_portal_new.groovy
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/groovy/configure_portal_new.groovy b/stack/awscluster/src/main/groovy/configure_portal_new.groovy
deleted file mode 100644
index 753be68..0000000
--- a/stack/awscluster/src/main/groovy/configure_portal_new.groovy
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  The ASF licenses this file to You
- * under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.  For additional information regarding
- * copyright in this work, please see the NOTICE file in the top level
- * directory of this distribution.
- */
-
-
-//
-// Emits text to be appended to the end of config.js
-//
-def baseUrl = "http://${System.getenv().get("DNS_NAME")}.${System.getenv().get("DNS_DOMAIN")}"
-config = """\n\
-
-Usergrid.overrideUrl = '${baseUrl}';
-"""
-println config

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/groovy/configure_usergrid.groovy
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/groovy/configure_usergrid.groovy b/stack/awscluster/src/main/groovy/configure_usergrid.groovy
deleted file mode 100644
index a4d6a52..0000000
--- a/stack/awscluster/src/main/groovy/configure_usergrid.groovy
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  The ASF licenses this file to You
- * under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.  For additional information regarding
- * copyright in this work, please see the NOTICE file in the top level
- * directory of this distribution.
- */
-
-
-//
-// configure_usergrid.groovy
-//
-// Emits usergrid properties file based on environment and the Cassandra node registry (EC2 instance tags read via NodeRegistry)
-//
-import com.amazonaws.auth.*
-import com.amazonaws.services.simpledb.*
-import com.amazonaws.services.simpledb.model.*
-
-
-String accessKey = (String)System.getenv().get("AWS_ACCESS_KEY")
-String secretKey = (String)System.getenv().get("AWS_SECRET_KEY")
-
-def baseUrl      = "http://${System.getenv().get("DNS_NAME")}.${System.getenv().get("DNS_DOMAIN")}"
-String stackName = (String)System.getenv().get("STACK_NAME")
-String domain    = stackName
-String hostName  = (String)System.getenv().get("PUBLIC_HOSTNAME")
-def replFactor   = System.getenv().get("CASSANDRA_REPLICATION_FACTOR")
-def clusterName  = System.getenv().get("CASSANDRA_CLUSTER_NAME")
-def readConsistencyLevel  = System.getenv().get("CASSANDRA_READ_CONSISTENCY")
-def writeConsistencyLevel  = System.getenv().get("CASSANDRA_WRITE_CONSISTENCY")
-
-def superUserEmail     = System.getenv().get("SUPER_USER_EMAIL")
-def testAdminUserEmail = System.getenv().get("TEST_ADMIN_USER_EMAIL")
-
-def numEsNodes = Integer.parseInt(System.getenv().get("ES_NUM_SERVERS"))
-//Override number of shards.  Set it to 2x the cluster size
-def esShards = numEsNodes*2;
-
-
-//One replica gives us 2 copies of each shard (primary + 1 replica)
-def esReplicas = 1;
-
-def tomcatThreads = System.getenv().get("TOMCAT_THREADS")
-
-def workerCount = System.getenv().get("INDEX_WORKER_COUNT")
-
-//temporarily set to equal since we now have a sane tomcat thread calculation
-def hystrixThreads = tomcatThreads
-
-//if we end in -1, we remove it
-def ec2Region = System.getenv().get("EC2_REGION")
-def cassEc2Region = ec2Region.replace("-1", "")
-
-NodeRegistry registry = new NodeRegistry();
-
-def selectResult = registry.searchNode('cassandra')
-
-// build seed list by listing all Cassandra nodes found in SimpleDB domain with our stackName
-def cassandras = ""
-def sep = ""
-for (item in selectResult) {
-    cassandras = "${cassandras}${sep}${item}:9160"
-    sep = ","
-}
-
-// TODO T.N Make this the graphite url
-selectResult = registry.searchNode('graphite')
-def graphite = ""
-sep = ""
-for (item in selectResult) {
-    graphite = "${graphite}${sep}${item}"
-    sep = ","
-}
-
-// cassandra nodes are also our elasticsearch nodes
-selectResult = registry.searchNode('elasticsearch')
-def esnodes = ""
-sep = ""
-for (item in selectResult) {
-    esnodes = "${esnodes}${sep}${item}"
-    sep = ","
-}
-
-def usergridConfig = """
-######################################################
-# Minimal Usergrid configuration properties for local Tomcat and Cassandra
-
-cassandra.url=${cassandras}
-cassandra.cluster=${clusterName}
-cassandra.keyspace.strategy=org.apache.cassandra.locator.NetworkTopologyStrategy
-cassandra.keyspace.replication=${cassEc2Region}:${replFactor}
-
-# This property is required to be set and cannot be left to the default.
-usergrid.cluster_name=usergrid
-
-cassandra.timeout=5000
-cassandra.connections=${tomcatThreads}
-hystrix.threadpool.graph_user.coreSize=${hystrixThreads}
-hystrix.threadpool.graph_async.coreSize=${hystrixThreads}
-usergrid.read.cl=${readConsistencyLevel}
-usergrid.write.cl=${writeConsistencyLevel}
-
-
-
-elasticsearch.cluster_name=${clusterName}
-elasticsearch.hosts=${esnodes}
-elasticsearch.port=9300
-elasticsearch.number_shards=${esShards}
-elasticsearch.number_replicas=${esReplicas}
-
-######################################################
-# Custom mail transport
-
-mail.transport.protocol=smtp
-mail.smtp.host=localhost
-mail.smtp.port=25
-mail.smtp.auth=false
-mail.smtp.quitwait=false
-
-# TODO: make all usernames and passwords configurable via Cloud Formation parameters.
-
-
-######################################################
-# Admin and test user setup
-
-usergrid.sysadmin.login.allowed=true
-usergrid.sysadmin.login.name=superuser
-usergrid.sysadmin.login.password=test
-usergrid.sysadmin.login.email=${superUserEmail}
-
-usergrid.sysadmin.email=${superUserEmail}
-#We don't want to require user approval so we can quickly create tests
-usergrid.sysadmin.approve.users=false
-#We don't want to require organizations to be approved so we can auto create them
-usergrid.sysadmin.approve.organizations=false
-
-# Base mailer account - default for all outgoing messages
-usergrid.management.mailer=Admin <${superUserEmail}>
-
-usergrid.setup-test-account=true
-
-usergrid.test-account.app=test-app
-usergrid.test-account.organization=test-organization
-usergrid.test-account.admin-user.username=test
-usergrid.test-account.admin-user.name=Test User
-usergrid.test-account.admin-user.email=${testAdminUserEmail}
-usergrid.test-account.admin-user.password=test
-
-######################################################
-# Auto-confirm and sign-up notifications settings
-
-usergrid.management.admin_users_require_confirmation=false
-usergrid.management.admin_users_require_activation=false
-
-usergrid.management.organizations_require_activation=false
-usergrid.management.notify_sysadmin_of_new_organizations=true
-usergrid.management.notify_sysadmin_of_new_admin_users=true
-
-######################################################
-# URLs
-
-# Redirect path for requests that come in for the TLD
-usergrid.redirect_root=${baseUrl}/status
-usergrid.api.url.base=${baseUrl}
-
-\n\
-
-
-usergrid.metrics.graphite.host=${graphite}
-
-usergrid.queue.region=${ec2Region}
-
-# Enable scheduler for import/export jobs
-usergrid.scheduler.enabled=true
-usergrid.scheduler.job.workers=1
-
-
-#Set our ingest rate
-elasticsearch.worker_count=${workerCount}
-
-"""
-
-println usergridConfig
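
Two derived values above are easy to sanity-check by hand: the Elasticsearch shard count is twice the cluster size, and the keyspace replication string uses the EC2 region with the trailing "-1" removed. The same arithmetic in shell, with illustrative inputs:

    # Sketch: worked example of the derived settings (values are illustrative)
    ES_NUM_SERVERS=6
    EC2_REGION=us-east-1
    CASSANDRA_REPLICATION_FACTOR=3
    esShards=$(( ES_NUM_SERVERS * 2 ))      # -> 12
    cassRegion=${EC2_REGION%-1}             # -> us-east
    echo "elasticsearch.number_shards=${esShards}"
    echo "cassandra.keyspace.replication=${cassRegion}:${CASSANDRA_REPLICATION_FACTOR}"   # -> us-east:3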

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/groovy/create_dashboard.groovy
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/groovy/create_dashboard.groovy b/stack/awscluster/src/main/groovy/create_dashboard.groovy
deleted file mode 100644
index af6b68d..0000000
--- a/stack/awscluster/src/main/groovy/create_dashboard.groovy
+++ /dev/null
@@ -1,79 +0,0 @@
-import groovy.json.JsonOutput
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  The ASF licenses this file to You
- * under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.  For additional information regarding
- * copyright in this work, please see the NOTICE file in the top level
- * directory of this distribution.
- */
-
-//
-// create_dashboard.groovy
-//
-// Emits a Graphite dashboard definition (JSON) containing collectd metrics
-// for all registered "rest" nodes.
-//
-def createMetric(def title, def collectdMetric, def servers, def array) {
-
-    def serversJson = []
-
-    for (server in servers) {
-
-        def normalizedServer = server.replaceAll("\\.", "_")
-
-        serversJson.add("collectd.${normalizedServer}.${collectdMetric}")
-
-    }
-
-
-    def metric = ["target": serversJson, "title": title]
-
-    array.add(metric)
-
-}
-
-
-NodeRegistry registry = new NodeRegistry();
-
-
-def servers = registry.searchNode("rest")
-
-
-
-def json = []
-
-createMetric("Used Memory", "memory.memory-used", servers, json)
-
-createMetric("Free Memory", "memory.memory-free", servers, json)
-
-createMetric("Load Short Term", "load.load.shortterm", servers, json)
-
-createMetric("Network Received", "interface-eth0.if_octets.rx", servers, json)
-
-createMetric("Network Sent", "interface-eth0.if_packets.tx", servers, json)
-
-createMetric("Tomcat Heap", "GenericJMX-memory-heap.memory-used", servers, json)
-
-createMetric("Tomcat Non Heap", "GenericJMX-memory-nonheap.memory-used", servers, json)
-
-createMetric("Tomcat Old Gen", "GenericJMX-memory_pool-CMS_Old_Gen.memory-used", servers, json)
-
-createMetric("Tomcat Permgen", "GenericJMX-memory_pool-CMS_Perm_Gen.memory-used", servers, json)
-
-
-
-def jsonString = JsonOutput.toJson(json)
-println JsonOutput.prettyPrint(jsonString)
-
-
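
For context, createMetric above folds each registered server into a Graphite target string, and the script prints the resulting list as pretty-printed JSON. A minimal, self-contained sketch with hypothetical server addresses (the real script pulls them from NodeRegistry.searchNode("rest")) shows the shape of that output:

import groovy.json.JsonOutput

// Hypothetical server list standing in for the registry lookup.
def servers = ["10.0.0.1", "10.0.0.2"]

def targets = servers.collect { "collectd.${it.replaceAll('\\.', '_')}.memory.memory-used".toString() }
def dashboard = [["target": targets, "title": "Used Memory"]]

println JsonOutput.prettyPrint(JsonOutput.toJson(dashboard))
// Prints (roughly):
// [{"target":["collectd.10_0_0_1.memory.memory-used",
//             "collectd.10_0_0_2.memory.memory-used"],
//   "title":"Used Memory"}]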

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/groovy/get_first_instance.groovy
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/groovy/get_first_instance.groovy b/stack/awscluster/src/main/groovy/get_first_instance.groovy
deleted file mode 100644
index 5330718..0000000
--- a/stack/awscluster/src/main/groovy/get_first_instance.groovy
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  The ASF licenses this file to You
- * under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.  For additional information regarding
- * copyright in this work, please see the NOTICE file in the top level
- * directory of this distribution.
- */
-
-
-//
-// get_first_instance.groovy
-//
-// Print the first registered instance of the given node type.
-//
-import com.amazonaws.auth.*
-import com.amazonaws.services.simpledb.*
-import com.amazonaws.services.simpledb.model.*
-
-
-if (args.size() != 1) {
-  println "This script expects one argument: get_first_instance.groovy nodetype"
-  return 1;
-}
-
-String nodetype = args[0]
-
-
-NodeRegistry registry = new NodeRegistry();
-
-
-def selectResult = registry.searchNode(nodetype)
-
-
-println "${selectResult[0]}"

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/groovy/registry_register.groovy
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/groovy/registry_register.groovy b/stack/awscluster/src/main/groovy/registry_register.groovy
deleted file mode 100644
index 144b18b..0000000
--- a/stack/awscluster/src/main/groovy/registry_register.groovy
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  The ASF licenses this file to You
- * under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.  For additional information regarding
- * copyright in this work, please see the NOTICE file in the top level
- * directory of this distribution.
- */
-
-
-// 
-// registry_register.groovy
-//
-// Register this host machine in the stack's node registry under the given node type.
-//
-import com.amazonaws.auth.*
-import com.amazonaws.services.simpledb.*
-import com.amazonaws.services.simpledb.model.*
-
-
-if (args.size() != 1 )  {
-  println "This script expects one argument. registry_register.groovy nodetype"
-  return 1;
-}
-
-String nodetype = args[0];
-
-NodeRegistry registry = new NodeRegistry();
-registry.addNode(nodetype);

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/groovy/tag_instance.groovy
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/groovy/tag_instance.groovy b/stack/awscluster/src/main/groovy/tag_instance.groovy
deleted file mode 100644
index 99d3288..0000000
--- a/stack/awscluster/src/main/groovy/tag_instance.groovy
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  The ASF licenses this file to You
- * under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.  For additional information regarding
- * copyright in this work, please see the NOTICE file in the top level
- * directory of this distribution.
- */
-
-
-// 
-// tag_instance.groovy 
-// 
-// Tag this instance so it can be easily identified in the EC2 console.
-//
-import com.amazonaws.auth.*
-import com.amazonaws.services.ec2.*
-import com.amazonaws.services.ec2.model.*
-
-String type       = (String)System.getenv().get("TYPE")
-String accessKey  = (String)System.getenv().get("AWS_ACCESS_KEY")
-String secretKey  = (String)System.getenv().get("AWS_SECRET_KEY")
-String instanceId = (String)System.getenv().get("EC2_INSTANCE_ID")
-String stackName  = (String)System.getenv().get("STACK_NAME")
-
-
-String moreMetaData = ""
-
-if (args.size() == 1 )  {
-    moreMetaData = args[0]
-}
-
-
-def creds = new BasicAWSCredentials(accessKey, secretKey)
-def ec2Client = new AmazonEC2Client(creds)
-
-def resources = new ArrayList()
-resources.add(instanceId)
-
-def tags = new ArrayList()
-def tag = "${stackName}-${type}-${instanceId}${moreMetaData}"
-tags.add(new Tag("Name", tag))
-
-ec2Client.createTags(new CreateTagsRequest(resources, tags))
-
-println "Tagged instance as ${tag}"

http://git-wip-us.apache.org/repos/asf/usergrid/blob/d478adb0/stack/awscluster/src/main/groovy/wait_for_instances.groovy
----------------------------------------------------------------------
diff --git a/stack/awscluster/src/main/groovy/wait_for_instances.groovy b/stack/awscluster/src/main/groovy/wait_for_instances.groovy
deleted file mode 100644
index fcd77b1..0000000
--- a/stack/awscluster/src/main/groovy/wait_for_instances.groovy
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  The ASF licenses this file to You
- * under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.  For additional information regarding
- * copyright in this work, please see the NOTICE file in the top level
- * directory of this distribution.
- */
-
-
-//
-// wait_for_instances.groovy
-//
-// Wait until enough nodes of the given type have registered before proceeding;
-// "enough" means a count greater than or equal to the number requested.
-//
-import com.amazonaws.auth.*
-import com.amazonaws.services.simpledb.*
-import com.amazonaws.services.simpledb.model.*
-
-
-if (args.size() != 2) {
-  println "This script expects two arguments: wait_for_instances.groovy nodetype numberOfServers"
-  return 1;
-}
-
-String nodetype = args[0]
-int numberOfServers = args[1].toInteger()
-
-
-NodeRegistry registry = new NodeRegistry();
-
-println "Waiting for ${numberOfServers} nodes of type ${nodetype} to register..."
-
-registry.waitUntilAvailable(nodetype, numberOfServers)
-
-println "Waiting done."
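
NodeRegistry itself lives elsewhere in this directory (NodeRegistry.groovy) and is not part of this diff. As a rough illustration only, and assuming the registry is backed by SimpleDB as the imports suggest, waitUntilAvailable could be a simple polling loop along these lines (the domain layout and attribute names here are hypothetical, not the real schema):

import com.amazonaws.auth.BasicAWSCredentials
import com.amazonaws.services.simpledb.AmazonSimpleDBClient
import com.amazonaws.services.simpledb.model.SelectRequest

// Sketch: poll SimpleDB until enough nodes of the given type have registered.
// The real NodeRegistry.groovy owns the actual domain layout and query.
def waitUntilAvailable(String nodetype, int numberOfServers) {
    def creds  = new BasicAWSCredentials(System.getenv("AWS_ACCESS_KEY"), System.getenv("AWS_SECRET_KEY"))
    def sdb    = new AmazonSimpleDBClient(creds)
    def domain = System.getenv("STACK_NAME")                                             // assumed domain name
    def query  = "select * from `${domain}` where nodetype = '${nodetype}'".toString()   // assumed schema

    while (true) {
        def found = sdb.select(new SelectRequest(query)).items.size()
        if (found >= numberOfServers) {
            return
        }
        println "Found ${found}/${numberOfServers} ${nodetype} nodes, sleeping 30s..."
        Thread.sleep(30 * 1000)
    }
}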