Posted to commits@usergrid.apache.org by sn...@apache.org on 2014/10/28 18:56:54 UTC

[01/12] fixes to push test

Repository: incubator-usergrid
Updated Branches:
  refs/heads/two-dot-o-events e837a0c97 -> 1f0e6e439


http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/OrganizationScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/OrganizationScenarios.scala b/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/OrganizationScenarios.scala
deleted file mode 100755
index 7c411b0..0000000
--- a/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/OrganizationScenarios.scala
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.usergrid
-
-import io.gatling.core.Predef._
-import io.gatling.http.Predef._
-import scala.concurrent.duration._
-
-/**
- * Performs organization registration
- *
- *
- * Produces:
- *
- * orgName The name of the created organization
- * userName  The user name of the admin to log in with
- * password The password of the admin to use
- */
-object OrganizationScenarios {
-
-  //register the org with the randomly generated org
-  val createOrgAndAdmin = exec(http("Create Organization")
-  .post("/management/organizations")
-  .headers(Headers.jsonAnonymous)
-  .body(StringBody("{\"organization\":\"" + Settings.org + "\",\"username\":\"${username}\",\"name\":\"${username}\",\"email\":\"${username}@apigee.com\",\"password\":\"${password}\"}"))
-  .check(status.is(200)))
-
-}
\ No newline at end of file
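
The createOrgAndAdmin chain above resolves ${username} and ${password} from the Gatling session, so a feeder has to populate those attributes before the request fires (the organization name itself comes from Settings.org). A minimal sketch of that wiring, assuming the scenario objects stay on the classpath after the reorganization; the feeder and scenario names below are illustrative, not part of the repository:

    package org.apache.usergrid

    import io.gatling.core.Predef._

    object OrgSetupSketch {

      // Each virtual user gets a unique admin username; the password is fixed.
      val orgAdminFeeder: Iterator[Map[String, String]] = Iterator.continually(Map(
        "username" -> ("admin-" + java.util.UUID.randomUUID.toString.replace("-", "")),
        "password" -> "test"
      ))

      // Feed the session, then run the request chain from the deleted file.
      val createOrgScenario = scenario("Create organization and admin")
        .feed(orgAdminFeeder)
        .exec(OrganizationScenarios.createOrgAndAdmin)
    }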

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/TokenScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/TokenScenarios.scala b/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/TokenScenarios.scala
deleted file mode 100755
index 9dff0df..0000000
--- a/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/TokenScenarios.scala
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.usergrid
-
-import io.gatling.core.Predef._
-import io.gatling.http.Predef._
-import scala.concurrent.duration._
-
-/**
- * Class that will get the token and insert it into the test session.
- * Assumes that  the following values are present in the session.
- *
- * Expects:
- *
- * userName  The user name to log in with
- * password The password to use
- *
- * Produces:
- *
- * authToken A valid access token if the login attempt is successful
- */
-
-object TokenScenarios {
-
-
-  val getManagementToken =
-    exec(
-      http("POST Org Token")
-        .post("/management/token")
-        .headers(Headers.jsonAnonymous)
-        //pass in the the username and password, store the "access_token" json response element as the var "authToken" in the session
-        .body(StringBody("{\"username\":\"${username}\",\"password\":\"${password}\",\"grant_type\":\"password\"}"))
-        .check(jsonPath("access_token")
-        .saveAs("authToken"))
-    )
-
-  val getUserToken =
-    exec(
-      http("POST user token")
-        .post("/token")
-        .body(StringBody("{\"grant_type\":\"password\",\"username\":\"${user1}\",\"password\":\"password\"}"))
-        .check(status.is(200))
-    )
-
-}
\ No newline at end of file
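
getManagementToken stores the response token in the session as authToken, which the Headers.jsonAuthorized map (later in this diff series) interpolates into a Bearer header. Gatling JSONPath checks are conventionally rooted at $, e.g. $.access_token, whereas the deleted file passes a bare access_token. A sketch of the login-then-authorized-call pattern under those assumptions; the applications listing request is only an example:

    package org.apache.usergrid

    import io.gatling.core.Predef._
    import io.gatling.http.Predef._

    object AuthorizedCallSketch {

      // Log in as the admin fed into the session, saving the token as "authToken".
      val getManagementToken = exec(
        http("POST management token")
          .post("/management/token")
          .headers(Headers.jsonAnonymous)
          .body(StringBody("""{"username":"${username}","password":"${password}","grant_type":"password"}"""))
          .check(jsonPath("$.access_token").saveAs("authToken"))
      )

      // Any later request can reuse the token via Headers.jsonAuthorized,
      // which expands to "Authorization: Bearer ${authToken}".
      val listApplications = exec(
        http("GET org applications")
          .get("/management/organizations/" + Settings.org + "/applications")
          .headers(Headers.jsonAuthorized)
          .check(status.is(200))
      )
    }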

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/UserScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/UserScenarios.scala b/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/UserScenarios.scala
deleted file mode 100755
index 843bc2e..0000000
--- a/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/UserScenarios.scala
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.usergrid
-
-import io.gatling.core.Predef._
-import io.gatling.http.Predef._
-
-object UserScenarios {
-
-  val getRandomUser = exec(
-    http("GET user")
-      .get("/users/user" + Utils.generateRandomInt(1, Settings.numEntities))
-      .check(status.is(200))
-  )
-
-  val postUser = exec(
-    http("POST geolocated Users")
-      .post("/users")
-      .body(StringBody("{\"location\":{\"latitude\":\"${latitude}\",\"longitude\":\"${longitude}\"},\"username\":\"${username}\"," +
-      "\"displayName\":\"${displayName}\",\"age\":\"${age}\",\"seen\":\"${seen}\",\"weight\":\"${weight}\"," +
-      "\"height\":\"${height}\",\"aboutMe\":\"${aboutMe}\",\"profileId\":\"${profileId}\",\"headline\":\"${headline}\"," +
-      "\"showAge\":\"${showAge}\",\"relationshipStatus\":\"${relationshipStatus}\",\"ethnicity\":\"${ethnicity}\",\"password\":\"password\"}"))
-      .check(status.is(200))
-  )
-
-  val postUser400ok = exec(
-    http("POST geolocated Users")
-      .post("/users")
-      .body(StringBody("{\"location\":{\"latitude\":\"${latitude}\",\"longitude\":\"${longitude}\"},\"username\":\"${username}\"," +
-      "\"displayName\":\"${displayName}\",\"age\":\"${age}\",\"seen\":\"${seen}\",\"weight\":\"${weight}\"," +
-      "\"height\":\"${height}\",\"aboutMe\":\"${aboutMe}\",\"profileId\":\"${profileId}\",\"headline\":\"${headline}\"," +
-      "\"showAge\":\"${showAge}\",\"relationshipStatus\":\"${relationshipStatus}\",\"ethnicity\":\"${ethnicity}\",\"password\":\"password\"}"))
-      .check(status.in(200 to 400))
-  )
-
-}
\ No newline at end of file
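
Both POST chains interpolate a dozen-plus session attributes (username, latitude, longitude, displayName and so on) that the repository's FeederGenerator normally supplies, and postUser400ok accepts any status from 200 through 400 so that re-posting already-created users does not fail the run. A hand-rolled feeder of the same shape, shown only to illustrate the attributes the body template expects:

    package org.apache.usergrid

    import scala.util.Random

    object UserFeederSketch {

      // Supplies every attribute referenced by the postUser body template.
      val userFeeder: Iterator[Map[String, String]] = Iterator.from(1).map { i =>
        Map(
          "username"           -> s"user$i",
          "displayName"        -> s"User $i",
          "latitude"           -> "37.442348",
          "longitude"          -> "-122.138268",
          "age"                -> (18 + Random.nextInt(48)).toString,
          "seen"               -> System.currentTimeMillis.toString,
          "weight"             -> (120 + Random.nextInt(100)).toString,
          "height"             -> (48 + Random.nextInt(36)).toString,
          "aboutMe"            -> "load test user",
          "profileId"          -> s"profile$i",
          "headline"           -> "hello",
          "showAge"            -> "true",
          "relationshipStatus" -> "single",
          "ethnicity"          -> "unspecified"
        )
      }
    }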

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/simulations/test/scala/org/apache/usergrid/settings/Headers.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/simulations/test/scala/org/apache/usergrid/settings/Headers.scala b/stack/loadtests/simulations/test/scala/org/apache/usergrid/settings/Headers.scala
deleted file mode 100755
index 319bdcf..0000000
--- a/stack/loadtests/simulations/test/scala/org/apache/usergrid/settings/Headers.scala
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.usergrid
-
-/**
- *
- */
-object Headers {
-
-  /**
-   * Headers for anonymous posts
-   */
-  val jsonAnonymous = Map(
-    "Cache-Control" -> """no-cache""",
-    "Content-Type" -> """application/json; charset=UTF-8"""
-  )
-
-  /**
-   * Headers for authorized users with token and json content type
-   */
-  val jsonAuthorized = Map(
-    "Cache-Control" -> """no-cache""",
-    "Content-Type" -> """application/json; charset=UTF-8""",
-    "Authorization" -> "Bearer ${authToken}"
-  )
-
-
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/simulations/test/scala/org/apache/usergrid/settings/Settings.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/simulations/test/scala/org/apache/usergrid/settings/Settings.scala b/stack/loadtests/simulations/test/scala/org/apache/usergrid/settings/Settings.scala
deleted file mode 100755
index 5588b67..0000000
--- a/stack/loadtests/simulations/test/scala/org/apache/usergrid/settings/Settings.scala
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.usergrid
-
-import io.gatling.core.Predef._
-import io.gatling.http.Predef._
-import scala.concurrent.duration._
-
-object Settings {
-
-  // Target settings
-  val org = System.getProperty("org")
-  val app = System.getProperty("app")
-  val baseUrl = System.getProperty("baseurl")
-  val httpConf = http.baseURL(baseUrl + "/" + org + "/" + app)
-
-  // Simulation settings
-  val numUsers:Int = Integer.getInteger("numUsers", 10).toInt
-  val numEntities:Int = Integer.getInteger("numEntities", 5000).toInt
-  val numDevices:Int = Integer.getInteger("numDevices", 2000).toInt
-
-  val rampTime:Int = Integer.getInteger("rampTime", 0).toInt // in seconds
-  val duration:Int = Integer.getInteger("duration", 300).toInt // in seconds
-  val throttle:Int = Integer.getInteger("throttle", 50).toInt // in seconds
-
-  // Geolocation settings
-  val centerLatitude:Double = 37.442348 // latitude of center point
-  val centerLongitude:Double = -122.138268 // longitude of center point
-  val userLocationRadius:Double = 32000 // location of requesting user in meters
-  val geosearchRadius:Int = 8000 // search area in meters
-
-  // Push Notification settings
-  val pushNotifier = System.getProperty("notifier")
-  val pushProvider = System.getProperty("provider")
-
-  def createRandomPushNotifier:String = {
-    return Utils.generateUniqueName("notifier")
-  }
-
-}
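
Everything here is driven by JVM system properties: the numeric settings fall back to the defaults shown via Integer.getInteger, but org, app, baseurl, notifier and provider come back null when the matching -D flag is absent, which can surface later as a literal "null" segment in request URLs. A small fail-fast guard, assuming the usual -D flags (for example -Dorg=load-org -Dapp=load-app -Dbaseurl=http://localhost:8080 -Dnotifier=loadNotifier -Dprovider=noop; all values illustrative):

    package org.apache.usergrid

    object RequiredSettingsSketch {

      // Fail fast instead of letting a missing -D flag turn into a "null" URL segment.
      private def required(name: String): String =
        Option(System.getProperty(name))
          .getOrElse(sys.error(s"Missing required system property -D$name"))

      val org     = required("org")
      val app     = required("app")
      val baseUrl = required("baseurl")

      // Numeric settings keep their Integer.getInteger defaults, e.g.:
      val numUsers: Int = Integer.getInteger("numUsers", 10).toInt
    }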

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/simulations/test/scala/org/apache/usergrid/settings/Utils.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/simulations/test/scala/org/apache/usergrid/settings/Utils.scala b/stack/loadtests/simulations/test/scala/org/apache/usergrid/settings/Utils.scala
deleted file mode 100755
index 396f0b9..0000000
--- a/stack/loadtests/simulations/test/scala/org/apache/usergrid/settings/Utils.scala
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.usergrid
-
-import scala.util.Random
-import scala.math
-import Array._
-
-/**
- *
- * Utility for creating various data elements
- *
- */
-object Utils {
-
-  private val RNG = new Random
-
-  /**
-   * Generate a new uuid and replace the '-' with empty
-   */
-  def generateUUIDString(): String = {
-    return java.util.UUID.randomUUID.toString.replace("-", "")
-  }
-
-  /**
-   * Generate a unique string with a prefix
-   *
-   * @param prefix
-   * @return
-   */
-  def generateUniqueName(prefix : String): String = {
-     return prefix + generateUUIDString()
-  }
-
-  // random number in between [a...b]
-  def generateRandomInt(lowerBound: Int, upperBound: Int) = RNG.nextInt(upperBound - lowerBound) + lowerBound
-
-  def generateRandomGeolocation(radius: Double, centerLatitude: Double, centerLongitude: Double):Map[String, String] = {
-
-    var rd = radius / 111300 // Convert Radius from meters to degrees.
-    var u = RNG.nextFloat()
-    var v = RNG.nextFloat()
-    var q = math.sqrt(u) * rd
-    var w = q * rd
-    var t = 2 * math.Pi * v
-    var x = math.cos(t) * w
-    var y = math.sin(t) * w
-    var xp = x/math.cos(centerLatitude)
-    var latitude = (y + centerLatitude).toString
-    var longitude = (xp + centerLongitude).toString
-    var geolocation: Map[String, String] = Map("latitude"->latitude,"longitude"->longitude)
-
-    return geolocation
-  }
-
-  def generateRandomQueryString: String = {
-
-    val queryParams = Array("age", "height", "weight")
-    var queryString = ""
-
-    for (numParams <- 1 to generateRandomInt(1, queryParams.length)) {
-      queryString = "age=" + Utils.generateRandomInt(18, 65).toString
-      if (numParams == 2) {
-        queryString += "%20AND%20height=" + Utils.generateRandomInt(48, 84).toString
-      } else if (numParams == 3) {
-        queryString += "%20AND%20weight=" + Utils.generateRandomInt(120, 350).toString
-      }
-    }
-
-    return queryString
-  }
-
-}
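
generateRandomGeolocation is a uniform-point-in-a-disc construction: pick the angle uniformly, take the square root of a uniform variate for the radial distance so points do not cluster at the centre, convert the radius from metres to degrees (roughly 111300 m per degree of latitude), and stretch the longitude offset by 1/cos(latitude). The deleted body multiplies by rd twice (q = sqrt(u) * rd, then w = q * rd) and passes the centre latitude in degrees straight to math.cos, both of which look unintentional; the commonly used form, for reference only, is:

    import scala.util.Random

    object GeoSketch {

      private val rng = new Random

      /** Uniformly distributed point within radiusMeters of (centerLat, centerLon), in degrees. */
      def randomPoint(radiusMeters: Double, centerLat: Double, centerLon: Double): (Double, Double) = {
        val rd   = radiusMeters / 111300.0          // metres -> degrees of latitude
        val w    = rd * math.sqrt(rng.nextDouble()) // sqrt keeps the density uniform over the disc
        val t    = 2 * math.Pi * rng.nextDouble()
        val dLat = w * math.sin(t)
        val dLon = w * math.cos(t) / math.cos(math.toRadians(centerLat)) // longitude degrees shrink with latitude
        (centerLat + dLat, centerLon + dLon)
      }
    }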

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/simulations/test/scala/org/apache/usergrid/simulations/GetEntitySimulation.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/simulations/test/scala/org/apache/usergrid/simulations/GetEntitySimulation.scala b/stack/loadtests/simulations/test/scala/org/apache/usergrid/simulations/GetEntitySimulation.scala
deleted file mode 100644
index ac9bb7f..0000000
--- a/stack/loadtests/simulations/test/scala/org/apache/usergrid/simulations/GetEntitySimulation.scala
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.usergrid
-
-import io.gatling.core.Predef._
-import io.gatling.http.Predef._
-import scala.concurrent.duration._
-
-class GetEntitySimulation extends Simulation {
-
-  // Target settings
-  val httpConf = Settings.httpConf
-
-  // Simulation settings
-  val numUsers:Int = Settings.numUsers
-  val numEntities:Int = Settings.numEntities
-  val rampTime:Int = Settings.rampTime
-  val throttle:Int = Settings.throttle
-
-  val feeder = FeederGenerator.generateEntityNameFeeder("user", numEntities).circular
-
-  val scnToRun = scenario("GET entity")
-    .exec(UserScenarios.getRandomUser)
-
-  setUp(scnToRun.inject(atOnceUsers(numUsers)).throttle(reachRps(throttle) in (rampTime.seconds)).protocols(httpConf)).maxDuration(Settings.duration)
-
-}
\ No newline at end of file
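
The setUp line injects every user at once, caps throughput with reachRps over the ramp window and bounds the run with maxDuration. One common refinement (an assumption here, not something the deleted code did) is a holdFor step so the requests-per-second ceiling stays in force after the ramp completes:

    package org.apache.usergrid

    import io.gatling.core.Predef._
    import io.gatling.http.Predef._
    import scala.concurrent.duration._

    class GetEntitySketch extends Simulation {

      val scn = scenario("GET entity sketch").exec(UserScenarios.getRandomUser)

      setUp(
        scn.inject(atOnceUsers(Settings.numUsers))
          .throttle(
            reachRps(Settings.throttle) in (Settings.rampTime.seconds),
            holdFor(Settings.duration.seconds))   // keep the rps ceiling for the rest of the run
          .protocols(Settings.httpConf)
      ).maxDuration(Settings.duration.seconds)
    }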

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/simulations/test/scala/org/apache/usergrid/simulations/PostDevicesSimulation.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/simulations/test/scala/org/apache/usergrid/simulations/PostDevicesSimulation.scala b/stack/loadtests/simulations/test/scala/org/apache/usergrid/simulations/PostDevicesSimulation.scala
deleted file mode 100755
index 0c47a32..0000000
--- a/stack/loadtests/simulations/test/scala/org/apache/usergrid/simulations/PostDevicesSimulation.scala
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.usergrid
-
-import io.gatling.core.Predef._
-import io.gatling.http.Predef._
-import scala.concurrent.duration._
-
-class PostDevicesSimulation extends Simulation {
-
-  // Target settings
-  val httpConf = Settings.httpConf
-
-  // Simulation settings
-  val numUsers:Int = Settings.numUsers
-  val numEntities:Int = Settings.numEntities
-  val rampTime:Int = Settings.rampTime
-  val throttle:Int = Settings.throttle
-
-  val feeder = FeederGenerator.generateEntityNameFeeder("device", numEntities)
-
-  val scnToRun = scenario("POST device")
-    .feed(feeder)
-    .exec(DeviceScenarios.postDeviceWithNotifier)
-
-  setUp(scnToRun.inject(atOnceUsers(numUsers)).throttle(reachRps(throttle) in (rampTime.seconds)).protocols(httpConf))
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/simulations/test/scala/org/apache/usergrid/simulations/PostUsersSimulation.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/simulations/test/scala/org/apache/usergrid/simulations/PostUsersSimulation.scala b/stack/loadtests/simulations/test/scala/org/apache/usergrid/simulations/PostUsersSimulation.scala
deleted file mode 100755
index b49afc7..0000000
--- a/stack/loadtests/simulations/test/scala/org/apache/usergrid/simulations/PostUsersSimulation.scala
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.usergrid
-
-import io.gatling.core.Predef._
-import io.gatling.http.Predef._
-import scala.concurrent.duration._
-
-class PostUsersSimulation extends Simulation {
-
-  // Target settings
-  val httpConf = Settings.httpConf
-
-  // Simulation settings
-  val numUsers:Int = Settings.numUsers
-  val rampTime:Int = Settings.rampTime
-  val throttle:Int = Settings.throttle
-
-  // Geolocation settings
-  val centerLatitude:Double = Settings.centerLatitude
-  val centerLongitude:Double = Settings.centerLongitude
-  val userLocationRadius:Double = Settings.userLocationRadius
-  val geosearchRadius:Int = Settings.geosearchRadius
-
-  val feeder = FeederGenerator.generateUserWithGeolocationFeeder(numUsers, userLocationRadius, centerLatitude, centerLongitude).queue
-
-  val scnToRun = scenario("POST geolocated users")
-    .feed(feeder)
-    .exec(UserScenarios.postUser)
-
-  setUp(scnToRun.inject(atOnceUsers(numUsers)).throttle(reachRps(throttle) in (rampTime.seconds)).protocols(httpConf))
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/simulations/test/scala/org/apache/usergrid/simulations/PushTargetDeviceSimulation.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/simulations/test/scala/org/apache/usergrid/simulations/PushTargetDeviceSimulation.scala b/stack/loadtests/simulations/test/scala/org/apache/usergrid/simulations/PushTargetDeviceSimulation.scala
deleted file mode 100755
index 66b3c5d..0000000
--- a/stack/loadtests/simulations/test/scala/org/apache/usergrid/simulations/PushTargetDeviceSimulation.scala
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.usergrid
-
-import io.gatling.core.Predef._
-import io.gatling.http.Predef._
-import scala.concurrent.duration._
-
-/**
- *
- * Simple test for setting up multiple orgs and creating push notifications
- *
- */
-class PushNotificationTargetDeviceSimulation extends Simulation {
-
-  val numUsers:Int = Settings.numUsers
-  val numEntities:Int = Settings.numEntities
-  val rampTime:Int = Settings.rampTime
-  val throttle:Int = Settings.throttle
-  val duration:Int = Settings.duration  
-  val httpConf = Settings.httpConf
-    .acceptHeader("application/json")
-
-  val createNotifier = NotifierScenarios.createNotifier
-  val createDevice = DeviceScenarios.postDeviceWithNotifier
-  val sendNotification = NotificationScenarios.sendNotification
-
-  val deviceNameFeeder = FeederGenerator.generateEntityNameFeeder("device", numEntities).circular
-
-  val scnToRun = scenario("Create Push Notification")    
-    .during(duration.seconds) {
-      feed(deviceNameFeeder)
-      .exec(sendNotification)
-    }
-
-
-  setUp(scnToRun.inject(atOnceUsers(numUsers)).throttle(reachRps(throttle) in (rampTime.seconds)).protocols(httpConf))
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/simulations/test/scala/org/apache/usergrid/simulations/PushTargetUserSimulation.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/simulations/test/scala/org/apache/usergrid/simulations/PushTargetUserSimulation.scala b/stack/loadtests/simulations/test/scala/org/apache/usergrid/simulations/PushTargetUserSimulation.scala
deleted file mode 100644
index fd565ba..0000000
--- a/stack/loadtests/simulations/test/scala/org/apache/usergrid/simulations/PushTargetUserSimulation.scala
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.usergrid
-
-import io.gatling.core.Predef._
-import io.gatling.http.Predef._
-import scala.concurrent.duration._
-
-class PushNotificationTargetUserSimulation extends Simulation {
-
-  val numUsers:Int = Settings.numUsers
-  val numEntities:Int = Settings.numEntities
-  val rampTime:Int = Settings.rampTime
-  val throttle:Int = Settings.throttle
-  val duration:Int = Settings.duration
-  val httpConf = Settings.httpConf
-    .acceptHeader("application/json")
-
-  val notifier = Settings.pushNotifier
-  val createDevice = DeviceScenarios.postDeviceWithNotifier400ok
-  val sendNotification = NotificationScenarios.sendNotification
-  val createUser = UserScenarios.postUser400ok
-  val deviceNameFeeder = FeederGenerator.generateEntityNameFeeder("device", numEntities).circular
-  val userFeeder = FeederGenerator.generateUserWithGeolocationFeeder(numEntities, Settings.userLocationRadius, Settings.centerLatitude, Settings.centerLongitude)
-
-  val scnToRun = scenario("Create Push Notification")
-    .feed(userFeeder)
-    .exec(createUser)
-    .pause(1000)
-    .exec(http("Check user and user devices")
-      .get("/users/${username}/devices")
-      .check(status.is(200))
-    )
-    .feed(deviceNameFeeder)
-    .exec(createDevice)
-    .pause(1000)
-    .exec(http("Check device connections")
-      .get("/devices/${entityName}/users")
-      .check(status.is(200))
-    )
-    .exec(http("Connect user with device")
-      .post("/users/${username}/devices/${entityName}")
-      .check(status.is(200))
-    )
-    .exec(http("Send Notification to All Devices")
-      .post("/users/${username}/notifications")
-      .body(StringBody("{\"payloads\":{\"" + notifier + "\":\"testmessage\"}}"))
-      .check(status.is(200))
-    )
-
-
-  setUp(scnToRun.inject(constantUsersPerSec(numUsers) during (duration)).throttle(reachRps(throttle) in (rampTime.seconds)).protocols(httpConf))
-
-}
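
The per-user flow above finishes with Usergrid's push API: a POST to /users/{username}/notifications whose payloads map is keyed by the notifier name from Settings.pushNotifier. Worth noting when reworking these tests: Gatling treats a bare integer pause such as pause(1000) as seconds rather than milliseconds. A sketch of the final step with an explicit unit:

    package org.apache.usergrid

    import io.gatling.core.Predef._
    import io.gatling.http.Predef._
    import scala.concurrent.duration._

    object SendNotificationSketch {

      private val notifier = Settings.pushNotifier

      // Push "testmessage" to every device connected to the user in the session.
      val sendToUserDevices = exec(
        http("Send Notification to All Devices")
          .post("/users/${username}/notifications")
          .body(StringBody("{\"payloads\":{\"" + notifier + "\":\"testmessage\"}}"))
          .check(status.is(200))
      ).pause(1.second)   // explicit unit; a bare pause(1000) would mean 1000 seconds
    }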


[10/12] git commit: Restoring some semblance of my original formatting, take it easy with that auto-formatter folks!

Posted by sn...@apache.org.
Restoring some semblance of my original formatting, take it easy with that auto-formatter folks!


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/09d4ba44
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/09d4ba44
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/09d4ba44

Branch: refs/heads/two-dot-o-events
Commit: 09d4ba444a20421316adfa3efd3e064d93a5fbb6
Parents: 3771e3f
Author: Dave Johnson <dm...@apigee.com>
Authored: Tue Oct 28 12:10:42 2014 -0400
Committer: Dave Johnson <dm...@apigee.com>
Committed: Tue Oct 28 12:10:42 2014 -0400

----------------------------------------------------------------------
 .../corepersistence/CpRelationManager.java      | 274 +++++++++----------
 1 file changed, 125 insertions(+), 149 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/09d4ba44/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java b/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java
index 8c0d886..180d08c 100644
--- a/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java
+++ b/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java
@@ -186,10 +186,12 @@ public class CpRelationManager implements RelationManager {
     private ResultsLoaderFactory resultsLoaderFactory;
 
 
+
     public CpRelationManager() {}
 
 
-    public CpRelationManager init( EntityManager em,
+    public CpRelationManager init( 
+        EntityManager em, 
             CpEntityManagerFactory emf,
             UUID applicationId,
             EntityRef headEntity, 
@@ -223,7 +225,8 @@ public class CpRelationManager implements RelationManager {
         EntityCollectionManager ecm = managerCache.getEntityCollectionManager( headEntityScope );
         if ( logger.isDebugEnabled() ) {
             logger.debug( "Loading head entity {}:{} from scope\n   app {}\n   owner {}\n   name {}",
-                new Object[] { headEntity.getType(),
+                new Object[] {
+                    headEntity.getType(), 
                     headEntity.getUuid(),
                     headEntityScope.getApplication(),
                     headEntityScope.getOwner(),
@@ -233,7 +236,7 @@ public class CpRelationManager implements RelationManager {
 
         //TODO PERFORMANCE why are we loading this again here?
         this.cpHeadEntity = ecm.load( new SimpleId( 
-                headEntity.getUuid(), headEntity.getType() ) ).toBlocking() .lastOrDefault( null );
+            headEntity.getUuid(), headEntity.getType() )).toBlocking().lastOrDefault(null);
 
         // commented out because it is possible that CP entity has not been created yet
         Assert.notNull( cpHeadEntity, "cpHeadEntity cannot be null" );
@@ -248,18 +251,17 @@ public class CpRelationManager implements RelationManager {
     public Set<String> getCollectionIndexes( String collectionName ) throws Exception {
         final Set<String> indexes = new HashSet<String>();
 
-        GraphManager gm = managerCache.getGraphManager( applicationScope );
+        GraphManager gm = managerCache.getGraphManager(applicationScope);
 
         String edgeTypePrefix = CpNamingUtils.getEdgeTypeFromCollectionName( collectionName );
 
-        logger.debug( "getCollectionIndexes(): Searching for edge type prefix {} to target {}:{}",
-            new Object[] { edgeTypePrefix,
-                cpHeadEntity.getId().getType(),
-                cpHeadEntity.getId().getUuid() 
-            } ); 
+        logger.debug("getCollectionIndexes(): Searching for edge type prefix {} to target {}:{}", 
+            new Object[] {
+                edgeTypePrefix, cpHeadEntity.getId().getType(), cpHeadEntity.getId().getUuid()
+        });
 
-        Observable<String> types = gm.getEdgeTypesFromSource( 
-            new SimpleSearchEdgeType( cpHeadEntity.getId(), edgeTypePrefix, null ) );
+        Observable<String> types= gm.getEdgeTypesFromSource( 
+            new SimpleSearchEdgeType( cpHeadEntity.getId(), edgeTypePrefix,  null ));
 
         Iterator<String> iter = types.toBlockingObservable().getIterator();
         while ( iter.hasNext() ) {
@@ -297,7 +299,6 @@ public class CpRelationManager implements RelationManager {
 
     /**
      * Gets containing collections and/or connections depending on the edge type you pass in
-     *
      * @param limit Max number to return
      * @param edgeType Edge type, edge type prefix or null to allow any edge type
      * @param fromEntityType Only consider edges from entities of this type
@@ -306,35 +307,34 @@ public class CpRelationManager implements RelationManager {
 
         Map<EntityRef, Set<String>> results = new LinkedHashMap<EntityRef, Set<String>>();
 
-        GraphManager gm = managerCache.getGraphManager( applicationScope );
+        GraphManager gm = managerCache.getGraphManager(applicationScope);
 
         Iterator<String> edgeTypes = gm.getEdgeTypesToTarget( new SimpleSearchEdgeType( 
-                cpHeadEntity.getId(), edgeType, null ) ).toBlocking() .getIterator();
+            cpHeadEntity.getId(), edgeType, null) ).toBlocking().getIterator();
 
-        logger.debug( "getContainers(): "
+        logger.debug("getContainers(): "
                 + "Searched for edges of type {}\n   to target {}:{}\n   in scope {}\n   found: {}",
-            new Object[] { edgeType,
+            new Object[] {
+                edgeType,
                 cpHeadEntity.getId().getType(),
                 cpHeadEntity.getId().getUuid(),
                 applicationScope.getApplication(),
                 edgeTypes.hasNext() 
-            } );
+        });
 
         while ( edgeTypes.hasNext() ) {
 
             String etype = edgeTypes.next();
 
             Observable<Edge> edges = gm.loadEdgesToTarget( new SimpleSearchByEdgeType( 
-                cpHeadEntity.getId(), etype, Long.MAX_VALUE, SearchByEdgeType.Order.DESCENDING, null ) );
+                cpHeadEntity.getId(), etype, Long.MAX_VALUE, SearchByEdgeType.Order.DESCENDING, null ));
 
             Iterator<Edge> iter = edges.toBlockingObservable().getIterator();
             while ( iter.hasNext() ) {
                 Edge edge = iter.next();
 
-                if (     fromEntityType != null 
-                     && !fromEntityType.equals( edge.getSourceNode().getType() ) ) {
-                    logger.debug( "Ignoring edge from entity type {}", 
-                            edge.getSourceNode().getType() );
+                if ( fromEntityType != null && !fromEntityType.equals( edge.getSourceNode().getType() )) {
+                    logger.debug("Ignoring edge from entity type {}", edge.getSourceNode().getType());
                     continue;
                 }
 
@@ -342,10 +342,9 @@ public class CpRelationManager implements RelationManager {
                         edge.getSourceNode().getType(), edge.getSourceNode().getUuid() );
 
                 String name = null;
-                if ( CpNamingUtils.isConnectionEdgeType( edge.getType() ) ) {
+                if ( CpNamingUtils.isConnectionEdgeType( edge.getType() )) {
                     name = CpNamingUtils.getConnectionType( edge.getType() );
-                }
-                else {
+                } else {
                     name = CpNamingUtils.getCollectionName( edge.getType() );
                 }
                 addMapSet( results, eref, name );
@@ -366,11 +365,17 @@ public class CpRelationManager implements RelationManager {
 
         final GraphManager gm = managerCache.getGraphManager( applicationScope );
 
-        logger.debug( "updateContainingCollectionsAndCollections(): " + "Searched for edges to target {}:{}\n   in scope {}\n   found: {}",
-            new Object[] { cpHeadEntity.getId().getType(),
+        Iterator<String> edgeTypesToTarget = gm.getEdgeTypesToTarget( new SimpleSearchEdgeType( 
+            cpHeadEntity.getId(), null, null) ).toBlockingObservable().getIterator();
+
+        logger.debug("updateContainingCollectionsAndCollections(): "
+                + "Searched for edges to target {}:{}\n   in scope {}\n   found: {}", 
+            new Object[] {
+                cpHeadEntity.getId().getType(), 
                 cpHeadEntity.getId().getUuid(),
-                applicationScope.getApplication() 
-            } );
+                applicationScope.getApplication(),
+                edgeTypesToTarget.hasNext()
+        });
 
         // loop through all types of edge to target
 
@@ -444,13 +449,11 @@ public class CpRelationManager implements RelationManager {
 
         String edgeType = CpNamingUtils.getEdgeTypeFromConnectionType( connectionType );
 
-        logger.debug( "isConnectionMember(): Checking for edge type {} from {}:{} to {}:{}",
+        logger.debug("isConnectionMember(): Checking for edge type {} from {}:{} to {}:{}", 
             new Object[] { 
                 edgeType,
-                headEntity.getType(),
-                headEntity.getUuid(),
-                entity.getType(), entity.getUuid() 
-            } );
+                headEntity.getType(), headEntity.getUuid(), 
+                entity.getType(), entity.getUuid() });
 
         GraphManager gm = managerCache.getGraphManager( applicationScope );
         Observable<Edge> edges = gm.loadEdgeVersions( new SimpleSearchByEdge( 
@@ -474,13 +477,11 @@ public class CpRelationManager implements RelationManager {
 
         String edgeType = CpNamingUtils.getEdgeTypeFromCollectionName( collName );
 
-        logger.debug( "isCollectionMember(): Checking for edge type {} from {}:{} to {}:{}",
+        logger.debug("isCollectionMember(): Checking for edge type {} from {}:{} to {}:{}", 
             new Object[] { 
                 edgeType,
-                headEntity.getType(),
-                headEntity.getUuid(),
-                entity.getType(), entity.getUuid() 
-            } );
+                headEntity.getType(), headEntity.getUuid(), 
+                entity.getType(), entity.getUuid() });
 
         GraphManager gm = managerCache.getGraphManager( applicationScope );
         Observable<Edge> edges = gm.loadEdgeVersions( new SimpleSearchByEdge( 
@@ -709,8 +710,7 @@ public class CpRelationManager implements RelationManager {
     @Override
     @Metered( group = "core", name = "RelationManager_createItemInCollection" )
     public Entity createItemInCollection( 
-            String collName, String itemType, Map<String, Object> properties )
-            throws Exception {
+        String collName, String itemType, Map<String, Object> properties) throws Exception {
 
         if ( headEntity.getUuid().equals( applicationId ) ) {
             if ( itemType.equals( TYPE_ENTITY ) ) {
@@ -730,7 +730,6 @@ public class CpRelationManager implements RelationManager {
 
         else if ( headEntity.getType().equals( Group.ENTITY_TYPE ) 
                 && ( collName.equals( COLLECTION_ROLES ) ) ) {
-
             UUID groupId = headEntity.getUuid();
             String roleName = ( String ) properties.get( PROPERTY_NAME );
             return em.createGroupRole( groupId, roleName, ( Long ) properties.get( PROPERTY_INACTIVITY ) );
@@ -804,15 +803,17 @@ public class CpRelationManager implements RelationManager {
         final EntityIndexBatch batch = ei.createBatch();
 
         // remove item from collection index
-        IndexScope indexScope = new IndexScopeImpl( cpHeadEntity.getId(),
-                CpNamingUtils.getCollectionScopeNameFromCollectionName( collName ) );
+        IndexScope indexScope = new IndexScopeImpl(
+            cpHeadEntity.getId(), 
+            CpNamingUtils.getCollectionScopeNameFromCollectionName( collName ));
 
         batch.deindex( indexScope, memberEntity );
 
         // remove collection from item index 
-        IndexScope itemScope = new IndexScopeImpl( memberEntity.getId(), CpNamingUtils
-                .getCollectionScopeNameFromCollectionName(
-                        Schema.defaultCollectionName( cpHeadEntity.getId().getType() ) ) );
+        IndexScope itemScope = new IndexScopeImpl(
+            memberEntity.getId(), 
+            CpNamingUtils.getCollectionScopeNameFromCollectionName(
+                    Schema.defaultCollectionName( cpHeadEntity.getId().getType() ) ));
 
 
         batch.deindex( itemScope, cpHeadEntity );
@@ -857,8 +858,8 @@ public class CpRelationManager implements RelationManager {
 
 
     @Override
-    public void copyRelationships( String srcRelationName, EntityRef dstEntityRef, String dstRelationName )
-            throws Exception {
+    public void copyRelationships(String srcRelationName, EntityRef dstEntityRef, 
+            String dstRelationName) throws Exception {
 
         headEntity = em.validate( headEntity );
         dstEntityRef = em.validate( dstEntityRef );
@@ -903,15 +904,17 @@ public class CpRelationManager implements RelationManager {
 
         headEntity = em.validate( headEntity );
 
-        CollectionInfo collection = getDefaultSchema().getCollection( headEntity.getType(), collName );
+        CollectionInfo collection = 
+            getDefaultSchema().getCollection( headEntity.getType(), collName );
 
         if ( collection == null ) {
             throw new RuntimeException( "Cannot find collection-info for '" + collName 
                     + "' of " + headEntity.getType() + ":" + headEntity .getUuid() );
         }
 
-        IndexScope indexScope = new IndexScopeImpl( cpHeadEntity.getId(),
-                CpNamingUtils.getCollectionScopeNameFromCollectionName( collName ) );
+        IndexScope indexScope = new IndexScopeImpl(
+            cpHeadEntity.getId(), 
+            CpNamingUtils.getCollectionScopeNameFromCollectionName( collName ));
 
         EntityIndex ei = managerCache.getEntityIndex( applicationScope );
 
@@ -997,7 +1000,7 @@ public class CpRelationManager implements RelationManager {
         EntityCollectionManager targetEcm = managerCache.getEntityCollectionManager( targetScope );
 
         if ( logger.isDebugEnabled() ) {
-            logger.debug( "createConnection(): " 
+            logger.debug("createConnection(): "
                 + "Indexing connection type '{}'\n   from source {}:{}]\n"
                 + "   to target {}:{}\n   from scope\n   app {}\n   owner {}\n   name {}", 
                 new Object[] {
@@ -1009,7 +1012,7 @@ public class CpRelationManager implements RelationManager {
                     targetScope.getApplication(), 
                     targetScope.getOwner(), 
                     targetScope.getName()
-                } );
+            });
         }
 
         org.apache.usergrid.persistence.model.entity.Entity targetEntity = targetEcm.load( 
@@ -1335,8 +1338,9 @@ public class CpRelationManager implements RelationManager {
             final EntityIndex ei = managerCache.getEntityIndex( applicationScope );
 
 
-            logger.debug( "Searching connected entities from scope {}:{}", 
-                    indexScope.getOwner().toString(), indexScope.getName() );
+            logger.debug("Searching connected entities from scope {}:{}",
+                indexScope.getOwner().toString(),
+                indexScope.getName());
 
             query = adjustQuery( query );
             CandidateResults crs = ei.search( indexScope, query );
@@ -1422,8 +1426,9 @@ public class CpRelationManager implements RelationManager {
 
             EntityIndex ei = managerCache.getEntityIndex( applicationScope );
 
-            logger.debug( "Searching connections from the all-types scope {}:{}", 
-                    indexScope.getOwner().toString(), indexScope.getName() );
+            logger.debug("Searching connections from the all-types scope {}:{}",
+                indexScope.getOwner().toString(),
+                indexScope.getName());
 
             query = adjustQuery( query );
             CandidateResults crs = ei.search( indexScope, query );
@@ -1437,8 +1442,9 @@ public class CpRelationManager implements RelationManager {
             query.getConnectionType() ) );
         EntityIndex ei = managerCache.getEntityIndex( applicationScope );
         
-        logger.debug( "Searching connections from the scope {}:{}", 
-                indexScope.getOwner().toString(), indexScope.getName() );
+        logger.debug("Searching connections from the scope {}:{}",
+            indexScope.getOwner().toString(),
+            indexScope.getName());
 
         query = adjustQuery( query );
         CandidateResults crs = ei.search( indexScope, query );
@@ -1738,55 +1744,26 @@ public class CpRelationManager implements RelationManager {
 
 
     public IndexUpdate batchStartIndexUpdate( 
-            Mutator<ByteBuffer> batch,
-            Entity entity,
-            String entryName,
-            Object entryValue,
-            UUID timestampUuid,
-            boolean schemaHasProperty,
-            boolean isMultiValue,
-            boolean removeListEntry,
-            boolean fulltextIndexed )
+            Mutator<ByteBuffer> batch, Entity entity, String entryName,
+            Object entryValue, UUID timestampUuid, boolean schemaHasProperty,
+             boolean isMultiValue, boolean removeListEntry, boolean fulltextIndexed )
             throws Exception {
-
-        return batchStartIndexUpdate( 
-                batch,
-                entity,
-                entryName,
-                entryValue,
-                timestampUuid,
-                schemaHasProperty,
-                isMultiValue,
-                removeListEntry,
-                fulltextIndexed,
-                false );
+        return batchStartIndexUpdate( batch, entity, entryName, entryValue, timestampUuid, 
+                schemaHasProperty, isMultiValue, removeListEntry, fulltextIndexed, false );
     }
 
 
-    @Metered( group = "core", name = "RelationManager_batchStartIndexUpdate" )
+    @Metered(group = "core", name = "RelationManager_batchStartIndexUpdate")
     public IndexUpdate batchStartIndexUpdate( 
-            Mutator<ByteBuffer> batch,
-            Entity entity,
-            String entryName,
-            Object entryValue,
-            UUID timestampUuid,
-            boolean schemaHasProperty,
-            boolean isMultiValue,
-            boolean removeListEntry,
-            boolean fulltextIndexed,
+        Mutator<ByteBuffer> batch, Entity entity, String entryName,
+        Object entryValue, UUID timestampUuid, boolean schemaHasProperty,
+        boolean isMultiValue, boolean removeListEntry, boolean fulltextIndexed,
             boolean skipRead ) throws Exception {
 
         long timestamp = getTimestampInMicros( timestampUuid );
 
-        IndexUpdate indexUpdate = new IndexUpdate( 
-                batch,
-                entity,
-                entryName,
-                entryValue,
-                schemaHasProperty,
-                isMultiValue,
-                removeListEntry,
-                timestampUuid );
+        IndexUpdate indexUpdate = new IndexUpdate( batch, entity, entryName, entryValue, 
+                schemaHasProperty, isMultiValue, removeListEntry, timestampUuid );
 
         // entryName = entryName.toLowerCase();
 
@@ -1891,8 +1868,11 @@ public class CpRelationManager implements RelationManager {
 
             if ( isMultiValue ) {
                 addInsertToMutator( batch, ENTITY_INDEX_ENTRIES, entity.getUuid(),
-                    asList( entryName, indexValueCode( entryValue ), 
-                    toIndexableValue( entryValue ), indexUpdate.getTimestampUuid() ), null, timestamp );
+                        asList( entryName, 
+                            indexValueCode( entryValue ), 
+                            toIndexableValue( entryValue ),
+                            indexUpdate.getTimestampUuid() ),
+                        null, timestamp );
             }
             else {
                 // int i = 0;
@@ -1933,13 +1913,15 @@ public class CpRelationManager implements RelationManager {
      *
      * @throws Exception the exception
      */
-    @Metered( group = "core", name = "RelationManager_batchUpdateBackwardConnectionsDictionaryIndexes" )
-    public IndexUpdate batchUpdateBackwardConnectionsDictionaryIndexes( IndexUpdate indexUpdate ) throws Exception {
+    @Metered(group = "core", name = "RelationManager_batchUpdateBackwardConnectionsDictionaryIndexes")
+    public IndexUpdate batchUpdateBackwardConnectionsDictionaryIndexes( 
+            IndexUpdate indexUpdate ) throws Exception {
 
         logger.debug( "batchUpdateBackwardConnectionsListIndexes" );
 
         boolean entityHasDictionary = getDefaultSchema()
-                .isDictionaryIndexedInConnections( indexUpdate.getEntity().getType(), indexUpdate.getEntryName() );
+                .isDictionaryIndexedInConnections( 
+                        indexUpdate.getEntity().getType(), indexUpdate.getEntryName() );
 
         if ( !entityHasDictionary ) {
             return indexUpdate;
@@ -1951,8 +1933,8 @@ public class CpRelationManager implements RelationManager {
 
 
     /**
-     * Search each reverse connection type in the graph for connections. If one is found, update the index
-     * appropriately
+     * Search each reverse connection type in the graph for connections.  
+     * If one is found, update the index appropriately
      *
      * @param indexUpdate The index update to use
      *
@@ -1968,7 +1950,8 @@ public class CpRelationManager implements RelationManager {
 
         for ( String connectionType : connectionTypes ) {
 
-            PagingResultsIterator itr = getReversedConnectionsIterator( targetEntity, connectionType );
+            PagingResultsIterator itr = 
+                    getReversedConnectionsIterator( targetEntity, connectionType );
 
             for ( Object connection : itr ) {
 
@@ -1997,9 +1980,9 @@ public class CpRelationManager implements RelationManager {
      *
      * @throws Exception the exception
      */
-    @Metered( group = "core", name = "RelationManager_batchUpdateConnectionIndex" )
-    public IndexUpdate batchUpdateConnectionIndex( IndexUpdate indexUpdate, ConnectionRefImpl connection )
-            throws Exception {
+    @Metered(group = "core", name = "RelationManager_batchUpdateConnectionIndex")
+    public IndexUpdate batchUpdateConnectionIndex( 
+            IndexUpdate indexUpdate, ConnectionRefImpl connection ) throws Exception {
 
         logger.debug( "batchUpdateConnectionIndex" );
 
@@ -2015,17 +1998,12 @@ public class CpRelationManager implements RelationManager {
                 batchDeleteConnectionIndexEntries( indexUpdate, entry, connection, index_keys );
 
                 if ( "location.coordinates".equals( entry.getPath() ) ) {
-                    EntityLocationRef loc = new EntityLocationRef( 
-                        indexUpdate.getEntity(),
-                        entry.getTimestampUuid(),
+                    EntityLocationRef loc = 
+                        new EntityLocationRef( indexUpdate.getEntity(), entry.getTimestampUuid(),
                         entry.getValue().toString() );
                     batchDeleteLocationInConnectionsIndex( 
-                        indexUpdate.getBatch(),
-                        indexBucketLocator,
-                        applicationId,
-                        index_keys,
-                        entry.getPath(),
-                        loc );
+                        indexUpdate.getBatch(), indexBucketLocator, applicationId,
+                        index_keys, entry.getPath(), loc );
                 }
             }
             else {
@@ -2034,24 +2012,22 @@ public class CpRelationManager implements RelationManager {
         }
 
         if ( ( indexUpdate.getNewEntries().size() > 0 ) 
-                && ( !indexUpdate.isMultiValue() || ( indexUpdate.isMultiValue() && !indexUpdate.isRemoveListEntry() ) ) ) {
+                && ( !indexUpdate.isMultiValue() || ( indexUpdate.isMultiValue()
+                && !indexUpdate.isRemoveListEntry() ) ) ) {
 
             for ( IndexUpdate.IndexEntry indexEntry : indexUpdate.getNewEntries() ) {
 
                 batchAddConnectionIndexEntries( indexUpdate, indexEntry, connection, index_keys );
 
                 if ( "location.coordinates".equals( indexEntry.getPath() ) ) {
-                    EntityLocationRef loc = new EntityLocationRef( 
+                    EntityLocationRef loc =
+                            new EntityLocationRef( 
                         indexUpdate.getEntity(),
                         indexEntry.getTimestampUuid(),
                         indexEntry.getValue().toString() );
                     batchStoreLocationInConnectionsIndex( 
-                        indexUpdate.getBatch(),
-                            indexBucketLocator,
-                            applicationId,
-                            index_keys,
-                            indexEntry.getPath(),
-                            loc );
+                            indexUpdate.getBatch(), indexBucketLocator, applicationId,
+                            index_keys, indexEntry.getPath(), loc );
                 }
             }
 
@@ -2066,7 +2042,7 @@ public class CpRelationManager implements RelationManager {
 
         for ( String index : indexUpdate.getIndexesSet() ) {
             addInsertToMutator( indexUpdate.getBatch(), ENTITY_DICTIONARIES,
-                    key( connection.getConnectingIndexId(), Schema.DICTIONARY_INDEXES ), index, null,
+                    key( connection.getConnectingIndexId(), Schema.DICTIONARY_INDEXES), index, null,
                     indexUpdate.getTimestamp() );
         }
 
@@ -2098,12 +2074,11 @@ public class CpRelationManager implements RelationManager {
      * @param resultsLevel The results level to return
      */
     private Results getConnectingEntities( 
-        EntityRef targetEntity,
-            String connectionType,
-            String connectedEntityType,
+            EntityRef targetEntity, String connectionType, String connectedEntityType,
             Level resultsLevel ) throws Exception {
 
-        return getConnectingEntities( targetEntity, connectionType, connectedEntityType, resultsLevel, 0 );
+        return getConnectingEntities(
+                targetEntity, connectionType, connectedEntityType, resultsLevel, 0);
     }
 
 
@@ -2115,12 +2090,8 @@ public class CpRelationManager implements RelationManager {
      * @param connectedEntityType The connected entity type, if not specified all types are returned
      * @param count result limit
      */
-    private Results getConnectingEntities( 
-            EntityRef targetEntity,
-            String connectionType,
-            String connectedEntityType,
-            Level level,
-            int count ) throws Exception {
+    private Results getConnectingEntities( EntityRef targetEntity, String connectionType, 
+            String connectedEntityType, Level level, int count) throws Exception {
 
         Query query = new Query();
         query.setResultsLevel( level );
@@ -2200,9 +2171,11 @@ public class CpRelationManager implements RelationManager {
         logger.debug( "batchAddConnectionIndexEntries" );
 
         // entity_id,prop_name
-        Object property_index_key = key( index_keys[ConnectionRefImpl.ALL], INDEX_CONNECTIONS, entry.getPath(),
-                indexBucketLocator.getBucket( applicationId, IndexBucketLocator.IndexType.CONNECTION,
-                        index_keys[ConnectionRefImpl.ALL], entry.getPath() ) );
+        Object property_index_key = key( index_keys[ConnectionRefImpl.ALL], 
+                INDEX_CONNECTIONS, entry.getPath(),
+                indexBucketLocator.getBucket( applicationId, 
+                        IndexBucketLocator.IndexType.CONNECTION, index_keys[ConnectionRefImpl.ALL],
+                        entry.getPath() ) );
 
         // entity_id,entity_type,prop_name
         Object entity_type_prop_index_key =
@@ -2218,7 +2191,8 @@ public class CpRelationManager implements RelationManager {
 
         // entity_id,connection_type,entity_type,prop_name
         Object connection_type_and_entity_type_prop_index_key =
-                key( index_keys[ConnectionRefImpl.BY_CONNECTION_AND_ENTITY_TYPE], INDEX_CONNECTIONS, entry.getPath(),
+            key( index_keys[ConnectionRefImpl.BY_CONNECTION_AND_ENTITY_TYPE], 
+                INDEX_CONNECTIONS, entry.getPath(),
                         indexBucketLocator.getBucket( applicationId, IndexBucketLocator.IndexType.CONNECTION,
                                 index_keys[ConnectionRefImpl.BY_CONNECTION_AND_ENTITY_TYPE], entry.getPath() ) );
 
@@ -2229,17 +2203,19 @@ public class CpRelationManager implements RelationManager {
 
         // composite(property_value,connected_entity_id,connection_type,entry_timestamp)
         addInsertToMutator( indexUpdate.getBatch(), ENTITY_INDEX, entity_type_prop_index_key,
-                entry.getIndexComposite( conn.getConnectedEntityId(), conn.getConnectionType() ), conn.getUuid(),
-                indexUpdate.getTimestamp() );
+            entry.getIndexComposite( conn.getConnectedEntityId(), conn.getConnectionType() ),
+            conn.getUuid(), indexUpdate.getTimestamp() );
 
         // composite(property_value,connected_entity_id,entity_type,entry_timestamp)
         addInsertToMutator( indexUpdate.getBatch(), ENTITY_INDEX, connection_type_prop_index_key,
-                entry.getIndexComposite( conn.getConnectedEntityId(), conn.getConnectedEntityType() ), conn.getUuid(),
-                indexUpdate.getTimestamp() );
+            entry.getIndexComposite( conn.getConnectedEntityId(), conn.getConnectedEntityType() ),
+            conn.getUuid(), indexUpdate.getTimestamp() );
 
         // composite(property_value,connected_entity_id,entry_timestamp)
-        addInsertToMutator( indexUpdate.getBatch(), ENTITY_INDEX, connection_type_and_entity_type_prop_index_key,
-                entry.getIndexComposite( conn.getConnectedEntityId() ), conn.getUuid(), indexUpdate.getTimestamp() );
+        addInsertToMutator( indexUpdate.getBatch(), ENTITY_INDEX, 
+            connection_type_and_entity_type_prop_index_key,
+            entry.getIndexComposite( conn.getConnectedEntityId() ), conn.getUuid(),
+            indexUpdate.getTimestamp() );
 
         return indexUpdate.getBatch();
     }


[06/12] git commit: Merge branch 'mvn_loadtests' of github.com:amuramoto/incubator-usergrid into mvn_loadtests

Posted by sn...@apache.org.
Merge branch 'mvn_loadtests' of github.com:amuramoto/incubator-usergrid into mvn_loadtests


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/d5ead331
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/d5ead331
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/d5ead331

Branch: refs/heads/two-dot-o-events
Commit: d5ead33122361da6d69f9490a4e7492c9915f16a
Parents: 0eda724 7947bae
Author: amuramoto <am...@apigee.com>
Authored: Mon Oct 27 15:51:39 2014 -0700
Committer: amuramoto <am...@apigee.com>
Committed: Mon Oct 27 15:51:39 2014 -0700

----------------------------------------------------------------------
 stack/loadtests/README.md | 52 ------------------------------------------
 1 file changed, 52 deletions(-)
----------------------------------------------------------------------



[03/12] git commit: fixes to push test

Posted by sn...@apache.org.
fixes to push test


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/6149bf13
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/6149bf13
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/6149bf13

Branch: refs/heads/two-dot-o-events
Commit: 6149bf1397c14c23f94575f094a03b893b7e0029
Parents: c9d6b7e
Author: amuramoto <am...@apigee.com>
Authored: Mon Oct 27 10:08:32 2014 -0700
Committer: amuramoto <am...@apigee.com>
Committed: Mon Oct 27 10:08:32 2014 -0700

----------------------------------------------------------------------
 stack/loadtests/README.md                       |  53 +++++++
 stack/loadtests/pom.xml                         | 140 +++++++++++++++++
 .../datagenerators/EntityDataGenerator.scala    |  59 +++++++
 .../datagenerators/FeederGenerator.scala        | 114 ++++++++++++++
 .../scenarios/ApplicationScenarios.scala        |  46 ++++++
 .../scenarios/ConnectionScenarios.scala         |  36 +++++
 .../usergrid/scenarios/DeviceScenarios.scala    |  85 ++++++++++
 .../usergrid/scenarios/GeoScenarios.scala       |  44 ++++++
 .../scenarios/NotificationScenarios.scala       |  74 +++++++++
 .../usergrid/scenarios/NotifierScenarios.scala  |  66 ++++++++
 .../scenarios/OrganizationScenarios.scala       |  43 ++++++
 .../usergrid/scenarios/TokenScenarios.scala     |  60 ++++++++
 .../usergrid/scenarios/UserScenarios.scala      |  53 +++++++
 .../org/apache/usergrid/settings/Headers.scala  |  43 ++++++
 .../org/apache/usergrid/settings/Settings.scala |  50 ++++++
 .../org/apache/usergrid/settings/Utils.scala    |  91 +++++++++++
 .../simulations/GetEntitySimulation.scala       |  44 ++++++
 .../simulations/PostDevicesSimulation.scala     |  45 ++++++
 .../simulations/PostUsersSimulation.scala       |  50 ++++++
 ...PushNotificationTargetDeviceSimulation.scala |  57 +++++++
 .../PushNotificationTargetUserSimulation.scala  |  72 +++++++++
 stack/loadtests/src/main/scripts/gatling-ug.sh  |  51 ++++++
 stack/loadtests/src/test/resources/gatling.conf | 154 +++++++++++++++++++
 stack/loadtests/src/test/resources/logback.xml  |  20 +++
 .../loadtests/src/test/resources/recorder.conf  |  37 +++++
 stack/loadtests/src/test/scala/Engine.scala     |  16 ++
 .../src/test/scala/IDEPathHelper.scala          |  21 +++
 stack/loadtests/src/test/scala/Recorder.scala   |  12 ++
 28 files changed, 1636 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/README.md
----------------------------------------------------------------------
diff --git a/stack/loadtests/README.md b/stack/loadtests/README.md
new file mode 100644
index 0000000..0c1774a
--- /dev/null
+++ b/stack/loadtests/README.md
@@ -0,0 +1,53 @@
+To make it easy for you to load test your instance of Usergrid, we have bundled in the Gatling load test tool, along with some pre-built tests of different functionality. To get started, do the following:
+
+### Setting up Gatling
+1. Unzip loadtest.zip
+2. cd to the 'gatling' dir
+3. Run 'sh loadtest_setup.sh'. This will do the following:
+	- Add some handy options to gatling/bin/gatling.sh that will allow you to set certain test parameters using environment variables (more on this later)
+	- Run the PostUsersSimulation, which will load 5k users with geolocation data into a specified UG org/app. This is just to seed some data entities to make it easier to run some of the tests.
+4. Set the following environment variables:
+- GATLING_BASE_URL - Required. UG base url, e.g. http://api.usergrid.com/.
+- GATLING_ORG      - Required. UG organization name.
+- GATLING_APP      - Required. UG application name.
+
+- GATLING_NUMUSERS - Number of users in the simulation. Default is 100.
+- GATLING_DURATION - Duration of the simulation, in seconds. Default is 300.
+- GATLING_RAMPTIME - Time period to inject the users over, in seconds. Default is 0.
+- GATLING_THROTTLE - Requests per second the simulation will try to reach. Default is 50.
+
+- GATLING_NOTIFIER - Name of the notifier to use for PushNotificationSimulation.
+- GATLING_PROVIDER - Push notification provider that corresponds to the notifier, e.g. apple, google, etc.
+
+### Running load tests
+To run Gatling, do the following:
+1. Run 'gatling/bin/gatling.sh'
+2. Enter the number of the test you want to run from the list (see below for an explanation of each test)
+3. Optional. Set an identifier for the results of this run of the simulation
+4. Optional. Set a description for this run of the simulation
+
+### Viewing results
+Results of the test are output to the gatling/results directory. The output directory is shown once the test has successfully run, along with the location of the generated report.
+
+### Default tests
+The following default tests are available. Note that the GATLING_BASE_URL, GATLING_ORG, and GATLING_APP environment variables must be set before any tests can be run. Some tests also require additional environment variables, as noted below.
+
+- PostUsersSimulation
+
+POSTs 5k entities with geolocation data to /users. Entities are named sequentially, i.e. user1, user2, etc.
+
+- GetEntitySimulation
+
+Performs simple GETs on the /users collection. You should run PostUsersSimulation or loadtest_setup.sh first to load data into the collection.
+
+- PostDevicesSimulation
+
+POSTs a user-specified number of entities to the /devices collection. This is useful if you want to load test push notifications.
+
+- PushTargetDeviceSimulation
+
+Creates users and devices, connects users with devices, then sends a push notification to all user devices. To run this, you will need to create a notifier, then set the GATLING_NOTIFIER environment variable to the name or UUID of the notifier. You'll also need to set GATLING_PROVIDER to match the provider in the notifier.
+
+- PushTargetDeviceSimulation
+
+Sends push notifications. To run this, you will need to create a notifier, then set the GATLING_NOTIFIER environment variable to the name or UUID of the notifier. You'll also need to set GATLING_PROVIDER to match the provider in the notifier.
\ No newline at end of file
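
For reference, every bundled test follows the same Gatling pattern: build an HTTP configuration from Settings, wrap one or more scenario objects, then inject users against it. Below is a minimal sketch of that pattern (the class name is hypothetical); it simply mirrors the GetEntitySimulation added later in this commit and reuses its Settings and UserScenarios objects.

    package org.apache.usergrid.simulations

    import io.gatling.core.Predef._
    import io.gatling.http.Predef._
    import scala.concurrent.duration._
    import org.apache.usergrid.scenarios.UserScenarios
    import org.apache.usergrid.settings.Settings

    class MinimalGetSimulation extends Simulation {

      // target and sizing knobs all come from the Settings object (JVM system properties)
      val httpConf = Settings.httpConf

      val scn = scenario("GET a random user")
        .exec(UserScenarios.getRandomUser)

      setUp(
        scn.inject(atOnceUsers(Settings.numUsers))
          .throttle(reachRps(Settings.throttle) in (Settings.rampTime.seconds))
          .protocols(httpConf)
      ).maxDuration(Settings.duration)
    }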

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/pom.xml
----------------------------------------------------------------------
diff --git a/stack/loadtests/pom.xml b/stack/loadtests/pom.xml
new file mode 100644
index 0000000..0fa6272
--- /dev/null
+++ b/stack/loadtests/pom.xml
@@ -0,0 +1,140 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+	<modelVersion>4.0.0</modelVersion>
+	<groupId>org.apache.usergrid</groupId>
+	<artifactId>gatling</artifactId>
+	<version>1.0-SNAPSHOT</version>
+
+	<repositories>
+		<repository>
+			<id>sonatype</id>
+			<name>Sonatype OSS</name>
+			<url>https://oss.sonatype.org/content/groups/public</url>
+			<releases>
+				<updatePolicy>never</updatePolicy>
+			</releases>
+			<snapshots>
+				<enabled>true</enabled>
+			</snapshots>
+		</repository>
+	</repositories>
+	<pluginRepositories>
+		<pluginRepository>
+			<id>sonatype</id>
+			<name>Sonatype OSS</name>
+			<url>https://oss.sonatype.org/content/groups/public</url>
+			<snapshots>
+				<enabled>true</enabled>
+			</snapshots>
+		</pluginRepository>
+	</pluginRepositories>
+
+	<properties>
+		<maven.compiler.source>1.7</maven.compiler.source>
+		<maven.compiler.target>1.6</maven.compiler.target>
+		<scala.version>2.10.4</scala.version>
+		<encoding>UTF-8</encoding>
+
+		<gatling.version>2.0.0</gatling.version>
+		<gatling-highcharts.version>2.0.0</gatling-highcharts.version>
+
+		<scala-maven-plugin.version>3.1.6</scala-maven-plugin.version>
+	</properties>
+
+	<dependencyManagement>
+		<dependencies>
+			<dependency>
+				<groupId>io.gatling</groupId>
+				<artifactId>gatling-app</artifactId>
+				<version>${gatling.version}</version>
+			</dependency>
+			<dependency>
+				<groupId>io.gatling</groupId>
+				<artifactId>gatling-recorder</artifactId>
+				<version>${gatling.version}</version>
+			</dependency>
+			<dependency>
+				<groupId>io.gatling.highcharts</groupId>
+				<artifactId>gatling-charts-highcharts</artifactId>
+				<version>${gatling-highcharts.version}</version>
+			</dependency>
+			<dependency>
+				<groupId>org.scala-lang</groupId>
+				<artifactId>scala-library</artifactId>
+				<version>${scala.version}</version>
+			</dependency>
+		</dependencies>
+	</dependencyManagement>
+
+	<dependencies>
+		<dependency>
+			<groupId>io.gatling.highcharts</groupId>
+			<artifactId>gatling-charts-highcharts</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>io.gatling</groupId>
+			<artifactId>gatling-app</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>io.gatling</groupId>
+			<artifactId>gatling-recorder</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.scala-lang</groupId>
+			<artifactId>scala-library</artifactId>
+		</dependency>
+	</dependencies>
+
+	<build>
+		<sourceDirectory>src/main/scala</sourceDirectory>
+		<testSourceDirectory>src/test/scala</testSourceDirectory>
+		<pluginManagement>
+			<plugins>
+				<plugin>
+					<groupId>net.alchim31.maven</groupId>
+					<artifactId>scala-maven-plugin</artifactId>
+					<version>${scala-maven-plugin.version}</version>
+				</plugin>
+				<plugin>
+					<groupId>io.gatling</groupId>
+					<artifactId>gatling-maven-plugin</artifactId>
+					<version>${gatling.version}</version>
+				</plugin>
+			</plugins>
+		</pluginManagement>
+		<plugins>
+			<plugin>
+				<groupId>net.alchim31.maven</groupId>
+				<artifactId>scala-maven-plugin</artifactId>
+				<executions>
+					<execution>
+						<goals>
+							<goal>compile</goal>
+							<goal>testCompile</goal>
+						</goals>
+						<configuration>
+							<args>
+								<arg>-target:jvm-1.6</arg>
+								<arg>-deprecation</arg>
+								<arg>-feature</arg>
+								<arg>-unchecked</arg>
+								<arg>-language:implicitConversions</arg>
+								<arg>-language:postfixOps</arg>
+							</args>
+						</configuration>
+					</execution>
+				</executions>
+			</plugin>
+			<plugin>
+				<groupId>io.gatling</groupId>
+				<artifactId>gatling-maven-plugin</artifactId>
+        <configuration>
+          <simulationsFolder>src/main/scala</simulationsFolder>
+          <simulationClass>org.apache.usergrid.simulations.PushNotificationTargetDeviceSimulation</simulationClass>
+        </configuration>
+
+			</plugin>
+		</plugins>
+	</build>
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/main/scala/org/apache/usergrid/datagenerators/EntityDataGenerator.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/datagenerators/EntityDataGenerator.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/datagenerators/EntityDataGenerator.scala
new file mode 100755
index 0000000..5d1d8f6
--- /dev/null
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/datagenerators/EntityDataGenerator.scala
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.usergrid.datagenerators
+
+ import org.apache.usergrid.settings.Utils
+
+ import scala.collection.mutable.ArrayBuffer
+
+object EntityDataGenerator {
+
+  def generateBlockUserLists(numUsers: Int): Map[String, String] = {
+
+    var blocks: ArrayBuffer[String] = new ArrayBuffer[String]
+    var blockedBy: ArrayBuffer[String] = new ArrayBuffer[String]
+
+    for (numBlock <- 1 to Utils.generateRandomInt(1, 7)) {
+      blocks += "user".concat(Utils.generateRandomInt(0, numUsers).toString)
+    }
+
+    for (numBlockedBy <- 1 to Utils.generateRandomInt(1, 7)) {
+      blockedBy += "user".concat(Utils.generateRandomInt(0, numUsers).toString)
+    }
+
+    return Map("blocks" -> blocks.toArray.mkString(","), "blockedBy" -> blockedBy.toArray.mkString(","))
+
+  }
+
+  def generateUser(userId: Int): Map[String,String] = {
+
+    return Map("username" -> "user".concat(userId.toString),
+      "profileId" -> Utils.generateRandomInt(10000, 1000000).toString,
+      "displayName" -> Utils.generateRandomInt(10000, 1000000).toString,
+      "showAge" -> Utils.generateRandomInt(0, 1).toString,
+      "ethnicity" -> Utils.generateRandomInt(1, 15).toString,
+      "relationshipStatus" -> Utils.generateRandomInt(1, 4).toString,
+      "headline" -> "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.",
+      "aboutMe" -> "Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.",
+      "age" -> Utils.generateRandomInt(18, 65).toString,
+      "height" -> Utils.generateRandomInt(48, 84).toString,
+      "weight" -> Utils.generateRandomInt(120, 350).toString,
+      "seen" -> Utils.generateRandomInt(50, 100000).toString
+    )
+  }
+
+}
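
As a quick illustration of what these generators return (the wrapper object below is hypothetical, not part of the commit):

    import org.apache.usergrid.datagenerators.EntityDataGenerator

    object EntityDataGeneratorSketch extends App {
      // one user record: "username" -> "user42" plus randomized profile fields,
      // all values kept as strings so they can drop straight into a Gatling session
      val user: Map[String, String] = EntityDataGenerator.generateUser(42)
      println(user("username"))

      // comma-separated lists of other user names this user blocks / is blocked by
      val blockLists = EntityDataGenerator.generateBlockUserLists(100)
      println(blockLists("blocks"))
      println(blockLists("blockedBy"))
    }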

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/main/scala/org/apache/usergrid/datagenerators/FeederGenerator.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/datagenerators/FeederGenerator.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/datagenerators/FeederGenerator.scala
new file mode 100755
index 0000000..9f17900
--- /dev/null
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/datagenerators/FeederGenerator.scala
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.usergrid.datagenerators
+
+import io.gatling.core.Predef._
+ import org.apache.usergrid.settings.Utils
+ import scala.collection.mutable.ArrayBuffer
+
+object FeederGenerator {
+
+  def generateUserWithGeolocationFeeder(numUsers: Int, radius: Double, centerLatitude: Double, centerLongitude: Double): Array[Map[String, String]] = {
+    var userArray: ArrayBuffer[Map[String, String]] = new ArrayBuffer[Map[String, String]]
+    for (userCount <- 1 to numUsers) {
+      var user: Map[String, String] = EntityDataGenerator.generateUser(userCount)
+      var geolocation: Map[String, String] = Utils.generateRandomGeolocation(radius, centerLatitude, centerLongitude)
+      var blockLists: Map[String, String] = EntityDataGenerator.generateBlockUserLists(numUsers)
+
+      user = user ++ geolocation ++ blockLists
+
+      userArray += user
+    }
+    return userArray.toArray
+  }
+
+  def generateGeolocationFeeder(radius: Double, centerLatitude: Double, centerLongitude: Double): Feeder[String] = {
+
+    val geolocationFeeder = new Feeder[String] {
+
+      // always return true as this feeder can be polled infinitely
+      override def hasNext = true
+
+      override def next: Map[String, String] = {
+        var geolocation: Map[String, String] = Utils.generateRandomGeolocation(radius, centerLatitude, centerLongitude)
+        Map("latitude" -> geolocation("latitude"), "longitude" -> geolocation("longitude"))
+      }
+    }
+
+    return geolocationFeeder
+
+  }
+
+  def generateGeolocationWithQueryFeeder(radius: Double, centerLatitude: Double, centerLongitude: Double): Feeder[String] = {
+
+    val geolocationFeeder = new Feeder[String] {
+
+      // always return true as this feeder can be polled infinitely
+      override def hasNext = true
+
+      override def next: Map[String, String] = {
+        var geolocation: Map[String, String] = Utils.generateRandomGeolocation(radius, centerLatitude, centerLongitude)
+        var queryParams = Utils.generateRandomQueryString
+        Map("latitude" -> geolocation("latitude"), "longitude" -> geolocation("longitude"), "queryParams" -> queryParams)
+      }
+    }
+
+    return geolocationFeeder
+
+  }
+
+  def generateUserConnectionFeeder(numUsers: Int): Feeder[String] = {
+
+    val userIdFeeder = new Feeder[String] {
+
+      // always return true as this feeder can be polled infinitely
+      override def hasNext = true
+
+      override def next: Map[String, String] = {
+        Map("user1" -> "user".concat(Utils.generateRandomInt(1, numUsers).toString), "user2" -> "user".concat(Utils.generateRandomInt(1, numUsers).toString))
+      }
+    }
+
+    return userIdFeeder
+
+  }
+
+  def generateEntityNameFeeder(prefix: String, numEntities: Int): Array[Map[String, String]] = {
+
+    var nameArray: ArrayBuffer[Map[String, String]] = new ArrayBuffer[Map[String, String]]
+
+    for (entityCount <- 1 to numEntities) {
+      nameArray += Map("entityName" -> prefix.concat(entityCount.toString))
+    }
+
+    return nameArray.toArray
+
+  }
+
+  def generateRandomEntityNameFeeder(prefix: String, numEntities: Int): Array[Map[String, String]] = {
+
+    var nameArray: ArrayBuffer[Map[String, String]] = new ArrayBuffer[Map[String, String]]
+
+    for (entityCount <- 1 to numEntities) {
+      nameArray += Map("entityName" -> prefix.concat(Utils.generateRandomInt(0, 100000000).toString))
+    }
+
+    return nameArray.toArray
+
+  }
+
+}
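
A hedged sketch of how these feeders plug into scenarios (the object and scenario names are illustrative; the real wiring lives in the simulations later in this commit):

    import io.gatling.core.Predef._
    import org.apache.usergrid.datagenerators.FeederGenerator
    import org.apache.usergrid.scenarios.{DeviceScenarios, GeoScenarios}

    object FeederUsageSketch {
      // array-backed feeder: each record exposes ${entityName} to the session
      val deviceNames = FeederGenerator.generateEntityNameFeeder("device", 2000)

      // infinite feeder: every poll yields a fresh ${latitude}/${longitude} pair
      val geoFeeder = FeederGenerator.generateGeolocationFeeder(32000, 37.442348, -122.138268)

      val postDevices = scenario("POST devices")
        .feed(deviceNames)
        .exec(DeviceScenarios.postDeviceWithNotifier)

      val geoQueries = scenario("GET geolocated users")
        .feed(geoFeeder)
        .exec(GeoScenarios.getGeolocation)
    }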

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/ApplicationScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/ApplicationScenarios.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/ApplicationScenarios.scala
new file mode 100755
index 0000000..ffc7d96
--- /dev/null
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/ApplicationScenarios.scala
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.usergrid.scenarios
+
+import io.gatling.core.Predef._
+import io.gatling.http.Predef._
+ import org.apache.usergrid.settings.{Settings, Headers}
+
+ /**
+ * Performs application creation
+ *
+ *
+ * Expects:
+ *
+ * authToken The auth token to use when creating the application
+ * orgName The organization name
+ *
+ * Produces:
+ *
+ * appName The name of the created application
+ */
+object ApplicationScenarios {
+
+  val createApplication = exec(http("Create Application")
+    .post("/management/organizations/${org}/applications")
+    .headers(Headers.jsonAuthorized)
+    .body(StringBody("{\"name\":\"" + Settings.app + "\"}"))
+    .check(status.is(200))
+
+    )
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/ConnectionScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/ConnectionScenarios.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/ConnectionScenarios.scala
new file mode 100755
index 0000000..4a5e2ae
--- /dev/null
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/ConnectionScenarios.scala
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.usergrid.scenarios
+
+import io.gatling.core.Predef._
+import io.gatling.http.Predef._
+
+object ConnectionScenarios {
+
+  val postConnection = exec(
+    http("POST connection")
+    .post("/users/${user1}/likes/users/${user2}")
+    .check(status.is(200))
+  )
+
+  val postUserToDeviceConnection = exec(
+    http("Connect user with device")
+    .post("/users/${username}/devices/${deviceId}")
+    .check(status.is(200))
+  )
+
+}
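
These chains expect ${user1}/${user2} (or ${username}/${deviceId}) to already be in the session; a small sketch pairing postConnection with the connection feeder defined above (object name hypothetical):

    import io.gatling.core.Predef._
    import org.apache.usergrid.datagenerators.FeederGenerator
    import org.apache.usergrid.scenarios.ConnectionScenarios

    object ConnectionSketch {
      // the feeder fills ${user1} and ${user2}; postConnection then issues
      // POST /users/${user1}/likes/users/${user2}
      val likeConnections = scenario("create like connections")
        .feed(FeederGenerator.generateUserConnectionFeeder(5000))
        .exec(ConnectionScenarios.postConnection)
    }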

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/DeviceScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/DeviceScenarios.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/DeviceScenarios.scala
new file mode 100755
index 0000000..5737e24
--- /dev/null
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/DeviceScenarios.scala
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.usergrid.scenarios
+
+import io.gatling.core.Predef._
+import io.gatling.http.Predef.StringBody
+import io.gatling.http.Predef._
+import org.apache.usergrid.settings.Settings
+
+/**
+ *
+ * Creates a new device
+ *
+ * Expects:
+ *
+ * authToken The auth token to use when creating the application
+ * orgName The name of the org
+ * appName The name of the app
+ * notifierName The name of the created notifier
+ *
+ * Produces:
+ *
+ * deviceName the name of the device created
+ *
+ */
+object DeviceScenarios {
+
+  val notifier = Settings.pushNotifier
+
+  /**
+   * Create a device
+   */
+  val postDeviceWithNotifier = exec(http("Create device with notifier")
+    .post("/devices")
+    .body(StringBody("""{"deviceModel":"Fake Device",
+    "deviceOSVerion":"Negative Version",
+    """" + notifier + """.notifier.id":"${entityName}"}"""))
+    .check(status.is(200), jsonPath("$..entities[0].uuid").exists , jsonPath("$..entities[0].uuid").saveAs("deviceId")))
+
+
+  val postDeviceWithNotifier400ok = exec(http("Create device with notifier")
+    .post("/devices")
+    .body(StringBody("""{"name":"${entityName}",
+    "deviceModel":"Fake Device",
+    "deviceOSVerion":"Negative Version",
+    "${notifier}.notifier.id":"${entityName}"}"""))
+    .check(status.in(200 to 400), jsonPath("$.entities[0].uuid").saveAs("deviceId")))
+
+
+  /**
+   * Requires: entityName to use as the device name. If the device does not exist, it will be created.
+   */
+  val maybeCreateDevice = exec(
+    //try to do a GET on device name, if it 404's create it
+    http("Check and create device").get("/devices/${entityName}").check(status.not(404).saveAs("deviceExists")))
+    //create the device if we got a 404
+    .doIf("${deviceExists}", "404") {
+
+    exec(
+
+      http("Create device and save deviceId").post("/devices").body(StringBody(
+        """{"name":"${entityName}",
+          "deviceModel":"Fake Device",
+          "deviceOSVerion":"Negative Version",
+          "${notifier}.notifier.id":"${entityName}"}"""))
+          .check(status.is(200), jsonPath("$.entities[0].uuid").saveAs("deviceId"))
+    )
+  }
+
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/GeoScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/GeoScenarios.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/GeoScenarios.scala
new file mode 100755
index 0000000..2954abd
--- /dev/null
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/GeoScenarios.scala
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.usergrid.scenarios
+
+import io.gatling.core.Predef._
+import io.gatling.http.Predef._
+ import org.apache.usergrid.settings.{Utils, Settings}
+
+ object GeoScenarios {
+
+  val getGeolocation = exec(
+      http("GET geolocated user")
+        .get("/users?ql=location%20within%20" + Settings.geosearchRadius + "%20of%20${latitude},${longitude}")
+        .check(status.is(200))
+    )
+
+  val getGeolocationWithQuery = exec(
+      http("GET geolocated user with query")
+        .get("/users?ql=${queryParams}%20AND%20location%20within%20" + Settings.geosearchRadius + "%20of%20${latitude},${longitude}")
+        .check(status.is(200))
+    )
+
+  val updateGeolocation = exec(
+    http("PUT user location")
+      .put("/users/user" + Utils.generateRandomInt(1, Settings.numUsers))
+      .body(StringBody("{\"location\":{\"latitude\":\"${latitude}\",\"longitude\":\"${longitude}\"}}"))
+      .check(status.is(200))
+  )
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/NotificationScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/NotificationScenarios.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/NotificationScenarios.scala
new file mode 100755
index 0000000..dad4cae
--- /dev/null
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/NotificationScenarios.scala
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.usergrid.scenarios
+
+import java.io.File
+import java.nio.file.{Paths, Files}
+
+import io.gatling.core.Predef._
+import io.gatling.http.Predef._
+import scala.concurrent.duration._
+
+import scala.io.Source
+
+import org.apache.usergrid.settings.Settings
+
+/**
+ *
+ * Sends push notifications
+ *
+ * Expects:
+ *
+ * authToken The auth token to use when creating the application
+ * orgName The name of the org
+ * appName The name of the app
+ * notifierName The name of the created notifier
+ * deviceName the name of the device created to send the notification to
+ *
+ * Produces:
+ *
+ * N/A
+ *
+ *
+ */
+object NotificationScenarios {
+
+  val notifier = Settings.pushNotifier
+
+  /**
+   * send the notification now
+   */
+  val sendNotification = exec(http("Send Single Notification")
+      .post("/devices/${entityName}/notifications")
+      .body(StringBody("{\"payloads\":{\"" + notifier + "\":\"testmessage\"}}"))
+      .check(status.is(200))
+    )
+
+  val sendNotificationToUser= exec(http("Send Notification to All Devices")
+    .post("/users/${username}/notifications")
+    .body(StringBody("{\"debug\":\"true\",\"payloads\":{\"" + notifier + "\":\"testmessage\"}}"))
+    .check(status.is(200))
+  )
+
+  /**
+   * TODO: Add posting to users, which would expect a user in the session
+   */
+
+
+
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/NotifierScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/NotifierScenarios.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/NotifierScenarios.scala
new file mode 100755
index 0000000..00cdd71
--- /dev/null
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/NotifierScenarios.scala
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.usergrid.scenarios
+
+import io.gatling.core.Predef._
+import io.gatling.http.Predef._
+ import org.apache.usergrid.settings.Settings
+ import scala.concurrent.duration._
+
+/**
+ *
+ * Creates a new no-op notifier
+ *
+ *
+ * Expects:
+ *
+ * authToken The auth token to use when creating the application
+ * orgName The name of the org
+ * appName The name of the app
+ *
+ * Produces:
+ *
+ * notifierName The name of the created notifier
+ *
+ */
+object NotifierScenarios {
+  
+  val notifier = Settings.pushNotifier
+  val provider = Settings.pushProvider
+
+  /**
+   * Create a notifier
+   */
+  val createNotifier = exec(
+      session => {
+        session.set("notifier", notifier)
+        session.set("provider", provider)
+      }
+    )
+
+    .exec(http("Create Notifier")
+    .post("/notifiers")
+    .body(StringBody("{\"name\":\"" + notifier + "\",\"provider\":\"" + provider + "\"}"))
+    //remnants of trying to upload an apple certificate
+//    .param("name", "${notifierName}")
+//    .param("provider", "apple")
+//    .param("environment", "mock")
+//    .fileBody("p12Certificate", Map).fileBody(pkcs12Cert)
+    .check(status.is(200)))
+
+
+}
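
Taken together, the notifier, device, and notification scenarios make up the push flow. A hedged sketch of one way to chain them (the commit's actual PushNotificationTargetDeviceSimulation is not reproduced here, and the class name below is hypothetical):

    import io.gatling.core.Predef._
    import io.gatling.http.Predef._
    import org.apache.usergrid.datagenerators.FeederGenerator
    import org.apache.usergrid.scenarios.{DeviceScenarios, NotificationScenarios, NotifierScenarios}
    import org.apache.usergrid.settings.Settings

    class PushFlowSketch extends Simulation {

      val deviceNames = FeederGenerator.generateEntityNameFeeder("device", Settings.numDevices)

      // create the notifier, create (or reuse) a device named ${entityName}, then push to it
      val scn = scenario("push flow")
        .exec(NotifierScenarios.createNotifier)
        .feed(deviceNames)
        .exec(DeviceScenarios.maybeCreateDevice)
        .exec(NotificationScenarios.sendNotification)

      setUp(scn.inject(atOnceUsers(Settings.numUsers)).protocols(Settings.httpConf))
    }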

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/OrganizationScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/OrganizationScenarios.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/OrganizationScenarios.scala
new file mode 100755
index 0000000..f79efd6
--- /dev/null
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/OrganizationScenarios.scala
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.usergrid.scenarios
+
+import io.gatling.core.Predef._
+import io.gatling.http.Predef._
+ import org.apache.usergrid.settings.{Settings, Headers}
+ import scala.concurrent.duration._
+
+/**
+ * Performs organization registration
+ *
+ *
+ * Produces:
+ *
+ * orgName The name of the created organization
+ * userName  The user name of the admin to log in with
+ * password The password of the admin to use
+ */
+object OrganizationScenarios {
+
+  //register the org with the randomly generated org
+  val createOrgAndAdmin = exec(http("Create Organization")
+  .post("/management/organizations")
+  .headers(Headers.jsonAnonymous)
+  .body(StringBody("{\"organization\":\"" + Settings.org + "\",\"username\":\"${entityName}\",\"name\":\"${entityName}\",\"email\":\"${entityName}@apigee.com\",\"password\":\"test\"}"))
+  .check(status.is(200)))
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/TokenScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/TokenScenarios.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/TokenScenarios.scala
new file mode 100755
index 0000000..3508d6f
--- /dev/null
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/TokenScenarios.scala
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.usergrid.scenarios
+
+import io.gatling.core.Predef._
+import io.gatling.http.Predef._
+ import org.apache.usergrid.settings.Headers
+ import scala.concurrent.duration._
+
+/**
+ * Class that will get the token and insert it into the test session.
+ * Assumes that the following values are present in the session.
+ *
+ * Expects:
+ *
+ * userName  The user name to log in with
+ * password The password to use
+ *
+ * Produces:
+ *
+ * authToken A valid access token if the login attempt is successful
+ */
+
+object TokenScenarios {
+
+
+  val getManagementToken =
+    exec(
+      http("POST Org Token")
+        .post("/management/token")
+        .headers(Headers.jsonAnonymous)
+        //pass in the username and password, store the "access_token" json response element as the var "authToken" in the session
+        .body(StringBody("{\"username\":\"${username}\",\"password\":\"${password}\",\"grant_type\":\"password\"}"))
+        .check(jsonPath("access_token")
+        .saveAs("authToken"))
+    )
+
+  val getUserToken =
+    exec(
+      http("POST user token")
+        .post("/token")
+        .body(StringBody("{\"grant_type\":\"password\",\"username\":\"${user1}\",\"password\":\"password\"}"))
+        .check(status.is(200))
+    )
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/UserScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/UserScenarios.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/UserScenarios.scala
new file mode 100755
index 0000000..ff96714
--- /dev/null
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/UserScenarios.scala
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.usergrid.scenarios
+
+import io.gatling.core.Predef._
+ import io.gatling.http.Predef.StringBody
+ import io.gatling.http.Predef._
+ import io.gatling.http.request.StringBody
+ import org.apache.usergrid.settings.{Settings, Utils}
+
+ object UserScenarios {
+
+  val getRandomUser = exec(
+    http("GET user")
+      .get("/users/user" + Utils.generateRandomInt(1, Settings.numEntities))
+      .check(status.is(200))
+  )
+
+  val postUser = exec(
+    http("POST geolocated Users")
+      .post("/users")
+      .body(new StringBody("""{"location":{"latitude":"${latitude}","longitude":"${longitude}"},"username":"${username}",
+      "displayName":"${displayName}","age":"${age}","seen":"${seen}","weight":"${weight}",
+      "height":"${height}","aboutMe":"${aboutMe}","profileId":"${profileId}","headline":"${headline}",
+      "showAge":"${showAge}","relationshipStatus":"${relationshipStatus}","ethnicity":"${ethnicity}","password":"password"}"""))
+      .check(status.is(200))
+  )
+
+  val postUser400ok = exec(
+    http("POST geolocated Users")
+      .post("/users")
+      .body(new StringBody("""{"location":{"latitude":"${latitude}","longitude":"${longitude}"},"username":"${username}",
+      "displayName":"${displayName}","age":"${age}","seen":"${seen}","weight":"${weight}",
+      "height":"${height}","aboutMe":"${aboutMe}","profileId":"${profileId}","headline":"${headline}",
+      "showAge":"${showAge}","relationshipStatus":"${relationshipStatus}","ethnicity":"${ethnicity}","password":"password"}"""))
+      .check(status.in(200 to 400))
+  )
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/main/scala/org/apache/usergrid/settings/Headers.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/settings/Headers.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/settings/Headers.scala
new file mode 100755
index 0000000..9b735e5
--- /dev/null
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/settings/Headers.scala
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.usergrid.settings
+
+/**
+ *
+ */
+object Headers {
+
+  /**
+   * Headers for anonymous posts
+   */
+  val jsonAnonymous = Map(
+    "Cache-Control" -> """no-cache""",
+    "Content-Type" -> """application/json; charset=UTF-8"""
+  )
+
+  /**
+   * Headers for authorized users with token and json content type
+   */
+  val jsonAuthorized = Map(
+    "Cache-Control" -> """no-cache""",
+    "Content-Type" -> """application/json; charset=UTF-8""",
+    "Authorization" -> "Bearer ${authToken}"
+  )
+
+
+
+}
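
The Bearer value above comes straight from the session, so a token request has to run first. A minimal sketch of that chain, assuming ${username}, ${password}, and ${org} are already fed into the session (object name hypothetical):

    import io.gatling.core.Predef._
    import org.apache.usergrid.scenarios.{ApplicationScenarios, TokenScenarios}

    object AdminSetupSketch {
      // getManagementToken saves the response's access_token as ${authToken};
      // Headers.jsonAuthorized then sends it as "Authorization: Bearer ${authToken}"
      // on the create-application request
      val adminSetup = scenario("token then application")
        .exec(TokenScenarios.getManagementToken)
        .exec(ApplicationScenarios.createApplication)
    }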

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/main/scala/org/apache/usergrid/settings/Settings.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/settings/Settings.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/settings/Settings.scala
new file mode 100755
index 0000000..bb82100
--- /dev/null
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/settings/Settings.scala
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.usergrid.settings
+
+import io.gatling.core.Predef._
+import io.gatling.http.Predef._
+import scala.concurrent.duration._
+
+object Settings {
+
+  // Target settings
+  val org = System.getProperty("org")
+  val app = System.getProperty("app")
+  val baseUrl = System.getProperty("baseurl")
+  val httpConf = http.baseURL(baseUrl + "/" + org + "/" + app)
+
+  // Simulation settings
+  val numUsers:Int = Integer.getInteger("numUsers", 10).toInt
+  val numEntities:Int = Integer.getInteger("numEntities", 5000).toInt
+  val numDevices:Int = Integer.getInteger("numDevices", 2000).toInt
+
+  val rampTime:Int = Integer.getInteger("rampTime", 0).toInt // in seconds
+  val duration:Int = Integer.getInteger("duration", 300).toInt // in seconds
+  val throttle:Int = Integer.getInteger("throttle", 50).toInt // requests per second
+
+  // Geolocation settings
+  val centerLatitude:Double = 37.442348 // latitude of center point
+  val centerLongitude:Double = -122.138268 // longitude of center point
+  val userLocationRadius:Double = 32000 // location of requesting user in meters
+  val geosearchRadius:Int = 8000 // search area in meters
+
+  // Push Notification settings
+  val pushNotifier = System.getProperty("pushNotifier")
+  val pushProvider = System.getProperty("pushProvider")
+
+}
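
These values are read as plain JVM system properties (System.getProperty / Integer.getInteger), so they are normally passed as -D options to whatever launches Gatling. A small sketch with hypothetical values, setting them programmatically purely for illustration:

    object SettingsSketch extends App {
      // must run before org.apache.usergrid.settings.Settings is first referenced,
      // because its vals are evaluated when the object initializes
      System.setProperty("baseurl", "http://localhost:8080")
      System.setProperty("org", "test-organization")
      System.setProperty("app", "test-app")
      System.setProperty("numUsers", "100")

      import org.apache.usergrid.settings.Settings
      // httpConf now targets http://localhost:8080/test-organization/test-app
      println(Settings.baseUrl + "/" + Settings.org + "/" + Settings.app)
    }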

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/main/scala/org/apache/usergrid/settings/Utils.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/settings/Utils.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/settings/Utils.scala
new file mode 100755
index 0000000..8997d8c
--- /dev/null
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/settings/Utils.scala
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.usergrid.settings
+
+import scala.util.Random
+import scala.math
+import Array._
+
+/**
+ *
+ * Utility for creating various data elements
+ *
+ */
+object Utils {
+
+  private val RNG = new Random
+
+  /**
+   * Generate a new uuid and replace the '-' with empty
+   */
+  def generateUUIDString(): String = {
+    return java.util.UUID.randomUUID.toString.replace("-", "")
+  }
+
+  /**
+   * Generate a unique string with a prefix
+   *
+   * @param prefix
+   * @return
+   */
+  def generateUniqueName(prefix : String): String = {
+     return prefix + generateUUIDString()
+  }
+
+  // random number in between [a...b]
+  def generateRandomInt(lowerBound: Int, upperBound: Int) = RNG.nextInt(upperBound - lowerBound) + lowerBound
+
+  def generateRandomGeolocation(radius: Double, centerLatitude: Double, centerLongitude: Double):Map[String, String] = {
+
+    var rd = radius / 111300 // Convert Radius from meters to degrees.
+    var u = RNG.nextFloat()
+    var v = RNG.nextFloat()
+    var q = math.sqrt(u) * rd
+    var w = q * rd
+    var t = 2 * math.Pi * v
+    var x = math.cos(t) * w
+    var y = math.sin(t) * w
+    var xp = x/math.cos(centerLatitude)
+    var latitude = (y + centerLatitude).toString
+    var longitude = (xp + centerLongitude).toString
+    var geolocation: Map[String, String] = Map("latitude"->latitude,"longitude"->longitude)
+
+    return geolocation
+  }
+
+  def generateRandomQueryString: String = {
+
+    val queryParams = Array("age", "height", "weight")
+    var queryString = ""
+
+    for (numParams <- 1 to generateRandomInt(1, queryParams.length)) {
+      queryString = "age=" + Utils.generateRandomInt(18, 65).toString
+      if (numParams == 2) {
+        queryString += "%20AND%20height=" + Utils.generateRandomInt(48, 84).toString
+      } else if (numParams == 3) {
+        queryString += "%20AND%20weight=" + Utils.generateRandomInt(120, 350).toString
+      }
+    }
+
+    return queryString
+  }
+
+  def createRandomPushNotifier:String = {
+    return Utils.generateUniqueName("notifier")
+  }
+
+}
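
A quick sketch (hypothetical wrapper object) of the kind of values these helpers produce:

    import org.apache.usergrid.settings.Utils

    object UtilsSketch extends App {
      // random point near the default center coordinates used in Settings
      val geo = Utils.generateRandomGeolocation(32000, 37.442348, -122.138268)
      println(geo("latitude") + "," + geo("longitude"))

      // URL-encoded query fragment such as "age=34" or "age=34%20AND%20height=70"
      println(Utils.generateRandomQueryString)

      // unique name: "notifier" followed by a dash-free UUID
      println(Utils.createRandomPushNotifier)
    }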

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/GetEntitySimulation.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/GetEntitySimulation.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/GetEntitySimulation.scala
new file mode 100644
index 0000000..7b9df21
--- /dev/null
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/GetEntitySimulation.scala
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.usergrid.simulations
+
+import io.gatling.core.Predef._
+import io.gatling.http.Predef._
+ import org.apache.usergrid.datagenerators.FeederGenerator
+ import org.apache.usergrid.scenarios.UserScenarios
+ import org.apache.usergrid.settings.Settings
+ import scala.concurrent.duration._
+
+class GetEntitySimulation extends Simulation {
+
+  // Target settings
+  val httpConf = Settings.httpConf
+
+  // Simulation settings
+  val numUsers:Int = Settings.numUsers
+  val numEntities:Int = Settings.numEntities
+  val rampTime:Int = Settings.rampTime
+  val throttle:Int = Settings.throttle
+
+  val feeder = FeederGenerator.generateEntityNameFeeder("user", numEntities).circular
+
+  val scnToRun = scenario("GET entity")
+    .exec(UserScenarios.getRandomUser)
+
+  setUp(scnToRun.inject(atOnceUsers(numUsers)).throttle(reachRps(throttle) in (rampTime.seconds)).protocols(httpConf)).maxDuration(Settings.duration)
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PostDevicesSimulation.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PostDevicesSimulation.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PostDevicesSimulation.scala
new file mode 100755
index 0000000..d7c6dd8
--- /dev/null
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PostDevicesSimulation.scala
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.usergrid.simulations
+
+import io.gatling.core.Predef._
+import io.gatling.http.Predef._
+ import org.apache.usergrid.datagenerators.FeederGenerator
+ import org.apache.usergrid.scenarios.DeviceScenarios
+ import org.apache.usergrid.settings.Settings
+ import scala.concurrent.duration._
+
+class PostDevicesSimulation extends Simulation {
+
+  // Target settings
+  val httpConf = Settings.httpConf
+
+  // Simulation settings
+  val numUsers:Int = Settings.numUsers
+  val numEntities:Int = Settings.numEntities
+  val rampTime:Int = Settings.rampTime
+  val throttle:Int = Settings.throttle
+
+  val feeder = FeederGenerator.generateEntityNameFeeder("device", numEntities)
+
+  val scnToRun = scenario("POST device")
+    .feed(feeder)
+    .exec(DeviceScenarios.postDeviceWithNotifier)
+
+  setUp(scnToRun.inject(atOnceUsers(numUsers)).throttle(reachRps(throttle) in (rampTime.seconds)).protocols(httpConf))
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PostUsersSimulation.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PostUsersSimulation.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PostUsersSimulation.scala
new file mode 100755
index 0000000..cbac041
--- /dev/null
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PostUsersSimulation.scala
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.usergrid.simulations
+
+import io.gatling.core.Predef._
+ import org.apache.usergrid.datagenerators.FeederGenerator
+ import org.apache.usergrid.scenarios.UserScenarios
+ import org.apache.usergrid.settings.Settings
+
+ import scala.concurrent.duration._
+
+class PostUsersSimulation extends Simulation {
+
+  // Target settings
+  val httpConf = Settings.httpConf
+
+  // Simulation settings
+  val numUsers:Int = Settings.numUsers
+  val rampTime:Int = Settings.rampTime
+  val throttle:Int = Settings.throttle
+
+  // Geolocation settings
+  val centerLatitude:Double = Settings.centerLatitude
+  val centerLongitude:Double = Settings.centerLongitude
+  val userLocationRadius:Double = Settings.userLocationRadius
+  val geosearchRadius:Int = Settings.geosearchRadius
+
+  val feeder = FeederGenerator.generateUserWithGeolocationFeeder(numUsers, userLocationRadius, centerLatitude, centerLongitude).queue
+
+  val scnToRun = scenario("POST geolocated users")
+    .feed(feeder)
+    .exec(UserScenarios.postUser)
+
+  setUp(scnToRun.inject(atOnceUsers(numUsers)).throttle(reachRps(throttle) in (rampTime.seconds)).protocols(httpConf))
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PushNotificationTargetDeviceSimulation.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PushNotificationTargetDeviceSimulation.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PushNotificationTargetDeviceSimulation.scala
new file mode 100755
index 0000000..731423c
--- /dev/null
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PushNotificationTargetDeviceSimulation.scala
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.usergrid.simulations
+
+import io.gatling.core.Predef._
+import io.gatling.http.Predef._
+ import org.apache.usergrid._
+ import org.apache.usergrid.datagenerators.FeederGenerator
+ import org.apache.usergrid.scenarios.{NotificationScenarios, DeviceScenarios, NotifierScenarios}
+ import org.apache.usergrid.settings.Settings
+ import scala.concurrent.duration._
+
+/**
+ *
+ * Simple test for setting up multiple orgs and creating push notifications
+ *
+ */
+class PushNotificationTargetDeviceSimulation extends Simulation {
+
+  val numUsers:Int = Settings.numUsers
+  val numEntities:Int = Settings.numEntities
+  val rampTime:Int = Settings.rampTime
+  val throttle:Int = Settings.throttle
+  val duration:Int = Settings.duration  
+  val httpConf = Settings.httpConf
+    .acceptHeader("application/json")
+
+  val createNotifier = NotifierScenarios.createNotifier
+  val createDevice = DeviceScenarios.postDeviceWithNotifier
+  val sendNotification = NotificationScenarios.sendNotification
+
+  val deviceNameFeeder = FeederGenerator.generateEntityNameFeeder("device", numEntities).circular
+
+  val scnToRun = scenario("Create Push Notification")    
+    .during(duration.seconds) {
+      feed(deviceNameFeeder)
+      .exec(sendNotification)
+    }
+
+
+  setUp(scnToRun.inject(atOnceUsers(numUsers)).throttle(reachRps(throttle) in (rampTime.seconds)).protocols(httpConf))
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PushNotificationTargetUserSimulation.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PushNotificationTargetUserSimulation.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PushNotificationTargetUserSimulation.scala
new file mode 100644
index 0000000..9391160
--- /dev/null
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PushNotificationTargetUserSimulation.scala
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.usergrid.simulations
+
+import io.gatling.core.Predef._
+import io.gatling.http.Predef._
+import org.apache.usergrid.settings.Utils
+import org.apache.usergrid.datagenerators.FeederGenerator
+import org.apache.usergrid.scenarios._
+import org.apache.usergrid.settings.Settings
+import scala.concurrent.duration._
+
+class PushNotificationTargetUserSimulation extends Simulation {
+
+  val duration:Int = Settings.duration
+  val numUsersPerSecond:Int = Settings.numUsers
+  val numEntities:Int = numUsersPerSecond * 3 * duration
+  val rampTime:Int = Settings.rampTime
+  val throttle:Int = Settings.throttle
+
+  val httpConf = Settings.httpConf.acceptHeader("application/json")
+  val notifier = Settings.pushNotifier
+
+  val createNotifier = NotifierScenarios.createNotifier
+  val createDevice = DeviceScenarios.postDeviceWithNotifier
+  val sendNotification = NotificationScenarios.sendNotificationToUser
+  val createUser = UserScenarios.postUser400ok
+  val createOrg = OrganizationScenarios.createOrgAndAdmin
+  val connectUserToDevice = ConnectionScenarios.postUserToDeviceConnection
+
+  val deviceNameFeeder = FeederGenerator.generateEntityNameFeeder("device", numEntities)
+  val userFeeder = FeederGenerator.generateUserWithGeolocationFeeder(numUsersPerSecond * duration, Settings.userLocationRadius, Settings.centerLatitude, Settings.centerLongitude)
+  val orgFeeder = FeederGenerator.generateRandomEntityNameFeeder("org", 1)
+
+  val scnCreateOrg = scenario("Create org")
+    .feed(orgFeeder)
+    .exec(createOrg)
+
+  val scnCreateNotifier = scenario("Create notifier")
+    .exec(createNotifier)
+
+  val scnToRun = scenario("Create Push Notification")
+    .feed(userFeeder)
+    .exec(createUser)
+    .repeat(2){
+      feed(deviceNameFeeder)
+      .exec(createDevice)
+      .exec(connectUserToDevice)
+    }
+    .exec(sendNotification)
+
+
+
+  setUp(scnCreateOrg.inject(atOnceUsers(1)).protocols(http.baseURL(Settings.baseUrl)),
+    scnCreateNotifier.inject(nothingFor(5), atOnceUsers(1)).protocols(httpConf),
+    scnToRun.inject(nothingFor(7), constantUsersPerSec(numUsersPerSecond) during (duration)).throttle(reachRps(throttle) in (rampTime.seconds)).protocols(httpConf))
+
+}
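
Note the ordering in setUp: the org/admin scenario is injected immediately, the notifier five seconds later, and the main user/device/notification load only after seven seconds. Gatling runs all injection profiles concurrently, so the nothingFor offsets are what ensure the organization and notifier exist before users and devices start registering against them; the 5s/7s values are generous head starts rather than hard synchronization points.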

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/main/scripts/gatling-ug.sh
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scripts/gatling-ug.sh b/stack/loadtests/src/main/scripts/gatling-ug.sh
new file mode 100755
index 0000000..4c6bc0c
--- /dev/null
+++ b/stack/loadtests/src/main/scripts/gatling-ug.sh
@@ -0,0 +1,51 @@
+#!/bin/sh
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#               http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+die () {
+    echo >&2 "$@"
+    exit 1
+}
+
+[ "$#" -eq 5 ] || die "5 arguments required, $# provided.  Arguments are URL ORG APP NUM_USERS RAMP_TIME"
+
+OLDDIR=`pwd`
+BIN_DIR=`dirname $0`
+cd "${BIN_DIR}/.." && DEFAULT_GATLING_HOME=`pwd` && cd "${OLDDIR}"
+
+GATLING_HOME="${GATLING_HOME:=${DEFAULT_GATLING_HOME}}"
+GATLING_CONF="${GATLING_CONF:=$GATLING_HOME/conf}"
+URL="$1"
+ORG="$2"
+APP="$3"
+USERS="$4"
+RAMP="$5"
+
+#Shift off our first operation
+shift 5
+
+export GATLING_HOME GATLING_CONF
+
+echo "GATLING_HOME is set to ${GATLING_HOME}"
+
+curl -X POST "${URL}/usergrid/sandbox/notifiers" -d '{"name":"notifier82e05787a8c24361a2992c64436b6e6a","provider":"noop"}'
+
+#Add -Ds=<simulation class name>
+
+JAVA_OPTS="-Dthrottle=3000 -Dduration=300 -Dorg=${ORG} -Dbaseurl=${URL} -Dnotifier=notifier82e05787a8c24361a2992c64436b6e6a -DnumEntities=10000 -DnumUsers=${USERS} -DrampTime=${RAMP} -Dapp=${APP} -server -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -Xms512M -Xmx512M -Xmn100M -XX:+HeapDumpOnOutOfMemoryError -XX:+AggressiveOpts -XX:+OptimizeStringConcat -XX:+UseFastAccessorMethods -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -Djava.net.preferIPv4Stack=true -Djava.net.preferIPv6Addresses=false ${JAVA_OPTS}"
+
+echo $JAVA_OPTS
+
+CLASSPATH="$GATLING_HOME/lib/*:$GATLING_CONF:$GATLING_HOME/user-files:${JAVA_CLASSPATH}"
+
+java $JAVA_OPTS -cp "$CLASSPATH" io.gatling.app.Gatling "$@"
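
The script requires exactly five positional arguments (URL ORG APP NUM_USERS RAMP_TIME), creates a no-op notifier with curl, and then launches io.gatling.app.Gatling with the tuning flags baked into JAVA_OPTS; a hypothetical local run would look like ./gatling-ug.sh http://localhost:8080 test-organization test-app 100 60, where the host, org and app names are placeholders rather than values from the repository. Because the argument check insists on exactly five arguments and all five are shifted off, the trailing "$@" handed to Gatling is always empty; as the script's own comment indicates, the simulation class is meant to be added to JAVA_OPTS (-Ds=<simulation class name>) rather than passed on the command line.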

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/test/resources/gatling.conf
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/test/resources/gatling.conf b/stack/loadtests/src/test/resources/gatling.conf
new file mode 100644
index 0000000..1455242
--- /dev/null
+++ b/stack/loadtests/src/test/resources/gatling.conf
@@ -0,0 +1,154 @@
+#########################
+# Gatling Configuration #
+#########################
+
+# This file contains all the settings configurable for Gatling with their default values
+
+gatling {
+  core {
+    #outputDirectoryBaseName = "" # The prefix for each simulation result folder (then suffixed by the report generation timestamp)
+    #runDescription = ""          # The description for this simulation run, displayed in each report
+    #encoding = "utf-8"           # Encoding to use throughout Gatling for file and string manipulation
+    #simulationClass = ""         # The FQCN of the simulation to run (when used in conjunction with noReports, the simulation for which assertions will be validated)
+    #disableCompiler = false      # When set to true, skip compiling and load an already compiled simulation (used in conjunction with simulationClass)
+    #mute = false                 # When set to true, don't ask for simulation name nor run description (currently only used by Gatling SBT plugin)
+
+    extract {
+      regex {
+        #cacheMaxCapacity = 200 # Cache size for the compiled regexes, set to 0 to disable caching
+      }
+      xpath {
+        #cacheMaxCapacity = 200 # Cache size for the compiled XPath queries,  set to 0 to disable caching
+      }
+      jsonPath {
+        #cacheMaxCapacity = 200 # Cache size for the compiled jsonPath queries, set to 0 to disable caching
+        #preferJackson = false  # When set to true, prefer Jackson over Boon for JSON-related operations
+        jackson {
+          #allowComments = false           # Allow comments in JSON files
+          #allowUnquotedFieldNames = false # Allow unquoted JSON fields names
+          #allowSingleQuotes = false       # Allow single quoted JSON field names
+        }
+
+      }
+      css {
+        #cacheMaxCapacity = 200 # Cache size for the compiled CSS selectors queries,  set to 0 to disable caching
+      }
+    }
+
+    timeOut {
+      #simulation = 8640000 # Absolute timeout, in seconds, of a simulation
+    }
+    directory {
+      #data = user-files/data                    # Folder where user's data (e.g. files used by Feeders) is located
+      #requestBodies = user-files/request-bodies # Folder where request bodies are located
+      #simulations = user-files/simulations      # Folder where the bundle's simulations are located
+      simulations = src/main/scala              # Folder where the bundle's simulations are located
+      #reportsOnly = ""                          # If set, name of report folder to look for in order to generate its report
+      #binaries = ""                             # If set, name of the folder where compiled classes are located
+      #results = results                         # Name of the folder where all reports folder are located
+    }
+    zinc {
+      #jvmArgs = "-Xss10M" # JVM args passed to Zinc (in charge of compiling Gatling Simulations)
+    }
+  }
+  charting {
+    #noReports = false       # When set to true, don't generate HTML reports
+    #maxPlotPerSeries = 1000 # Number of points per graph in Gatling reports
+    #accuracy = 10           # Accuracy, in milliseconds, of the report's stats
+    indicators {
+      #lowerBound = 800      # Lower bound for the requests' response time to track in the reports and the console summary
+      #higherBound = 1200    # Higher bound for the requests' response time to track in the reports and the console summary
+      #percentile1 = 95      # Value for the first percentile to track in the reports, the console summary and GraphiteDataWriter
+      #percentile2 = 99      # Value for the second percentile to track in the reports, the console summary and GraphiteDataWriter
+    }
+  }
+  http {
+    #elFileBodiesCacheMaxCapacity = 200        # Cache size for request body EL templates, set to 0 to disable
+    #rawFileBodiesCacheMaxCapacity = 200       # Cache size for request body Raw templates, set to 0 to disable
+    #fetchedCssCacheMaxCapacity = 200          # Cache size for CSS parsed content, set to 0 to disable
+    #fetchedHtmlCacheMaxCapacity = 200         # Cache size for HTML parsed content, set to 0 to disable
+    #redirectPerUserCacheMaxCapacity = 200     # Per virtual user cache size for permanent redirects, set to 0 to disable
+    #expirePerUserCacheMaxCapacity = 200       # Per virtual user cache size for permanent 'Expire' headers, set to 0 to disable
+    #lastModifiedPerUserCacheMaxCapacity = 200 # Per virtual user cache size for permanent 'Last-Modified' headers, set to 0 to disable
+    #etagPerUserCacheMaxCapacity = 200         # Per virtual user cache size for permanent ETag headers, set to 0 to disable
+    #warmUpUrl = "http://goo.gl/pq1Xwu"        # The URL to use to warm-up the HTTP stack (blank means disabled)
+    ssl {
+      trustStore {
+        #type = ""      # Type of SSLContext's TrustManagers store
+        #file = ""      # Location of SSLContext's TrustManagers store
+        #password = ""  # Password for SSLContext's TrustManagers store
+        #algorithm = "" # Algorithm used by SSLContext's TrustManagers store
+      }
+      keyStore {
+        #type = ""      # Type of SSLContext's KeyManagers store
+        #file = ""      # Location of SSLContext's KeyManagers store
+        #password = ""  # Password for SSLContext's KeyManagers store
+        #algorithm = "" # Algorithm used SSLContext's KeyManagers store
+      }
+    }
+    ahc {
+      #allowPoolingConnections = true             # Allow pooling HTTP connections (keep-alive header automatically added)
+      #allowPoolingSslConnections = true          # Allow pooling HTTPS connections (keep-alive header automatically added)
+      #compressionEnforced = false                # Enforce gzip/deflate when Accept-Encoding header is not defined
+      #connectTimeout = 60000                     # Timeout when establishing a connection
+      #pooledConnectionIdleTimeout = 60000        # Timeout when a connection stays unused in the pool
+      #readTimeout = 60000                        # Timeout when a used connection stays idle
+      #connectionTTL = -1                         # Max duration a connection can stay open (-1 means no limit)
+      #ioThreadMultiplier = 2                     # Number of Netty worker threads per core
+      #maxConnectionsPerHost = -1                 # Max number of connections per host (-1 means no limit)
+      #maxConnections = -1                        # Max number of connections (-1 means no limit)
+      #maxRetry = 0                               # Number of times that a request should be tried again
+      #requestTimeout = 60000                     # Timeout of the requests
+      #useProxyProperties = false                 # When set to true, supports standard Proxy System properties
+      #webSocketTimeout = 60000                   # Timeout when a used websocket connection stays idle
+      #useRelativeURIsWithConnectProxies = true   # When set to true, use relative URIs when talking with an SSL proxy or a WebSocket proxy
+      #acceptAnyCertificate = true                # When set to true, doesn't validate SSL certificates
+      #httpClientCodecMaxInitialLineLength = 4096 # Maximum length of the initial line of the response (e.g. "HTTP/1.0 200 OK")
+      #httpClientCodecMaxHeaderSize = 8192        # Maximum size, in bytes, of each request's headers
+      #httpClientCodecMaxChunkSize = 8192         # Maximum length of the content of each chunk
+      #keepEncodingHeader = true                  # Don't drop Encoding response header after decoding
+      #webSocketMaxFrameSize = 10240              # Maximum frame payload size
+    }
+  }
+  data {
+    #writers = "console, file" # The list of DataWriters to which Gatling writes simulation data (currently supported : "console", "file", "graphite", "jdbc")
+    #reader = file             # The DataReader used by the charting engine for reading simulation results
+    console {
+      #light = false           # When set to true, displays a light version without detailed request stats
+    }
+    file {
+      #bufferSize = 8192       # FileDataWriter's internal data buffer size, in bytes
+    }
+    leak {
+      #noActivityTimeout = 30  # Period, in seconds, for which Gatling may have no activity before considering a leak may be happening
+    }
+    jdbc {
+      db {
+        #url = "jdbc:mysql://localhost:3306/temp" # The JDBC URL used by the JDBC DataWriter
+        #username = "root"                        # The database user used by the JDBC DataWriter
+        #password = "123123q"                     # The password for the specified user
+      }
+      #bufferSize = 20                            # The size for each batch of SQL inserts to send to the database
+      create {
+        #createRunRecordTable = "CREATE TABLE IF NOT EXISTS `RunRecords` ( `id` INT NOT NULL AUTO_INCREMENT , `runDate` DATETIME NULL , `simulationId` VARCHAR(45) NULL , `runDescription` VARCHAR(45) NULL , PRIMARY KEY (`id`) )"
+        #createRequestRecordTable = "CREATE TABLE IF NOT EXISTS `RequestRecords` (`id` int(11) NOT NULL AUTO_INCREMENT, `runId` int DEFAULT NULL, `scenario` varchar(45) DEFAULT NULL, `userId` VARCHAR(20) NULL, `name` varchar(50) DEFAULT NULL, `requestStartDate` bigint DEFAULT NULL, `requestEndDate` bigint DEFAULT NULL, `responseStartDate` bigint DEFAULT NULL, `responseEndDate` bigint DEFAULT NULL, `status` varchar(2) DEFAULT NULL, `message` varchar(4500) DEFAULT NULL, `responseTime` bigint DEFAULT NULL, PRIMARY KEY (`id`) )"
+        #createScenarioRecordTable = "CREATE TABLE IF NOT EXISTS `ScenarioRecords` (`id` int(11) NOT NULL AUTO_INCREMENT, `runId` int DEFAULT NULL, `scenarioName` varchar(45) DEFAULT NULL, `userId` VARCHAR(20) NULL, `event` varchar(50) DEFAULT NULL, `startDate` bigint DEFAULT NULL, `endDate` bigint DEFAULT NULL, PRIMARY KEY (`id`) )"
+        #createGroupRecordTable = "CREATE TABLE IF NOT EXISTS `GroupRecords` (`id` int(11) NOT NULL AUTO_INCREMENT, `runId` int DEFAULT NULL, `scenarioName` varchar(45) DEFAULT NULL, `userId` VARCHAR(20) NULL, `entryDate` bigint DEFAULT NULL, `exitDate` bigint DEFAULT NULL, `status` varchar(2) DEFAULT NULL, PRIMARY KEY (`id`) )"
+      }
+      insert {
+        #insertRunRecord = "INSERT INTO RunRecords (runDate, simulationId, runDescription) VALUES (?,?,?)"
+        #insertRequestRecord = "INSERT INTO RequestRecords (runId, scenario, userId, name, requestStartDate, requestEndDate, responseStartDate, responseEndDate, status, message, responseTime) VALUES (?,?,?,?,?,?,?,?,?,?,?)"
+        #insertScenarioRecord = "INSERT INTO ScenarioRecords (runId, scenarioName, userId, event, startDate, endDate) VALUES (?,?,?,?,?,?)"
+        #insertGroupRecord = "INSERT INTO GroupRecords (runId, scenarioName, userId, entryDate, exitDate, status) VALUES (?,?,?,?,?,?)"
+      }
+    }
+    graphite {
+      #light = false              # only send the all* stats
+      #host = "localhost"         # The host where the Carbon server is located
+      #port = 2003                # The port on which the Carbon server listens
+      #protocol = "tcp"           # The protocol used to send data to Carbon (currently supported : "tcp", "udp")
+      #rootPathPrefix = "gatling" # The common prefix of all metrics sent to Graphite
+      #bufferSize = 8192          # GraphiteDataWriter's internal data buffer size, in bytes
+    }
+  }
+}
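
Everything in this gatling.conf is left commented out at its default value; the only active override is directory.simulations = src/main/scala, which points Gatling at the main source tree (matching the <simulationsFolder> configured for the gatling-maven-plugin in the pom) instead of the bundled user-files/simulations layout.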

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/test/resources/logback.xml
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/test/resources/logback.xml b/stack/loadtests/src/test/resources/logback.xml
new file mode 100644
index 0000000..f112f98
--- /dev/null
+++ b/stack/loadtests/src/test/resources/logback.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+
+	<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
+		<encoder>
+			<pattern>%d{HH:mm:ss.SSS} [%-5level] %logger{15} - %msg%n%rEx</pattern>
+			<immediateFlush>false</immediateFlush>
+		</encoder>
+	</appender>
+
+	<!-- Uncomment for logging ALL HTTP requests and responses -->
+	 	<logger name="io.gatling.http.ahc.AsyncHandlerActor" level="TRACE" />
+	<!-- Uncomment for logging ONLY FAILED HTTP requests and responses -->
+	 	<!--<logger name="io.gatling.http.ahc.AsyncHandlerActor" level="DEBUG" />-->
+
+	<root level="WARN">
+		<appender-ref ref="CONSOLE" />
+	</root>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/test/resources/recorder.conf
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/test/resources/recorder.conf b/stack/loadtests/src/test/resources/recorder.conf
new file mode 100644
index 0000000..6c2366e
--- /dev/null
+++ b/stack/loadtests/src/test/resources/recorder.conf
@@ -0,0 +1,37 @@
+recorder {
+  core {
+    #encoding = "utf-8"               # The encoding used for reading/writing request bodies and the generated simulation
+    #outputFolder = ""                # The folder where generated simulation will we written
+    #package = ""                     # The package's name of the generated simulation
+    #className = "RecordedSimulation" # The name of the generated Simulation class
+    #thresholdForPauseCreation = 100  # The minimum time, in milliseconds, that must pass between requests to trigger a pause creation
+    #saveConfig = false               # When set to true, the configuration from the Recorder GUI overwrites this configuration
+  }
+  filters {
+    #filterStrategy = "Disabled" # The selected resources filter strategy (currently supported : "Disabled", "BlackList", "WhiteList")
+    #whitelist = []              # The list of resource patterns that are part of the Recorder's whitelist
+    #blacklist = []              # The list of resource patterns that are part of the Recorder's blacklist
+  }
+  http {
+    #automaticReferer = true       # When set to false, write the referer + enable 'disableAutoReferer' in the generated simulation
+    #followRedirect = true         # When set to false, write redirect requests + enable 'disableFollowRedirect' in the generated simulation
+    #removeConditionalCache = true # When set to true, removes from the generated requests headers leading to request caching
+    #inferHtmlResources = true     # When set to true, add inferred resources + set 'inferHtmlResources' with the configured blacklist/whitelist in the generated simulation
+  }
+  proxy {
+    #port = 8000     # Local port used by Gatling's Proxy for HTTP/HTTPS
+    outgoing {
+      #host = ""     # The outgoing proxy's hostname
+      #username = "" # The username to use to connect to the outgoing proxy
+      #password = "" # The password corresponding to the user to use to connect to the outgoing proxy
+      #port = 0      # The HTTP port to use to connect to the outgoing proxy
+      #sslPort = 0   # If set, The HTTPS port to use to connect to the outgoing proxy
+    }
+  }
+  netty {
+    #maxInitialLineLength = 10000 # Maximum length of the initial line of the response (e.g. "HTTP/1.0 200 OK")
+    #maxHeaderSize = 20000        # Maximum size, in bytes, of each request's headers
+    #maxChunkSize = 8192          # Maximum length of the content of each chunk
+    #maxContentLength = 100000000 # Maximum length of the aggregated content of each response
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/test/scala/Engine.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/test/scala/Engine.scala b/stack/loadtests/src/test/scala/Engine.scala
new file mode 100644
index 0000000..b6dfa44
--- /dev/null
+++ b/stack/loadtests/src/test/scala/Engine.scala
@@ -0,0 +1,16 @@
+import io.gatling.app.Gatling
+import io.gatling.core.config.GatlingPropertiesBuilder
+
+object Engine extends App {
+
+	val props = new GatlingPropertiesBuilder
+	props.disableCompiler
+	props.dataDirectory(IDEPathHelper.dataDirectory.toString)
+	props.resultsDirectory(IDEPathHelper.resultsDirectory.toString)
+	props.requestBodiesDirectory(IDEPathHelper.requestBodiesDirectory.toString)
+	props.binariesDirectory(IDEPathHelper.mavenBinariesDirectory.toString)
+
+  props.simulationClass("org.apache.usergrid.simulations.PushNotificationTargetUserSimulation")
+
+	Gatling.fromMap(props.build)
+}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/test/scala/IDEPathHelper.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/test/scala/IDEPathHelper.scala b/stack/loadtests/src/test/scala/IDEPathHelper.scala
new file mode 100644
index 0000000..988b616
--- /dev/null
+++ b/stack/loadtests/src/test/scala/IDEPathHelper.scala
@@ -0,0 +1,21 @@
+import scala.tools.nsc.io.File
+import scala.tools.nsc.io.Path.string2path
+
+object IDEPathHelper {
+
+	val gatlingConfUrl = getClass.getClassLoader.getResource("gatling.conf").getPath
+	val projectRootDir = File(gatlingConfUrl).parents(2)
+
+	val mavenSourcesDirectory = projectRootDir / "src" / "test" / "scala"
+	val mavenResourcesDirectory = projectRootDir / "src" / "test" / "resources"
+	val mavenTargetDirectory = projectRootDir / "target"
+	val mavenBinariesDirectory = mavenTargetDirectory / "test-classes"
+
+	val dataDirectory = mavenResourcesDirectory / "data"
+	val requestBodiesDirectory = mavenResourcesDirectory / "request-bodies"
+
+	val recorderOutputDirectory = mavenSourcesDirectory
+	val resultsDirectory = mavenTargetDirectory / "results"
+
+	val recorderConfigFile = (mavenResourcesDirectory / "recorder.conf").toFile
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/6149bf13/stack/loadtests/src/test/scala/Recorder.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/test/scala/Recorder.scala b/stack/loadtests/src/test/scala/Recorder.scala
new file mode 100644
index 0000000..b132063
--- /dev/null
+++ b/stack/loadtests/src/test/scala/Recorder.scala
@@ -0,0 +1,12 @@
+import io.gatling.recorder.config.RecorderPropertiesBuilder
+import io.gatling.recorder.controller.RecorderController
+
+object Recorder extends App {
+
+	val props = new RecorderPropertiesBuilder
+	props.simulationOutputFolder(IDEPathHelper.recorderOutputDirectory.toString)
+	props.simulationPackage("org.apache.usergrid")
+	props.requestBodiesFolder(IDEPathHelper.requestBodiesDirectory.toString)
+
+	RecorderController(props.build, Some(IDEPathHelper.recorderConfigFile))
+}
\ No newline at end of file
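
Engine.scala and Recorder.scala are the in-IDE entry points: Engine disables Gatling's on-the-fly compilation (props.disableCompiler) and loads already-compiled classes from the Maven output directory resolved by IDEPathHelper, with the simulation class pinned to PushNotificationTargetUserSimulation, while Recorder launches the Gatling recorder and writes generated simulations into src/test/scala (IDEPathHelper.recorderOutputDirectory) under the org.apache.usergrid package.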


[07/12] git commit: Added missing Apache license to load test files

Posted by sn...@apache.org.
Added missing Apache license to load test files


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/34a6d12e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/34a6d12e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/34a6d12e

Branch: refs/heads/two-dot-o-events
Commit: 34a6d12e90bc8e47eab73a52f347af314ed909db
Parents: d5ead33
Author: amuramoto <am...@apigee.com>
Authored: Mon Oct 27 15:59:07 2014 -0700
Committer: amuramoto <am...@apigee.com>
Committed: Mon Oct 27 15:59:07 2014 -0700

----------------------------------------------------------------------
 stack/loadtests/pom.xml                            | 16 ++++++++++++++++
 stack/loadtests/src/test/scala/Engine.scala        | 16 ++++++++++++++++
 stack/loadtests/src/test/scala/IDEPathHelper.scala | 16 ++++++++++++++++
 stack/loadtests/src/test/scala/Recorder.scala      | 16 ++++++++++++++++
 4 files changed, 64 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/34a6d12e/stack/loadtests/pom.xml
----------------------------------------------------------------------
diff --git a/stack/loadtests/pom.xml b/stack/loadtests/pom.xml
index 0a2f4de..53cf8bb 100644
--- a/stack/loadtests/pom.xml
+++ b/stack/loadtests/pom.xml
@@ -1,4 +1,20 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+        See the License for the specific language governing permissions and
+        limitations under the License.
+-->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
 	<modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/34a6d12e/stack/loadtests/src/test/scala/Engine.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/test/scala/Engine.scala b/stack/loadtests/src/test/scala/Engine.scala
index b6dfa44..00f1004 100644
--- a/stack/loadtests/src/test/scala/Engine.scala
+++ b/stack/loadtests/src/test/scala/Engine.scala
@@ -1,3 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 import io.gatling.app.Gatling
 import io.gatling.core.config.GatlingPropertiesBuilder
 

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/34a6d12e/stack/loadtests/src/test/scala/IDEPathHelper.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/test/scala/IDEPathHelper.scala b/stack/loadtests/src/test/scala/IDEPathHelper.scala
index 988b616..d68ee4d 100644
--- a/stack/loadtests/src/test/scala/IDEPathHelper.scala
+++ b/stack/loadtests/src/test/scala/IDEPathHelper.scala
@@ -1,3 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 import scala.tools.nsc.io.File
 import scala.tools.nsc.io.Path.string2path
 

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/34a6d12e/stack/loadtests/src/test/scala/Recorder.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/test/scala/Recorder.scala b/stack/loadtests/src/test/scala/Recorder.scala
index b132063..266480c 100644
--- a/stack/loadtests/src/test/scala/Recorder.scala
+++ b/stack/loadtests/src/test/scala/Recorder.scala
@@ -1,3 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 import io.gatling.recorder.config.RecorderPropertiesBuilder
 import io.gatling.recorder.controller.RecorderController
 


[05/12] git commit: Changed push load test to use the user UUID instead of the username; improved error handling for when the user already exists; updated the loadtest pom to call PushNotificationTargetUserSimulation instead of PushNotificationTargetDeviceSimulation

Posted by sn...@apache.org.
Changed push load test to use the user UUID instead of the username; improved error handling for when the user already exists; updated the loadtest pom to call PushNotificationTargetUserSimulation instead of PushNotificationTargetDeviceSimulation


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/0eda724f
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/0eda724f
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/0eda724f

Branch: refs/heads/two-dot-o-events
Commit: 0eda724ffd61c511f7de72317baaa1ad6911ca14
Parents: 6149bf1
Author: amuramoto <am...@apigee.com>
Authored: Mon Oct 27 15:49:31 2014 -0700
Committer: amuramoto <am...@apigee.com>
Committed: Mon Oct 27 15:49:31 2014 -0700

----------------------------------------------------------------------
 stack/loadtests/pom.xml                         |  2 +-
 .../scenarios/NotificationScenarios.scala       |  2 +-
 .../usergrid/scenarios/UserScenarios.scala      | 23 +++++++++-----------
 .../PushNotificationTargetUserSimulation.scala  |  2 +-
 4 files changed, 13 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/0eda724f/stack/loadtests/pom.xml
----------------------------------------------------------------------
diff --git a/stack/loadtests/pom.xml b/stack/loadtests/pom.xml
index 0fa6272..0a2f4de 100644
--- a/stack/loadtests/pom.xml
+++ b/stack/loadtests/pom.xml
@@ -131,7 +131,7 @@
 				<artifactId>gatling-maven-plugin</artifactId>
         <configuration>
           <simulationsFolder>src/main/scala</simulationsFolder>
-          <simulationClass>org.apache.usergrid.simulations.PushNotificationTargetDeviceSimulation</simulationClass>
+          <simulationClass>org.apache.usergrid.simulations.PushNotificationTargetUserSimulation</simulationClass>
         </configuration>
 
 			</plugin>

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/0eda724f/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/NotificationScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/NotificationScenarios.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/NotificationScenarios.scala
index dad4cae..ab4d813 100755
--- a/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/NotificationScenarios.scala
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/NotificationScenarios.scala
@@ -59,7 +59,7 @@ object NotificationScenarios {
     )
 
   val sendNotificationToUser= exec(http("Send Notification to All Devices")
-    .post("/users/${username}/notifications")
+    .post("/users/${userId}/notifications")
     .body(StringBody("{\"debug\":\"true\",\"payloads\":{\"" + notifier + "\":\"testmessage\"}}"))
     .check(status.is(200))
   )

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/0eda724f/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/UserScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/UserScenarios.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/UserScenarios.scala
index ff96714..a2fa227 100755
--- a/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/UserScenarios.scala
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/UserScenarios.scala
@@ -30,24 +30,21 @@ import io.gatling.core.Predef._
       .check(status.is(200))
   )
 
-  val postUser = exec(
-    http("POST geolocated Users")
-      .post("/users")
-      .body(new StringBody("""{"location":{"latitude":"${latitude}","longitude":"${longitude}"},"username":"${username}",
-      "displayName":"${displayName}","age":"${age}","seen":"${seen}","weight":"${weight}",
-      "height":"${height}","aboutMe":"${aboutMe}","profileId":"${profileId}","headline":"${headline}","
-      "showAge":"${showAge}","relationshipStatus":"${relationshipStatus}","ethnicity":"${ethnicity}","password":"password"}"""))
-      .check(status.is(200))
-  )
+   val getUserByUsername = exec(
+     http("GET user")
+       .get("/users/${username}")
+       .check(status.is(200), jsonPath("$..entities[0].uuid").saveAs("userId"))
+   )
 
-  val postUser400ok = exec(
+  val postUser = exec(
     http("POST geolocated Users")
       .post("/users")
       .body(new StringBody("""{"location":{"latitude":"${latitude}","longitude":"${longitude}"},"username":"${username}",
       "displayName":"${displayName}","age":"${age}","seen":"${seen}","weight":"${weight}",
       "height":"${height}","aboutMe":"${aboutMe}","profileId":"${profileId}","headline":"${headline}",
       "showAge":"${showAge}","relationshipStatus":"${relationshipStatus}","ethnicity":"${ethnicity}","password":"password"}"""))
-      .check(status.in(200 to 400))
-  )
-
+      .check(status.is(200), status.saveAs("userStatus"), jsonPath("$..entities[0].uuid").saveAs("userId")))
+    .doIf ("${userStatus}", "400") {
+      exec(getUserByUsername)
+    }
 }
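
The reworked postUser chain stashes what it needs in the Gatling session: one check saves the HTTP status as userStatus, another saves the created entity's uuid as userId, and the doIf branch falls back to GET /users/${username} to recover the uuid when the POST comes back 400 because the user already exists. Later requests then reference the uuid instead of the username, as in the notification POST above. A condensed sketch of the save-then-reuse pattern (paths, the JSON path and session keys are taken from the diffs; the trimmed request bodies and the "examplenotifier" name are illustrative only):

    import io.gatling.core.Predef._
    import io.gatling.http.Predef._

    object UserFlowSketch {

      // Save the new user's uuid from the response body into the session as "userId" ...
      val createUserSketch = http("POST user")
        .post("/users")
        .body(StringBody("""{"username":"${username}","password":"password"}"""))
        .check(status.is(200), jsonPath("$..entities[0].uuid").saveAs("userId"))

      // ... and reuse it in a later request through Gatling's ${userId} EL reference.
      val notifyUserSketch = http("Send Notification")
        .post("/users/${userId}/notifications")
        .body(StringBody("""{"debug":"true","payloads":{"examplenotifier":"testmessage"}}"""))
        .check(status.is(200))
    }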

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/0eda724f/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PushNotificationTargetUserSimulation.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PushNotificationTargetUserSimulation.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PushNotificationTargetUserSimulation.scala
index 9391160..af95f8b 100644
--- a/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PushNotificationTargetUserSimulation.scala
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/simulations/PushNotificationTargetUserSimulation.scala
@@ -38,7 +38,7 @@ class PushNotificationTargetUserSimulation extends Simulation {
   val createNotifier = NotifierScenarios.createNotifier
   val createDevice = DeviceScenarios.postDeviceWithNotifier
   val sendNotification = NotificationScenarios.sendNotificationToUser
-  val createUser = UserScenarios.postUser400ok
+  val createUser = UserScenarios.postUser
   val createOrg = OrganizationScenarios.createOrgAndAdmin
   val connectUserToDevice = ConnectionScenarios.postUserToDeviceConnection
 


[11/12] git commit: Backing out a test-breaking change inadvertently made during formatting.

Posted by sn...@apache.org.
Backing out a test-breaking change inadvertently made during formatting.


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/744e5a81
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/744e5a81
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/744e5a81

Branch: refs/heads/two-dot-o-events
Commit: 744e5a814205b07522748d46f73291bd90aba0c0
Parents: 09d4ba4
Author: Dave Johnson <dm...@apigee.com>
Authored: Tue Oct 28 13:55:07 2014 -0400
Committer: Dave Johnson <dm...@apigee.com>
Committed: Tue Oct 28 13:55:07 2014 -0400

----------------------------------------------------------------------
 .../org/apache/usergrid/corepersistence/CpRelationManager.java     | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/744e5a81/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java b/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java
index 180d08c..d0da90f 100644
--- a/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java
+++ b/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java
@@ -1504,7 +1504,7 @@ public class CpRelationManager implements RelationManager {
         if ( query.getSortPredicates().isEmpty() ) {
 
             Query.SortPredicate asc = 
-                new Query.SortPredicate( PROPERTY_CREATED, Query.SortDirection.DESCENDING);
+                new Query.SortPredicate( PROPERTY_CREATED, Query.SortDirection.ASCENDING);
 
             query.addSort( asc );
         }


[12/12] git commit: Merge branch 'two-dot-o' into two-dot-o-events

Posted by sn...@apache.org.
Merge branch 'two-dot-o' into two-dot-o-events


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/1f0e6e43
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/1f0e6e43
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/1f0e6e43

Branch: refs/heads/two-dot-o-events
Commit: 1f0e6e439a4ab5f335b70481bea8fa734742c033
Parents: e837a0c 744e5a8
Author: Dave Johnson <dm...@apigee.com>
Authored: Tue Oct 28 13:56:28 2014 -0400
Committer: Dave Johnson <dm...@apigee.com>
Committed: Tue Oct 28 13:56:28 2014 -0400

----------------------------------------------------------------------
 .../corepersistence/CpRelationManager.java      | 937 +++++++++++--------
 stack/loadtests/README.md                       |  52 -
 stack/loadtests/gatling/LICENSE                 | 202 ----
 stack/loadtests/gatling/conf/application.conf   |  21 -
 stack/loadtests/gatling/conf/gatling.conf       | 162 ----
 stack/loadtests/gatling/conf/logback.xml        |  35 -
 stack/loadtests/gatling/conf/recorder.conf      |  51 -
 .../gatling/lib/Saxon-HE-9.5.1-6-compressed.jar | Bin 3813075 -> 0 bytes
 .../gatling/lib/akka-actor_2.10-2.3.6.jar       | Bin 2583959 -> 0 bytes
 .../lib/async-http-client-1.9.0-BETA13.jar      | Bin 579954 -> 0 bytes
 stack/loadtests/gatling/lib/boon-0.26.jar       | Bin 1026950 -> 0 bytes
 .../loadtests/gatling/lib/commons-pool-1.6.jar  | Bin 111119 -> 0 bytes
 .../lib/compiler-interface-0.13.5-sources.jar   | Bin 30056 -> 0 bytes
 .../lib/concurrentlinkedhashmap-lru-1.4.jar     | Bin 116575 -> 0 bytes
 stack/loadtests/gatling/lib/config-1.2.1.jar    | Bin 219554 -> 0 bytes
 .../gatling/lib/fastring_2.10-0.2.4.jar         | Bin 98640 -> 0 bytes
 .../gatling/lib/gatling-app-2.0.0-RC5.jar       | Bin 73052 -> 0 bytes
 .../gatling/lib/gatling-charts-2.0.0-RC5.jar    | Bin 500609 -> 0 bytes
 .../lib/gatling-charts-highcharts-2.0.0-RC5.jar | Bin 214683 -> 0 bytes
 .../gatling/lib/gatling-core-2.0.0-RC5.jar      | Bin 1678475 -> 0 bytes
 .../gatling/lib/gatling-http-2.0.0-RC5.jar      | Bin 1222752 -> 0 bytes
 .../gatling/lib/gatling-jdbc-2.0.0-RC5.jar      | Bin 41648 -> 0 bytes
 .../gatling/lib/gatling-jms-2.0.0-RC5.jar       | Bin 174279 -> 0 bytes
 .../gatling/lib/gatling-metrics-2.0.0-RC5.jar   | Bin 72446 -> 0 bytes
 .../gatling/lib/gatling-recorder-2.0.0-RC5.jar  | Bin 815471 -> 0 bytes
 .../gatling/lib/gatling-redis-2.0.0-RC5.jar     | Bin 19970 -> 0 bytes
 .../gatling/lib/geronimo-jms_1.1_spec-1.1.1.jar | Bin 32359 -> 0 bytes
 .../gatling/lib/incremental-compiler-0.13.5.jar | Bin 2214694 -> 0 bytes
 .../gatling/lib/jackson-annotations-2.4.0.jar   | Bin 38605 -> 0 bytes
 .../gatling/lib/jackson-core-2.4.2.jar          | Bin 225316 -> 0 bytes
 .../gatling/lib/jackson-databind-2.4.2.jar      | Bin 1075759 -> 0 bytes
 stack/loadtests/gatling/lib/jodd-core-3.6.jar   | Bin 373882 -> 0 bytes
 .../loadtests/gatling/lib/jodd-lagarto-3.6.jar  | Bin 204738 -> 0 bytes
 stack/loadtests/gatling/lib/jodd-log-3.6.jar    | Bin 14547 -> 0 bytes
 .../gatling/lib/jsonpath_2.10-0.5.0.jar         | Bin 180090 -> 0 bytes
 stack/loadtests/gatling/lib/jzlib-1.1.3.jar     | Bin 71976 -> 0 bytes
 .../gatling/lib/logback-classic-1.1.2.jar       | Bin 270750 -> 0 bytes
 .../gatling/lib/logback-core-1.1.2.jar          | Bin 427729 -> 0 bytes
 .../loadtests/gatling/lib/netty-3.9.4.Final.jar | Bin 1310154 -> 0 bytes
 stack/loadtests/gatling/lib/opencsv-2.3.jar     | Bin 19827 -> 0 bytes
 .../gatling/lib/redisclient_2.10-2.13.jar       | Bin 712616 -> 0 bytes
 .../gatling/lib/sbt-interface-0.13.5.jar        | Bin 52012 -> 0 bytes
 stack/loadtests/gatling/lib/scala-compiler.jar  | Bin 14445780 -> 0 bytes
 stack/loadtests/gatling/lib/scala-library.jar   | Bin 7126372 -> 0 bytes
 stack/loadtests/gatling/lib/scala-reflect.jar   | Bin 3203471 -> 0 bytes
 .../gatling/lib/scala-swing-2.10.4.jar          | Bin 707298 -> 0 bytes
 .../lib/scalalogging-slf4j_2.10-1.1.0.jar       | Bin 79003 -> 0 bytes
 .../loadtests/gatling/lib/scopt_2.10-3.2.0.jar  | Bin 122918 -> 0 bytes
 stack/loadtests/gatling/lib/slf4j-api-1.7.7.jar | Bin 29257 -> 0 bytes
 stack/loadtests/gatling/lib/t-digest-3.0.jar    | Bin 49754 -> 0 bytes
 stack/loadtests/gatling/lib/threetenbp-1.0.jar  | Bin 507797 -> 0 bytes
 .../gatling/lib/uncommons-maths-1.2.3.jar       | Bin 49923 -> 0 bytes
 stack/loadtests/gatling/lib/zinc-0.3.5.3.jar    | Bin 392810 -> 0 bytes
 stack/loadtests/gatling/scripts/gatling-ug.sh   |  49 -
 .../gatling/user-files/data/search.csv          |   3 -
 .../gatling/user-files/request-bodies/.keep     |   0
 stack/loadtests/loadtest_setup.sh               |  45 -
 stack/loadtests/pom.xml                         | 156 +++
 .../data-generators/EntityDataGenerator.scala   |  57 --
 .../data-generators/FeederGenerator.scala       | 101 --
 .../scenarios/ApplicationScenarios.scala        |  45 -
 .../scenarios/ConnectionScenarios.scala         |  30 -
 .../usergrid/scenarios/DeviceScenarios.scala    |  65 --
 .../usergrid/scenarios/GeoScenarios.scala       |  43 -
 .../scenarios/NotificationScenarios.scala       |  71 --
 .../usergrid/scenarios/NotifierScenarios.scala  |  65 --
 .../scenarios/OrganizationScenarios.scala       |  42 -
 .../usergrid/scenarios/TokenScenarios.scala     |  59 --
 .../usergrid/scenarios/UserScenarios.scala      |  50 -
 .../org/apache/usergrid/settings/Headers.scala  |  43 -
 .../org/apache/usergrid/settings/Settings.scala |  54 --
 .../org/apache/usergrid/settings/Utils.scala    |  87 --
 .../simulations/GetEntitySimulation.scala       |  41 -
 .../simulations/PostDevicesSimulation.scala     |  42 -
 .../simulations/PostUsersSimulation.scala       |  47 -
 .../PushTargetDeviceSimulation.scala            |  53 --
 .../simulations/PushTargetUserSimulation.scala  |  68 --
 .../datagenerators/EntityDataGenerator.scala    |  59 ++
 .../datagenerators/FeederGenerator.scala        | 114 +++
 .../scenarios/ApplicationScenarios.scala        |  46 +
 .../scenarios/ConnectionScenarios.scala         |  36 +
 .../usergrid/scenarios/DeviceScenarios.scala    |  85 ++
 .../usergrid/scenarios/GeoScenarios.scala       |  44 +
 .../scenarios/NotificationScenarios.scala       |  74 ++
 .../usergrid/scenarios/NotifierScenarios.scala  |  66 ++
 .../scenarios/OrganizationScenarios.scala       |  43 +
 .../usergrid/scenarios/TokenScenarios.scala     |  60 ++
 .../usergrid/scenarios/UserScenarios.scala      |  50 +
 .../org/apache/usergrid/settings/Headers.scala  |  43 +
 .../org/apache/usergrid/settings/Settings.scala |  50 +
 .../org/apache/usergrid/settings/Utils.scala    |  91 ++
 .../simulations/GetEntitySimulation.scala       |  44 +
 .../simulations/PostDevicesSimulation.scala     |  45 +
 .../simulations/PostUsersSimulation.scala       |  50 +
 ...PushNotificationTargetDeviceSimulation.scala |  57 ++
 .../PushNotificationTargetUserSimulation.scala  |  72 ++
 stack/loadtests/src/main/scripts/gatling-ug.sh  |  51 +
 stack/loadtests/src/test/resources/gatling.conf | 154 +++
 stack/loadtests/src/test/resources/logback.xml  |  37 +
 .../loadtests/src/test/resources/recorder.conf  |  37 +
 stack/loadtests/src/test/scala/Engine.scala     |  32 +
 .../src/test/scala/IDEPathHelper.scala          |  37 +
 stack/loadtests/src/test/scala/Recorder.scala   |  28 +
 103 files changed, 2228 insertions(+), 2053 deletions(-)
----------------------------------------------------------------------



[09/12] git commit: Restoring some semblance of my original formatting; take it easy with that auto-formatter, folks!

Posted by sn...@apache.org.
Restoring some semblance of my original formatting; take it easy with that auto-formatter, folks!


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/3771e3f9
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/3771e3f9
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/3771e3f9

Branch: refs/heads/two-dot-o-events
Commit: 3771e3f933f4d13963b16507389a750c08024c90
Parents: 960ce6b
Author: Dave Johnson <dm...@apigee.com>
Authored: Tue Oct 28 11:21:05 2014 -0400
Committer: Dave Johnson <dm...@apigee.com>
Committed: Tue Oct 28 11:21:05 2014 -0400

----------------------------------------------------------------------
 .../corepersistence/CpRelationManager.java      | 889 ++++++++++++-------
 1 file changed, 555 insertions(+), 334 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/3771e3f9/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java b/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java
index 96f0e1e..8c0d886 100644
--- a/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java
+++ b/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java
@@ -189,8 +189,11 @@ public class CpRelationManager implements RelationManager {
     public CpRelationManager() {}
 
 
-    public CpRelationManager init( EntityManager em, CpEntityManagerFactory emf, UUID applicationId,
-                                   EntityRef headEntity, IndexBucketLocator indexBucketLocator ) {
+    public CpRelationManager init( EntityManager em,
+            CpEntityManagerFactory emf,
+            UUID applicationId,
+            EntityRef headEntity, 
+            IndexBucketLocator indexBucketLocator ) {
 
         Assert.notNull( em, "Entity manager cannot be null" );
         Assert.notNull( emf, "Entity manager factory cannot be null" );
@@ -212,21 +215,25 @@ public class CpRelationManager implements RelationManager {
         this.indexBucketLocator = indexBucketLocator; // TODO: this also
 
         // load the Core Persistence version of the head entity as well
-        this.headEntityScope =
-                new CollectionScopeImpl( this.applicationScope.getApplication(), this.applicationScope.getApplication(),
-                        CpNamingUtils.getCollectionScopeNameFromEntityType( headEntity.getType() ) );
+        this.headEntityScope = new CollectionScopeImpl( 
+            this.applicationScope.getApplication(), 
+            this.applicationScope.getApplication(),
+            CpNamingUtils.getCollectionScopeNameFromEntityType( headEntity.getType() ) );
 
         EntityCollectionManager ecm = managerCache.getEntityCollectionManager( headEntityScope );
         if ( logger.isDebugEnabled() ) {
-            logger.debug( "Loading head entity {}:{} from scope\n   app {}\n   owner {}\n   name {}", new Object[] {
-                            headEntity.getType(), headEntity.getUuid(), headEntityScope.getApplication(),
-                            headEntityScope.getOwner(), headEntityScope.getName()
-                    } );
+            logger.debug( "Loading head entity {}:{} from scope\n   app {}\n   owner {}\n   name {}",
+                new Object[] { headEntity.getType(),
+                    headEntity.getUuid(),
+                    headEntityScope.getApplication(),
+                    headEntityScope.getOwner(),
+                    headEntityScope.getName()
+                } );
         }
 
         //TODO PERFORMANCE why are we loading this again here?
-        this.cpHeadEntity = ecm.load( new SimpleId( headEntity.getUuid(), headEntity.getType() ) ).toBlocking()
-                               .lastOrDefault( null );
+        this.cpHeadEntity = ecm.load( new SimpleId( 
+                headEntity.getUuid(), headEntity.getType() ) ).toBlocking() .lastOrDefault( null );
 
         // commented out because it is possible that CP entity has not been created yet
         Assert.notNull( cpHeadEntity, "cpHeadEntity cannot be null" );
@@ -245,12 +252,14 @@ public class CpRelationManager implements RelationManager {
 
         String edgeTypePrefix = CpNamingUtils.getEdgeTypeFromCollectionName( collectionName );
 
-        logger.debug( "getCollectionIndexes(): Searching for edge type prefix {} to target {}:{}", new Object[] {
-                        edgeTypePrefix, cpHeadEntity.getId().getType(), cpHeadEntity.getId().getUuid()
-                } );
+        logger.debug( "getCollectionIndexes(): Searching for edge type prefix {} to target {}:{}",
+            new Object[] { edgeTypePrefix,
+                cpHeadEntity.getId().getType(),
+                cpHeadEntity.getId().getUuid() 
+            } ); 
 
-        Observable<String> types =
-                gm.getEdgeTypesFromSource( new SimpleSearchEdgeType( cpHeadEntity.getId(), edgeTypePrefix, null ) );
+        Observable<String> types = gm.getEdgeTypesFromSource( 
+            new SimpleSearchEdgeType( cpHeadEntity.getId(), edgeTypePrefix, null ) );
 
         Iterator<String> iter = types.toBlockingObservable().getIterator();
         while ( iter.hasNext() ) {
@@ -267,7 +276,8 @@ public class CpRelationManager implements RelationManager {
         //Map<EntityRef, Set<String>> containerEntities = getContainers(-1, "owns", null);
         Map<EntityRef, Set<String>> containerEntities = getContainers();
 
-        Map<String, Map<UUID, Set<String>>> owners = new LinkedHashMap<String, Map<UUID, Set<String>>>();
+        Map<String, Map<UUID, Set<String>>> owners = 
+                new LinkedHashMap<String, Map<UUID, Set<String>>>();
 
         for ( EntityRef owner : containerEntities.keySet() ) {
             Set<String> collections = containerEntities.get( owner );
@@ -298,35 +308,38 @@ public class CpRelationManager implements RelationManager {
 
         GraphManager gm = managerCache.getGraphManager( applicationScope );
 
-        Iterator<String> edgeTypes =
-                gm.getEdgeTypesToTarget( new SimpleSearchEdgeType( cpHeadEntity.getId(), edgeType, null ) ).toBlocking()
-                  .getIterator();
+        Iterator<String> edgeTypes = gm.getEdgeTypesToTarget( new SimpleSearchEdgeType( 
+                cpHeadEntity.getId(), edgeType, null ) ).toBlocking() .getIterator();
 
-        logger.debug(
-                "getContainers(): " + "Searched for edges of type {}\n   to target {}:{}\n   in scope {}\n   found: {}",
-                new Object[] {
-                        edgeType, cpHeadEntity.getId().getType(), cpHeadEntity.getId().getUuid(),
-                        applicationScope.getApplication(), edgeTypes.hasNext()
-                } );
+        logger.debug( "getContainers(): "
+                + "Searched for edges of type {}\n   to target {}:{}\n   in scope {}\n   found: {}",
+            new Object[] { edgeType,
+                cpHeadEntity.getId().getType(),
+                cpHeadEntity.getId().getUuid(),
+                applicationScope.getApplication(),
+                edgeTypes.hasNext() 
+            } );
 
         while ( edgeTypes.hasNext() ) {
 
             String etype = edgeTypes.next();
 
-            Observable<Edge> edges = gm.loadEdgesToTarget(
-                    new SimpleSearchByEdgeType( cpHeadEntity.getId(), etype, Long.MAX_VALUE,
-                            SearchByEdgeType.Order.DESCENDING, null ) );
+            Observable<Edge> edges = gm.loadEdgesToTarget( new SimpleSearchByEdgeType( 
+                cpHeadEntity.getId(), etype, Long.MAX_VALUE, SearchByEdgeType.Order.DESCENDING, null ) );
 
             Iterator<Edge> iter = edges.toBlockingObservable().getIterator();
             while ( iter.hasNext() ) {
                 Edge edge = iter.next();
 
-                if ( fromEntityType != null && !fromEntityType.equals( edge.getSourceNode().getType() ) ) {
-                    logger.debug( "Ignoring edge from entity type {}", edge.getSourceNode().getType() );
+                if (     fromEntityType != null 
+                     && !fromEntityType.equals( edge.getSourceNode().getType() ) ) {
+                    logger.debug( "Ignoring edge from entity type {}", 
+                            edge.getSourceNode().getType() );
                     continue;
                 }
 
-                EntityRef eref = new SimpleEntityRef( edge.getSourceNode().getType(), edge.getSourceNode().getUuid() );
+                EntityRef eref = new SimpleEntityRef( 
+                        edge.getSourceNode().getType(), edge.getSourceNode().getUuid() );
 
                 String name = null;
                 if ( CpNamingUtils.isConnectionEdgeType( edge.getType() ) ) {
@@ -353,11 +366,11 @@ public class CpRelationManager implements RelationManager {
 
         final GraphManager gm = managerCache.getGraphManager( applicationScope );
 
-        logger.debug( "updateContainingCollectionsAndCollections(): "
-                        + "Searched for edges to target {}:{}\n   in scope {}\n   found: {}", new Object[] {
-                        cpHeadEntity.getId().getType(), cpHeadEntity.getId().getUuid(),
-                        applicationScope.getApplication()
-                } );
+        logger.debug( "updateContainingCollectionsAndCollections(): " + "Searched for edges to target {}:{}\n   in scope {}\n   found: {}",
+            new Object[] { cpHeadEntity.getId().getType(),
+                cpHeadEntity.getId().getUuid(),
+                applicationScope.getApplication() 
+            } );
 
         // loop through all types of edge to target
 
@@ -367,25 +380,25 @@ public class CpRelationManager implements RelationManager {
         final EntityIndexBatch entityIndexBatch = ei.createBatch();
 
         final int count = gm.getEdgeTypesToTarget(
-                new SimpleSearchEdgeType( cpHeadEntity.getId(), null, null ) )
-                //for each edge type, emit all the edges of that type
-                            .flatMap( new Func1<String, Observable<Edge>>() {
-                                @Override
-                                public Observable<Edge> call( final String etype ) {
-                                    return gm.loadEdgesToTarget(
-                                            new SimpleSearchByEdgeType( cpHeadEntity.getId(), etype, Long.MAX_VALUE,
-                                                    SearchByEdgeType.Order.DESCENDING, null ) );
-                                }
-                            } )
-
-                            //for each edge we receive index and add to the batch
-                            .doOnNext( new Action1<Edge>() {
+            new SimpleSearchEdgeType( cpHeadEntity.getId(), null, null ) )
+
+                // for each edge type, emit all the edges of that type
+                .flatMap( new Func1<String, Observable<Edge>>() {
                     @Override
-                    public void call( final Edge edge ) {
+                    public Observable<Edge> call( final String etype ) {
+                        return gm.loadEdgesToTarget( new SimpleSearchByEdgeType( 
+                            cpHeadEntity.getId(), etype, Long.MAX_VALUE, 
+                            SearchByEdgeType.Order.DESCENDING, null ) );
+                    }
+                } )
 
+                //for each edge we receive index and add to the batch
+                .doOnNext( new Action1<Edge>() {
+                    @Override
+                    public void call( final Edge edge ) {
 
-                        EntityRef sourceEntity =
-                                new SimpleEntityRef( edge.getSourceNode().getType(), edge.getSourceNode().getUuid() );
+                        EntityRef sourceEntity = new SimpleEntityRef( 
+                                edge.getSourceNode().getType(), edge.getSourceNode().getUuid() );
 
                         // reindex the entity in the source entity's collection or connection index
 
@@ -393,29 +406,29 @@ public class CpRelationManager implements RelationManager {
                         if ( CpNamingUtils.isCollectionEdgeType( edge.getType() ) ) {
 
                             String collName = CpNamingUtils.getCollectionName( edge.getType() );
-                            indexScope =
-                                    new IndexScopeImpl( new SimpleId( sourceEntity.getUuid(), sourceEntity.getType() ),
-                                            CpNamingUtils.getCollectionScopeNameFromCollectionName( collName ) );
+                            indexScope = new IndexScopeImpl( new SimpleId( sourceEntity.getUuid(),
+                                sourceEntity.getType() ),
+                                CpNamingUtils.getCollectionScopeNameFromCollectionName( collName ));
                         }
                         else {
 
                             String connName = CpNamingUtils.getCollectionName( edge.getType() );
-                            indexScope =
-                                    new IndexScopeImpl( new SimpleId( sourceEntity.getUuid(), sourceEntity.getType() ),
-                                            CpNamingUtils.getConnectionScopeName( cpEntity.getId().getType(),
-                                                    connName ) );
+                            indexScope = new IndexScopeImpl( new SimpleId( sourceEntity.getUuid(),
+                                sourceEntity.getType() ),
+                                CpNamingUtils.getConnectionScopeName( cpEntity.getId().getType(),
+                                connName ) );
                         }
 
                         entityIndexBatch.index( indexScope, cpEntity );
 
                         // reindex the entity in the source entity's all-types index
 
-                        indexScope = new IndexScopeImpl( new SimpleId( sourceEntity.getUuid(), sourceEntity.getType() ),
-                                CpNamingUtils.ALL_TYPES );
+                        indexScope = new IndexScopeImpl( new SimpleId( 
+                            sourceEntity.getUuid(), sourceEntity.getType() ), CpNamingUtils.ALL_TYPES );
 
                         entityIndexBatch.index( indexScope, cpEntity );
                     }
-                } ).count().toBlocking().lastOrDefault( 0 );
+            } ).count().toBlocking().lastOrDefault( 0 );
 
 
         entityIndexBatch.execute();
@@ -431,14 +444,22 @@ public class CpRelationManager implements RelationManager {
 
         String edgeType = CpNamingUtils.getEdgeTypeFromConnectionType( connectionType );
 
-        logger.debug( "isConnectionMember(): Checking for edge type {} from {}:{} to {}:{}", new Object[] {
-                        edgeType, headEntity.getType(), headEntity.getUuid(), entity.getType(), entity.getUuid()
-                } );
+        logger.debug( "isConnectionMember(): Checking for edge type {} from {}:{} to {}:{}",
+            new Object[] { 
+                edgeType,
+                headEntity.getType(),
+                headEntity.getUuid(),
+                entity.getType(), entity.getUuid() 
+            } );
 
         GraphManager gm = managerCache.getGraphManager( applicationScope );
-        Observable<Edge> edges = gm.loadEdgeVersions(
-                new SimpleSearchByEdge( new SimpleId( headEntity.getUuid(), headEntity.getType() ), edgeType, entityId,
-                        Long.MAX_VALUE, SearchByEdgeType.Order.DESCENDING, null ) );
+        Observable<Edge> edges = gm.loadEdgeVersions( new SimpleSearchByEdge( 
+            new SimpleId( headEntity.getUuid(), headEntity.getType() ), 
+            edgeType,
+            entityId,
+            Long.MAX_VALUE,
+            SearchByEdgeType.Order.DESCENDING, 
+            null ) );
 
         return edges.toBlockingObservable().firstOrDefault( null ) != null;
     }
@@ -453,14 +474,22 @@ public class CpRelationManager implements RelationManager {
 
         String edgeType = CpNamingUtils.getEdgeTypeFromCollectionName( collName );
 
-        logger.debug( "isCollectionMember(): Checking for edge type {} from {}:{} to {}:{}", new Object[] {
-                        edgeType, headEntity.getType(), headEntity.getUuid(), entity.getType(), entity.getUuid()
-                } );
+        logger.debug( "isCollectionMember(): Checking for edge type {} from {}:{} to {}:{}",
+            new Object[] { 
+                edgeType,
+                headEntity.getType(),
+                headEntity.getUuid(),
+                entity.getType(), entity.getUuid() 
+            } );
 
         GraphManager gm = managerCache.getGraphManager( applicationScope );
-        Observable<Edge> edges = gm.loadEdgeVersions(
-                new SimpleSearchByEdge( new SimpleId( headEntity.getUuid(), headEntity.getType() ), edgeType, entityId,
-                        Long.MAX_VALUE, SearchByEdgeType.Order.DESCENDING, null ) );
+        Observable<Edge> edges = gm.loadEdgeVersions( new SimpleSearchByEdge( 
+            new SimpleId( headEntity.getUuid(), headEntity.getType() ), 
+            edgeType,
+            entityId,
+            Long.MAX_VALUE,
+            SearchByEdgeType.Order.DESCENDING,
+            null ) );
 
         return edges.toBlockingObservable().firstOrDefault( null ) != null;
     }
@@ -472,9 +501,12 @@ public class CpRelationManager implements RelationManager {
 
         GraphManager gm = managerCache.getGraphManager( applicationScope );
 
-        Observable<Edge> edgesToTarget = gm.loadEdgesToTarget(
-                new SimpleSearchByEdgeType( targetId, CpNamingUtils.getEdgeTypeFromConnectionType( connectionType ),
-                        System.currentTimeMillis(), SearchByEdgeType.Order.DESCENDING, null ) ); // last
+        Observable<Edge> edgesToTarget = gm.loadEdgesToTarget( new SimpleSearchByEdgeType( 
+            targetId,
+            CpNamingUtils.getEdgeTypeFromConnectionType( connectionType ),
+            System.currentTimeMillis(),
+            SearchByEdgeType.Order.DESCENDING, 
+            null ) ); // last
 
         Iterator<Edge> iterator = edgesToTarget.toBlockingObservable().getIterator();
         int count = 0;
@@ -494,9 +526,12 @@ public class CpRelationManager implements RelationManager {
 
         GraphManager gm = managerCache.getGraphManager( applicationScope );
 
-        Observable<Edge> edgesFromSource = gm.loadEdgesFromSource(
-                new SimpleSearchByEdgeType( sourceId, CpNamingUtils.getEdgeTypeFromConnectionType( connectionType ),
-                        System.currentTimeMillis(), SearchByEdgeType.Order.DESCENDING, null ) ); // last
+        Observable<Edge> edgesFromSource = gm.loadEdgesFromSource( new SimpleSearchByEdgeType( 
+            sourceId,
+            CpNamingUtils.getEdgeTypeFromConnectionType( connectionType ),
+            System.currentTimeMillis(),
+            SearchByEdgeType.Order.DESCENDING,
+            null ) ); // last
 
         int count = edgesFromSource.take( 2 ).count().toBlocking().last();
 
@@ -511,8 +546,8 @@ public class CpRelationManager implements RelationManager {
 
         GraphManager gm = managerCache.getGraphManager( applicationScope );
 
-        Observable<String> str =
-                gm.getEdgeTypesFromSource( new SimpleSearchEdgeType( cpHeadEntity.getId(), null, null ) );
+        Observable<String> str = gm.getEdgeTypesFromSource( 
+                new SimpleSearchEdgeType( cpHeadEntity.getId(), null, null ) );
 
         Iterator<String> iter = str.toBlockingObservable().getIterator();
         while ( iter.hasNext() ) {
@@ -525,8 +560,11 @@ public class CpRelationManager implements RelationManager {
 
 
     @Override
-    public Results getCollection( String collectionName, UUID startResult, int count, Level resultsLevel,
-                                  boolean reversed ) throws Exception {
+    public Results getCollection( String collectionName,
+            UUID startResult,
+            int count,
+            Level resultsLevel,
+            boolean reversed ) throws Exception {
 
         Query query = Query.fromQL( "select *" );
         query.setLimit( count );
@@ -551,12 +589,14 @@ public class CpRelationManager implements RelationManager {
     @Override
     public Entity addToCollection( String collName, EntityRef itemRef ) throws Exception {
 
-        CollectionInfo collection = getDefaultSchema().getCollection( headEntity.getType(), collName );
+        CollectionInfo collection = 
+                getDefaultSchema().getCollection( headEntity.getType(), collName );
         if ( ( collection != null ) && !collection.getType().equals( itemRef.getType() ) ) {
             return null;
         }
 
-        return addToCollection( collName, itemRef, ( collection != null && collection.getLinkedCollection() != null ) );
+        return addToCollection( collName, itemRef, 
+                ( collection != null && collection.getLinkedCollection() != null ) );
     }
 
 
@@ -581,15 +621,16 @@ public class CpRelationManager implements RelationManager {
         }
 
         // load the new member entity to be added to the collection from its default scope
-        CollectionScope memberScope =
-                new CollectionScopeImpl( applicationScope.getApplication(), applicationScope.getApplication(),
-                        CpNamingUtils.getCollectionScopeNameFromEntityType( itemRef.getType() ) );
+        CollectionScope memberScope = new CollectionScopeImpl( 
+                applicationScope.getApplication(),
+                applicationScope.getApplication(),
+                CpNamingUtils.getCollectionScopeNameFromEntityType( itemRef.getType() ) );
 
         EntityCollectionManager memberMgr = managerCache.getEntityCollectionManager( memberScope );
 
         //TODO, this double load should disappear once events are in
-        org.apache.usergrid.persistence.model.entity.Entity memberEntity =
-                memberMgr.load( new SimpleId( itemRef.getUuid(), itemRef.getType() ) ).toBlocking().last();
+        org.apache.usergrid.persistence.model.entity.Entity memberEntity = memberMgr.load( 
+                new SimpleId( itemRef.getUuid(), itemRef.getType() ) ).toBlocking().last();
 
         if ( memberEntity == null ) {
             throw new RuntimeException(
@@ -597,18 +638,23 @@ public class CpRelationManager implements RelationManager {
         }
 
         if ( logger.isDebugEnabled() ) {
-            logger.debug( "Loaded member entity {}:{} from scope\n   app {}\n   " + "owner {}\n   name {} data {}",
-                    new Object[] {
-                            itemRef.getType(), itemRef.getUuid(), memberScope.getApplication(), memberScope.getOwner(),
-                            memberScope.getName(), CpEntityMapUtils.toMap( memberEntity )
-                    } );
+            logger.debug( "Loaded member entity {}:{} from scope\n   app {}\n   " 
+                + "owner {}\n   name {} data {}",
+                new Object[] {
+                    itemRef.getType(),
+                    itemRef.getUuid(),
+                    memberScope.getApplication(),
+                    memberScope.getOwner(),
+                    memberScope.getName(),
+                    CpEntityMapUtils.toMap( memberEntity )
+                } );
         }
 
         String edgeType = CpNamingUtils.getEdgeTypeFromCollectionName( collName );
 
-        UUID timeStampUuid =
-                memberEntity.getId().getUuid() != null && UUIDUtils.isTimeBased( memberEntity.getId().getUuid() ) ?
-                memberEntity.getId().getUuid() : UUIDUtils.newTimeUUID();
+        UUID timeStampUuid = memberEntity.getId().getUuid() != null 
+                && UUIDUtils.isTimeBased( memberEntity.getId().getUuid() ) 
+                ?  memberEntity.getId().getUuid() : UUIDUtils.newTimeUUID();
 
         long uuidHash = UUIDUtils.getUUIDLong( timeStampUuid );
 
@@ -617,10 +663,15 @@ public class CpRelationManager implements RelationManager {
         GraphManager gm = managerCache.getGraphManager( applicationScope );
         gm.writeEdge( edge ).toBlockingObservable().last();
 
-        logger.debug( "Wrote edgeType {}\n   from {}:{}\n   to {}:{}\n   scope {}:{}", new Object[] {
-                edgeType, cpHeadEntity.getId().getType(), cpHeadEntity.getId().getUuid(),
-                memberEntity.getId().getType(), memberEntity.getId().getUuid(),
-                applicationScope.getApplication().getType(), applicationScope.getApplication().getUuid()
+        logger.debug( "Wrote edgeType {}\n   from {}:{}\n   to {}:{}\n   scope {}:{}", 
+            new Object[] { 
+                edgeType,
+                cpHeadEntity.getId().getType(),
+                cpHeadEntity.getId().getUuid(),
+                memberEntity.getId().getType(),
+                memberEntity.getId().getUuid(),
+                applicationScope.getApplication().getType(),
+                applicationScope.getApplication().getUuid()
         } );
 
         ( ( CpEntityManager ) em ).indexEntityIntoCollection( cpHeadEntity, memberEntity, collName );
@@ -635,7 +686,8 @@ public class CpRelationManager implements RelationManager {
         //            headEntityScope.getName()});
 
         if ( connectBack && collection != null && collection.getLinkedCollection() != null ) {
-            getRelationManager( itemEntity ).addToCollection( collection.getLinkedCollection(), headEntity, false );
+            getRelationManager( itemEntity ).addToCollection( 
+                    collection.getLinkedCollection(), headEntity, false );
         }
 
         return itemEntity;
@@ -656,7 +708,8 @@ public class CpRelationManager implements RelationManager {
 
     @Override
     @Metered( group = "core", name = "RelationManager_createItemInCollection" )
-    public Entity createItemInCollection( String collName, String itemType, Map<String, Object> properties )
+    public Entity createItemInCollection( 
+            String collName, String itemType, Map<String, Object> properties )
             throws Exception {
 
         if ( headEntity.getUuid().equals( applicationId ) ) {
@@ -675,7 +728,9 @@ public class CpRelationManager implements RelationManager {
             return em.create( itemType, properties );
         }
 
-        else if ( headEntity.getType().equals( Group.ENTITY_TYPE ) && ( collName.equals( COLLECTION_ROLES ) ) ) {
+        else if ( headEntity.getType().equals( Group.ENTITY_TYPE ) 
+                && ( collName.equals( COLLECTION_ROLES ) ) ) {
+
             UUID groupId = headEntity.getUuid();
             String roleName = ( String ) properties.get( PROPERTY_NAME );
             return em.createGroupRole( groupId, roleName, ( Long ) properties.get( PROPERTY_INACTIVITY ) );
@@ -695,7 +750,8 @@ public class CpRelationManager implements RelationManager {
             addToCollection( collName, itemEntity );
 
             if ( collection != null && collection.getLinkedCollection() != null ) {
-                getRelationManager( getHeadEntity() ).addToCollection( collection.getLinkedCollection(), itemEntity );
+                getRelationManager( getHeadEntity() )
+                        .addToCollection( collection.getLinkedCollection(), itemEntity );
             }
         }
 
@@ -723,21 +779,26 @@ public class CpRelationManager implements RelationManager {
         }
 
         // load the entity to be removed to the collection
-        CollectionScope memberScope =
-                new CollectionScopeImpl( this.applicationScope.getApplication(), this.applicationScope.getApplication(),
-                        CpNamingUtils.getCollectionScopeNameFromEntityType( itemRef.getType() ) );
+        CollectionScope memberScope = new CollectionScopeImpl( 
+                this.applicationScope.getApplication(),
+                this.applicationScope.getApplication(),
+                CpNamingUtils.getCollectionScopeNameFromEntityType( itemRef.getType() ) );
         EntityCollectionManager memberMgr = managerCache.getEntityCollectionManager( memberScope );
 
         if ( logger.isDebugEnabled() ) {
-            logger.debug( "Loading entity to remove from collection "
-                            + "{}:{} from scope\n   app {}\n   owner {}\n   name {}", new Object[] {
-                            itemRef.getType(), itemRef.getUuid(), memberScope.getApplication(), memberScope.getOwner(),
-                            memberScope.getName()
-                    } );
+            logger.debug( "Loading entity to remove from collection " 
+                + "{}:{} from scope\n   app {}\n   owner {}\n   name {}", 
+                new Object[] {
+                    itemRef.getType(),
+                    itemRef.getUuid(),
+                    memberScope.getApplication(),
+                    memberScope.getOwner(),
+                    memberScope.getName()
+               });
         }
 
-        org.apache.usergrid.persistence.model.entity.Entity memberEntity =
-                memberMgr.load( new SimpleId( itemRef.getUuid(), itemRef.getType() ) ).toBlockingObservable().last();
+        org.apache.usergrid.persistence.model.entity.Entity memberEntity = memberMgr.load( 
+            new SimpleId( itemRef.getUuid(), itemRef.getType() ) ).toBlockingObservable().last();
 
         final EntityIndex ei = managerCache.getEntityIndex( applicationScope );
         final EntityIndexBatch batch = ei.createBatch();
@@ -760,15 +821,20 @@ public class CpRelationManager implements RelationManager {
 
         // remove edge from collection to item 
         GraphManager gm = managerCache.getGraphManager( applicationScope );
-        Edge collectionToItemEdge =
-                new SimpleEdge( cpHeadEntity.getId(), CpNamingUtils.getEdgeTypeFromCollectionName( collName ),
-                        memberEntity.getId(), UUIDUtils.getUUIDLong( memberEntity.getId().getUuid() ) );
+        Edge collectionToItemEdge = new SimpleEdge( 
+                cpHeadEntity.getId(), 
+                CpNamingUtils.getEdgeTypeFromCollectionName( collName ), 
+                memberEntity.getId(), UUIDUtils.getUUIDLong( memberEntity.getId().getUuid() ) );
         gm.deleteEdge( collectionToItemEdge ).toBlockingObservable().last();
 
         // remove edge from item to collection
-        Edge itemToCollectionEdge = new SimpleEdge( memberEntity.getId(), CpNamingUtils
-                .getEdgeTypeFromCollectionName( Schema.defaultCollectionName( cpHeadEntity.getId().getType() ) ),
-                cpHeadEntity.getId(), UUIDUtils.getUUIDLong( cpHeadEntity.getId().getUuid() ) );
+        Edge itemToCollectionEdge = new SimpleEdge( 
+                memberEntity.getId(), 
+                CpNamingUtils.getEdgeTypeFromCollectionName( 
+                    Schema.defaultCollectionName( cpHeadEntity.getId().getType() ) ), 
+                cpHeadEntity.getId(), 
+                UUIDUtils.getUUIDLong( cpHeadEntity.getId().getUuid() ) );
+
         gm.deleteEdge( itemToCollectionEdge ).toBlockingObservable().last();
 
         // special handling for roles collection of a group
@@ -797,9 +863,11 @@ public class CpRelationManager implements RelationManager {
         headEntity = em.validate( headEntity );
         dstEntityRef = em.validate( dstEntityRef );
 
-        CollectionInfo srcCollection = getDefaultSchema().getCollection( headEntity.getType(), srcRelationName );
+        CollectionInfo srcCollection = 
+                getDefaultSchema().getCollection( headEntity.getType(), srcRelationName );
 
-        CollectionInfo dstCollection = getDefaultSchema().getCollection( dstEntityRef.getType(), dstRelationName );
+        CollectionInfo dstCollection = 
+                getDefaultSchema().getCollection( dstEntityRef.getType(), dstRelationName );
 
         Results results = null;
         do {
@@ -838,9 +906,8 @@ public class CpRelationManager implements RelationManager {
         CollectionInfo collection = getDefaultSchema().getCollection( headEntity.getType(), collName );
 
         if ( collection == null ) {
-            throw new RuntimeException(
-                    "Cannot find collection-info for '" + collName + "' of " + headEntity.getType() + ":" + headEntity
-                            .getUuid() );
+            throw new RuntimeException( "Cannot find collection-info for '" + collName 
+                    + "' of " + headEntity.getType() + ":" + headEntity .getUuid() );
         }
 
         IndexScope indexScope = new IndexScopeImpl( cpHeadEntity.getId(),
@@ -922,29 +989,39 @@ public class CpRelationManager implements RelationManager {
 
         ConnectionRefImpl connection = new ConnectionRefImpl( headEntity, connectionType, connectedEntityRef );
 
-        CollectionScope targetScope =
-                new CollectionScopeImpl( applicationScope.getApplication(), applicationScope.getApplication(),
-                        CpNamingUtils.getCollectionScopeNameFromEntityType( connectedEntityRef.getType() ) );
+        CollectionScope targetScope = new CollectionScopeImpl( 
+            applicationScope.getApplication(), 
+            applicationScope.getApplication(),
+            CpNamingUtils.getCollectionScopeNameFromEntityType( connectedEntityRef.getType() ) );
 
         EntityCollectionManager targetEcm = managerCache.getEntityCollectionManager( targetScope );
 
         if ( logger.isDebugEnabled() ) {
-            logger.debug( "createConnection(): " + "Indexing connection type '{}'\n   from source {}:{}]\n"
-                            + "   to target {}:{}\n   from scope\n   app {}\n   owner {}\n   name {}", new Object[] {
-                            connectionType, headEntity.getType(), headEntity.getUuid(), connectedEntityRef.getType(),
-                            connectedEntityRef.getUuid(), targetScope.getApplication(), targetScope.getOwner(),
-                            targetScope.getName()
-                    } );
+            logger.debug( "createConnection(): " 
+                + "Indexing connection type '{}'\n   from source {}:{}]\n"
+                + "   to target {}:{}\n   from scope\n   app {}\n   owner {}\n   name {}", 
+                new Object[] {
+                    connectionType,
+                    headEntity.getType(),
+                    headEntity.getUuid(),
+                    connectedEntityRef.getType(),
+                    connectedEntityRef.getUuid(),
+                    targetScope.getApplication(), 
+                    targetScope.getOwner(), 
+                    targetScope.getName()
+                } );
         }
 
-        org.apache.usergrid.persistence.model.entity.Entity targetEntity =
-                targetEcm.load( new SimpleId( connectedEntityRef.getUuid(), connectedEntityRef.getType() ) )
-                         .toBlockingObservable().last();
+        org.apache.usergrid.persistence.model.entity.Entity targetEntity = targetEcm.load( 
+            new SimpleId( connectedEntityRef.getUuid(), connectedEntityRef.getType() ) )
+                .toBlockingObservable().last();
 
         String edgeType = CpNamingUtils.getEdgeTypeFromConnectionType( connectionType );
 
         // create graph edge connection from head entity to member entity
-        Edge edge = new SimpleEdge( cpHeadEntity.getId(), edgeType, targetEntity.getId(), System.currentTimeMillis() );
+        Edge edge = new SimpleEdge( 
+                cpHeadEntity.getId(), edgeType, targetEntity.getId(), System.currentTimeMillis() );
+
         GraphManager gm = managerCache.getGraphManager( applicationScope );
         gm.writeEdge( edge ).toBlockingObservable().last();
 
@@ -975,14 +1052,16 @@ public class CpRelationManager implements RelationManager {
 
     @SuppressWarnings( "unchecked" )
     @Metered( group = "core", name = "CpRelationManager_batchUpdateEntityConnection" )
-    public Mutator<ByteBuffer> batchUpdateEntityConnection( Mutator<ByteBuffer> batch, boolean disconnect,
-                                                            ConnectionRefImpl connection, UUID timestampUuid )
-            throws Exception {
+    public Mutator<ByteBuffer> batchUpdateEntityConnection( 
+            Mutator<ByteBuffer> batch, 
+            boolean disconnect, 
+            ConnectionRefImpl conn, 
+            UUID timestampUuid ) throws Exception {
 
         long timestamp = getTimestampInMicros( timestampUuid );
 
-        Entity connectedEntity =
-                em.get( new SimpleEntityRef( connection.getConnectedEntityType(), connection.getConnectedEntityId() ) );
+        Entity connectedEntity = em.get(new SimpleEntityRef( 
+                conn.getConnectedEntityType(), conn.getConnectedEntityId() ) );
 
         if ( connectedEntity == null ) {
             return batch;
@@ -992,59 +1071,59 @@ public class CpRelationManager implements RelationManager {
 
         if ( disconnect ) {
 
-            addDeleteToMutator( batch, ENTITY_COMPOSITE_DICTIONARIES,
-                    key( connection.getConnectingEntityId(), DICTIONARY_CONNECTED_ENTITIES,
-                            connection.getConnectionType() ),
-                    asList( connection.getConnectedEntityId(), connection.getConnectedEntityType() ), timestamp );
+            addDeleteToMutator(batch, ENTITY_COMPOSITE_DICTIONARIES,
+                key(conn.getConnectingEntityId(), DICTIONARY_CONNECTED_ENTITIES,
+                        conn.getConnectionType() ),
+                asList(conn.getConnectedEntityId(), conn.getConnectedEntityType() ), timestamp );
 
-            addDeleteToMutator( batch, ENTITY_COMPOSITE_DICTIONARIES,
-                    key( connection.getConnectedEntityId(), DICTIONARY_CONNECTING_ENTITIES,
-                            connection.getConnectionType() ),
-                    asList( connection.getConnectingEntityId(), connection.getConnectingEntityType() ), timestamp );
+            addDeleteToMutator(batch, ENTITY_COMPOSITE_DICTIONARIES,
+                key(conn.getConnectedEntityId(), DICTIONARY_CONNECTING_ENTITIES,
+                        conn.getConnectionType() ),
+                asList(conn.getConnectingEntityId(), conn.getConnectingEntityType() ), timestamp );
 
             // delete the connection path if there will be no connections left
 
             // check out outbound edges of the given type.  If we have more than the 1 specified,
             // we shouldn't delete the connection types from our outbound index
-            if ( !moreThanOneOutboundConnection( connection.getConnectingEntity(), connection.getConnectionType() ) ) {
+            if ( !moreThanOneOutboundConnection(conn.getConnectingEntity(), conn.getConnectionType() ) ) {
 
-                addDeleteToMutator( batch, ENTITY_DICTIONARIES,
-                        key( connection.getConnectingEntityId(), DICTIONARY_CONNECTED_TYPES ),
-                        connection.getConnectionType(), timestamp );
+                addDeleteToMutator(batch, ENTITY_DICTIONARIES,
+                        key(conn.getConnectingEntityId(), DICTIONARY_CONNECTED_TYPES ),
+                        conn.getConnectionType(), timestamp );
             }
 
             //check out inbound edges of the given type.  If we have more than the 1 specified,
             // we shouldn't delete the connection types from our outbound index
-            if ( !moreThanOneInboundConnection( connection.getConnectingEntity(), connection.getConnectionType() ) ) {
+            if ( !moreThanOneInboundConnection(conn.getConnectingEntity(), conn.getConnectionType() ) ) {
 
-                addDeleteToMutator( batch, ENTITY_DICTIONARIES,
-                        key( connection.getConnectedEntityId(), DICTIONARY_CONNECTING_TYPES ),
-                        connection.getConnectionType(), timestamp );
-            }
+                addDeleteToMutator(batch, ENTITY_DICTIONARIES,
+                    key(conn.getConnectedEntityId(), DICTIONARY_CONNECTING_TYPES ),
+                    conn.getConnectionType(), timestamp );
+        }
         }
         else {
 
-            addInsertToMutator( batch, ENTITY_COMPOSITE_DICTIONARIES,
-                    key( connection.getConnectingEntityId(), DICTIONARY_CONNECTED_ENTITIES,
-                            connection.getConnectionType() ),
-                    asList( connection.getConnectedEntityId(), connection.getConnectedEntityType() ), timestamp,
+            addInsertToMutator(batch, ENTITY_COMPOSITE_DICTIONARIES,
+                    key(conn.getConnectingEntityId(), DICTIONARY_CONNECTED_ENTITIES,
+                            conn.getConnectionType() ),
+                    asList(conn.getConnectedEntityId(), conn.getConnectedEntityType() ), timestamp,
                     timestamp );
 
-            addInsertToMutator( batch, ENTITY_COMPOSITE_DICTIONARIES,
-                    key( connection.getConnectedEntityId(), DICTIONARY_CONNECTING_ENTITIES,
-                            connection.getConnectionType() ),
-                    asList( connection.getConnectingEntityId(), connection.getConnectingEntityType() ), timestamp,
+            addInsertToMutator(batch, ENTITY_COMPOSITE_DICTIONARIES,
+                    key(conn.getConnectedEntityId(), DICTIONARY_CONNECTING_ENTITIES,
+                            conn.getConnectionType() ),
+                    asList(conn.getConnectingEntityId(), conn.getConnectingEntityType() ), timestamp,
                     timestamp );
 
             // Add connection type to connections set
-            addInsertToMutator( batch, ENTITY_DICTIONARIES,
-                    key( connection.getConnectingEntityId(), DICTIONARY_CONNECTED_TYPES ),
-                    connection.getConnectionType(), null, timestamp );
+            addInsertToMutator(batch, ENTITY_DICTIONARIES,
+                    key(conn.getConnectingEntityId(), DICTIONARY_CONNECTED_TYPES ),
+                    conn.getConnectionType(), null, timestamp );
 
             // Add connection type to connections set
-            addInsertToMutator( batch, ENTITY_DICTIONARIES,
-                    key( connection.getConnectedEntityId(), DICTIONARY_CONNECTING_TYPES ),
-                    connection.getConnectionType(), null, timestamp );
+            addInsertToMutator(batch, ENTITY_DICTIONARIES,
+                    key(conn.getConnectedEntityId(), DICTIONARY_CONNECTING_TYPES ),
+                    conn.getConnectionType(), null, timestamp );
         }
 
         // Add indexes for the connected entity's list properties
@@ -1058,17 +1137,20 @@ public class CpRelationManager implements RelationManager {
         Schema schema = getDefaultSchema();
 
         for ( String dictionaryName : dictionaryNames ) {
-            boolean has_dictionary = schema.hasDictionary( connectedEntity.getType(), dictionaryName );
-            boolean dictionary_indexed =
-                    schema.isDictionaryIndexedInConnections( connectedEntity.getType(), dictionaryName );
+
+            boolean has_dictionary = schema.hasDictionary( 
+                    connectedEntity.getType(), dictionaryName );
+
+            boolean dictionary_indexed = schema.isDictionaryIndexedInConnections( 
+                    connectedEntity.getType(), dictionaryName );
 
             if ( dictionary_indexed || !has_dictionary ) {
                 Set<Object> elementValues = em.getDictionaryAsSet( connectedEntity, dictionaryName );
                 for ( Object elementValue : elementValues ) {
-                    IndexUpdate indexUpdate =
-                            batchStartIndexUpdate( batch, connectedEntity, dictionaryName, elementValue, timestampUuid,
-                                    has_dictionary, true, disconnect, false );
-                    batchUpdateConnectionIndex( indexUpdate, connection );
+                    IndexUpdate indexUpdate = batchStartIndexUpdate( 
+                            batch, connectedEntity, dictionaryName, elementValue, 
+                            timestampUuid, has_dictionary, true, disconnect, false );
+                    batchUpdateConnectionIndex(indexUpdate, conn );
                 }
             }
         }
@@ -1079,8 +1161,11 @@ public class CpRelationManager implements RelationManager {
 
     @Override
     @Metered( group = "core", name = "RelationManager_createConnection_paired_connection_type" )
-    public ConnectionRef createConnection( String pairedConnectionType, EntityRef pairedEntity, String connectionType,
-                                           EntityRef connectedEntityRef ) throws Exception {
+    public ConnectionRef createConnection( 
+            String pairedConnectionType,
+            EntityRef pairedEntity,
+            String connectionType,
+            EntityRef connectedEntityRef ) throws Exception {
 
         throw new UnsupportedOperationException( "Paired connections not supported" );
     }
@@ -1095,7 +1180,8 @@ public class CpRelationManager implements RelationManager {
 
 
     @Override
-    public ConnectionRef connectionRef( String connectionType, EntityRef connectedEntityRef ) throws Exception {
+    public ConnectionRef connectionRef( 
+            String connectionType, EntityRef connectedEntityRef ) throws Exception {
 
         ConnectionRef connection = new ConnectionRefImpl( headEntity, connectionType, connectedEntityRef );
 
@@ -1104,8 +1190,11 @@ public class CpRelationManager implements RelationManager {
 
 
     @Override
-    public ConnectionRef connectionRef( String pairedConnectionType, EntityRef pairedEntity, String connectionType,
-                                        EntityRef connectedEntityRef ) throws Exception {
+    public ConnectionRef connectionRef( 
+            String pairedConnectionType,
+            EntityRef pairedEntity,
+            String connectionType,
+            EntityRef connectedEntityRef ) throws Exception {
 
         throw new UnsupportedOperationException( "Paired connections not supported" );
     }
@@ -1124,7 +1213,8 @@ public class CpRelationManager implements RelationManager {
         // First, clean up the dictionary records of the connection
         Keyspace ko = cass.getApplicationKeyspace( applicationId );
         Mutator<ByteBuffer> m = createMutator( ko, be );
-        batchUpdateEntityConnection( m, true, ( ConnectionRefImpl ) connectionRef, UUIDGenerator.newTimeUUID() );
+        batchUpdateEntityConnection( 
+                m, true, ( ConnectionRefImpl ) connectionRef, UUIDGenerator.newTimeUUID() );
         batchExecute( m, CassandraService.RETRY_COUNT );
 
         EntityRef connectingEntityRef = connectionRef.getConnectingEntity();  // source
@@ -1132,26 +1222,36 @@ public class CpRelationManager implements RelationManager {
 
         String connectionType = connectionRef.getConnectedEntity().getConnectionType();
 
-        CollectionScope targetScope =
-                new CollectionScopeImpl( applicationScope.getApplication(), applicationScope.getApplication(),
-                        CpNamingUtils.getCollectionScopeNameFromEntityType( connectedEntityRef.getType() ) );
+        CollectionScope targetScope = new CollectionScopeImpl( 
+                applicationScope.getApplication(),
+                applicationScope.getApplication(),
+                CpNamingUtils.getCollectionScopeNameFromEntityType( connectedEntityRef.getType()) );
 
         EntityCollectionManager targetEcm = managerCache.getEntityCollectionManager( targetScope );
 
         if ( logger.isDebugEnabled() ) {
-            logger.debug( "Deleting connection '{}' from source {}:{} \n   to target {}:{}", new Object[] {
-                            connectionType, connectingEntityRef.getType(), connectingEntityRef.getUuid(),
-                            connectedEntityRef.getType(), connectedEntityRef.getUuid()
+            logger.debug( "Deleting connection '{}' from source {}:{} \n   to target {}:{}", 
+                new Object[] {
+                    connectionType,
+                    connectingEntityRef.getType(),
+                    connectingEntityRef.getUuid(),
+                    connectedEntityRef.getType(),
+                    connectedEntityRef.getUuid()
                     } );
         }
 
-        org.apache.usergrid.persistence.model.entity.Entity targetEntity =
-                targetEcm.load( new SimpleId( connectedEntityRef.getUuid(), connectedEntityRef.getType() ) )
-                         .toBlockingObservable().last();
+        org.apache.usergrid.persistence.model.entity.Entity targetEntity = targetEcm.load( 
+            new SimpleId( connectedEntityRef.getUuid(), connectedEntityRef.getType() ) ) 
+                .toBlockingObservable().last();
 
         // Delete graph edge connection from head entity to member entity
-        Edge edge = new SimpleEdge( new SimpleId( connectingEntityRef.getUuid(), connectingEntityRef.getType() ),
-                connectionType, targetEntity.getId(), System.currentTimeMillis() );
+        Edge edge = new SimpleEdge( 
+            new SimpleId( connectingEntityRef.getUuid(),
+                connectingEntityRef.getType() ),
+                connectionType,
+                targetEntity.getId(),
+                System.currentTimeMillis() );
+
         GraphManager gm = managerCache.getGraphManager( applicationScope );
         gm.deleteEdge( edge ).toBlockingObservable().last();
 
@@ -1159,15 +1259,18 @@ public class CpRelationManager implements RelationManager {
         final EntityIndexBatch batch = ei.createBatch();
 
         // Deindex the connection in app|source|type context
-        IndexScope indexScope =
-                new IndexScopeImpl( new SimpleId( connectingEntityRef.getUuid(), connectingEntityRef.getType() ),
-                        CpNamingUtils.getConnectionScopeName( targetEntity.getId().getType(), connectionType ) );
+        IndexScope indexScope = new IndexScopeImpl( 
+            new SimpleId( connectingEntityRef.getUuid(),
+                connectingEntityRef.getType() ),
+                CpNamingUtils.getConnectionScopeName( targetEntity.getId().getType(),
+                connectionType ) );
         batch.deindex( indexScope, targetEntity );
 
         // Deindex the connection in app|source|type context
-        IndexScope allTypesIndexScope =
-                new IndexScopeImpl( new SimpleId( connectingEntityRef.getUuid(), connectingEntityRef.getType() ),
-                        CpNamingUtils.ALL_TYPES );
+        IndexScope allTypesIndexScope = new IndexScopeImpl( 
+            new SimpleId( connectingEntityRef.getUuid(),
+                connectingEntityRef.getType() ),
+                CpNamingUtils.ALL_TYPES );
 
         batch.deindex( allTypesIndexScope, targetEntity );
 
@@ -1189,7 +1292,8 @@ public class CpRelationManager implements RelationManager {
 
     @Override
     public Set<String> getConnectionTypes( boolean filterConnection ) throws Exception {
-        Set<String> connections = cast( em.getDictionaryAsSet( headEntity, Schema.DICTIONARY_CONNECTED_TYPES ) );
+        Set<String> connections = cast( 
+                em.getDictionaryAsSet( headEntity, Schema.DICTIONARY_CONNECTED_TYPES ) );
 
         if ( connections == null ) {
             return null;
@@ -1202,8 +1306,8 @@ public class CpRelationManager implements RelationManager {
 
 
     @Override
-    public Results getConnectedEntities( String connectionType, String connectedEntityType, Level level )
-            throws Exception {
+    public Results getConnectedEntities( 
+            String connectionType, String connectedEntityType, Level level ) throws Exception {
 
         Results raw = null;
 
@@ -1231,8 +1335,8 @@ public class CpRelationManager implements RelationManager {
             final EntityIndex ei = managerCache.getEntityIndex( applicationScope );
 
 
-            logger.debug( "Searching connected entities from scope {}:{}", indexScope.getOwner().toString(),
-                    indexScope.getName() );
+            logger.debug( "Searching connected entities from scope {}:{}", 
+                    indexScope.getOwner().toString(), indexScope.getName() );
 
             query = adjustQuery( query );
             CandidateResults crs = ei.search( indexScope, query );
@@ -1260,16 +1364,16 @@ public class CpRelationManager implements RelationManager {
 
 
     @Override
-    public Results getConnectingEntities( String connType, String fromEntityType, Level resultsLevel )
-            throws Exception {
+    public Results getConnectingEntities( 
+            String connType, String fromEntityType, Level resultsLevel ) throws Exception {
 
         return getConnectingEntities( connType, fromEntityType, resultsLevel, -1 );
     }
 
 
     @Override
-    public Results getConnectingEntities( String connType, String fromEntityType, Level level, int count )
-            throws Exception {
+    public Results getConnectingEntities( 
+            String connType, String fromEntityType, Level level, int count ) throws Exception {
 
         // looking for edges to the head entity
         String edgeType = CpNamingUtils.getEdgeTypeFromConnectionType( connType );
@@ -1312,12 +1416,14 @@ public class CpRelationManager implements RelationManager {
         if ( query.getEntityType() == null ) {
 
             // search across all types of collections of the head-entity
-            IndexScope indexScope = new IndexScopeImpl( cpHeadEntity.getId(), CpNamingUtils.ALL_TYPES );
+            IndexScope indexScope = new IndexScopeImpl( 
+                cpHeadEntity.getId(), 
+                CpNamingUtils.ALL_TYPES );
 
             EntityIndex ei = managerCache.getEntityIndex( applicationScope );
 
-            logger.debug( "Searching connections from the all-types scope {}:{}", indexScope.getOwner().toString(),
-                    indexScope.getName() );
+            logger.debug( "Searching connections from the all-types scope {}:{}", 
+                    indexScope.getOwner().toString(), indexScope.getName() );
 
             query = adjustQuery( query );
             CandidateResults crs = ei.search( indexScope, query );
@@ -1325,12 +1431,14 @@ public class CpRelationManager implements RelationManager {
             return buildConnectionResults( query, crs, query.getConnectionType() );
         }
 
-        IndexScope indexScope = new IndexScopeImpl( cpHeadEntity.getId(),
-                CpNamingUtils.getConnectionScopeName( query.getEntityType(), query.getConnectionType() ) );
+        IndexScope indexScope = new IndexScopeImpl( 
+            cpHeadEntity.getId(), 
+            CpNamingUtils.getConnectionScopeName( query.getEntityType(), 
+            query.getConnectionType() ) );
         EntityIndex ei = managerCache.getEntityIndex( applicationScope );
-
-        logger.debug( "Searching connections from the scope {}:{}", indexScope.getOwner().toString(),
-                indexScope.getName() );
+        
+        logger.debug( "Searching connections from the scope {}:{}", 
+                indexScope.getOwner().toString(), indexScope.getName() );
 
         query = adjustQuery( query );
         CandidateResults crs = ei.search( indexScope, query );
@@ -1353,29 +1461,31 @@ public class CpRelationManager implements RelationManager {
                 // This is fulgy to put here, but required.
                 if ( query.getEntityType().equals( User.ENTITY_TYPE ) && ident.isEmail() ) {
 
-                    Query newQuery =
-                            Query.fromQL( "select * where email='" + query.getSingleNameOrEmailIdentifier() + "'" );
+                    Query newQuery = Query.fromQL( "select * where email='" 
+                            + query.getSingleNameOrEmailIdentifier() + "'" );
                     query.setRootOperand( newQuery.getRootOperand() );
                 }
 
                 // use the ident with the default alias. could be an email
                 else {
 
-                    Query newQuery =
-                            Query.fromQL( "select * where name='" + query.getSingleNameOrEmailIdentifier() + "'" );
+                    Query newQuery = Query.fromQL( "select * where name='" 
+                            + query.getSingleNameOrEmailIdentifier() + "'" );
                     query.setRootOperand( newQuery.getRootOperand() );
                 }
             }
             else if ( query.containsSingleUuidIdentifier() ) {
 
-                Query newQuery = Query.fromQL( "select * where uuid='" + query.getSingleUuidIdentifier() + "'" );
+                Query newQuery = Query.fromQL( 
+                        "select * where uuid='" + query.getSingleUuidIdentifier() + "'" );
                 query.setRootOperand( newQuery.getRootOperand() );
             }
         }
 
         if ( query.isReversed() ) {
 
-            Query.SortPredicate desc = new Query.SortPredicate( PROPERTY_CREATED, Query.SortDirection.DESCENDING );
+            Query.SortPredicate desc = 
+                new Query.SortPredicate( PROPERTY_CREATED, Query.SortDirection.DESCENDING );
 
             try {
                 query.addSort( desc );
@@ -1387,8 +1497,8 @@ public class CpRelationManager implements RelationManager {
 
         if ( query.getSortPredicates().isEmpty() ) {
 
-            //TODO, should this be descending?
-            Query.SortPredicate asc = new Query.SortPredicate( PROPERTY_CREATED, Query.SortDirection.ASCENDING );
+            Query.SortPredicate asc = 
+                new Query.SortPredicate( PROPERTY_CREATED, Query.SortDirection.DESCENDING);
 
             query.addSort( asc );
         }
@@ -1424,7 +1534,8 @@ public class CpRelationManager implements RelationManager {
     }
 
 
-    private Results buildConnectionResults( Query query, CandidateResults crs, String connectionType ) {
+    private Results buildConnectionResults( 
+            Query query, CandidateResults crs, String connectionType ) {
 
         if ( query.getLevel().equals( Level.ALL_PROPERTIES ) ) {
             return buildResults( query, crs, connectionType );
@@ -1436,9 +1547,11 @@ public class CpRelationManager implements RelationManager {
 
         for ( CandidateResult cr : crs ) {
 
-            SimpleEntityRef targetRef = new SimpleEntityRef( cr.getId().getType(), cr.getId().getUuid() );
+            SimpleEntityRef targetRef = 
+                    new SimpleEntityRef( cr.getId().getType(), cr.getId().getUuid() );
 
-            final ConnectionRef ref = new ConnectionRefImpl( sourceRef, connectionType, targetRef );
+            final ConnectionRef ref = 
+                    new ConnectionRefImpl( sourceRef, connectionType, targetRef );
 
             refs.add( ref );
         }
@@ -1459,8 +1572,8 @@ public class CpRelationManager implements RelationManager {
         logger.debug( "buildResults() for {} from {} candidates", collName, crs.size() );
 
         //get an instance of our results loader
-        final ResultsLoader resultsLoader =
-                this.resultsLoaderFactory.getLoader( applicationScope, this.headEntity, query.getResultsLevel() );
+        final ResultsLoader resultsLoader = this.resultsLoaderFactory.getLoader( 
+                applicationScope, this.headEntity, query.getResultsLevel() );
 
         //load the results
         final Results results = resultsLoader.loadResults( crs );
@@ -1484,11 +1597,11 @@ public class CpRelationManager implements RelationManager {
 
         Entity entity = getHeadEntity();
 
-        elementValue = getDefaultSchema().validateEntitySetValue( entity.getType(), setName, elementValue );
+        elementValue = getDefaultSchema()
+                .validateEntitySetValue( entity.getType(), setName, elementValue );
 
-        IndexUpdate indexUpdate =
-                batchStartIndexUpdate( batch, entity, setName, elementValue, timestampUuid, true, true, removeFromSet,
-                        false );
+        IndexUpdate indexUpdate = batchStartIndexUpdate( batch, entity, setName, elementValue, 
+                timestampUuid, true, true, removeFromSet, false );
 
         // Update collections 
 
@@ -1498,8 +1611,8 @@ public class CpRelationManager implements RelationManager {
         if ( containers != null ) {
             Map<EntityRef, Set<String>> containerEntities = getContainers();
             for ( EntityRef containerEntity : containerEntities.keySet() ) {
-                if ( containerEntity.getType().equals( TYPE_APPLICATION ) && Schema
-                        .isAssociatedEntityType( entity.getType() ) ) {
+                if ( containerEntity.getType().equals( TYPE_APPLICATION ) 
+                        && Schema.isAssociatedEntityType( entity.getType() ) ) {
                     logger.debug( "Extended properties for {} not indexed by application", entity.getType() );
                     continue;
                 }
@@ -1525,25 +1638,28 @@ public class CpRelationManager implements RelationManager {
      * Batch update collection index.
      *
      * @param indexUpdate The update to apply
-     * @param owner The entity that is the owner context of this entity update. Can either be an application, or another
-     * entity
+     * @param owner The entity that is the owner context of this entity update. Can either be an 
+     * application, or another entity
      * @param collectionName the collection name
-     *
+     * 
      * @return The indexUpdate with batch mutations
-     *
      * @throws Exception the exception
      */
     @Metered( group = "core", name = "RelationManager_batchUpdateCollectionIndex" )
-    public IndexUpdate batchUpdateCollectionIndex( IndexUpdate indexUpdate, EntityRef owner, String collectionName )
+    public IndexUpdate batchUpdateCollectionIndex( 
+            IndexUpdate indexUpdate, EntityRef owner, String collectionName )
             throws Exception {
 
         logger.debug( "batchUpdateCollectionIndex" );
 
         Entity indexedEntity = indexUpdate.getEntity();
 
-        String bucketId = indexBucketLocator
-                .getBucket( applicationId, IndexBucketLocator.IndexType.COLLECTION, indexedEntity.getUuid(),
-                        indexedEntity.getType(), indexUpdate.getEntryName() );
+        String bucketId = indexBucketLocator.getBucket( 
+                applicationId, 
+                IndexBucketLocator.IndexType.COLLECTION, 
+                indexedEntity.getUuid(),
+                indexedEntity.getType(), 
+                indexUpdate.getEntryName() );
 
         // the root name without the bucket
         // entity_id,collection_name,prop_name,
@@ -1561,14 +1677,14 @@ public class CpRelationManager implements RelationManager {
 
                 index_key = key( index_name, bucketId );
 
-                addDeleteToMutator( indexUpdate.getBatch(), ENTITY_INDEX, index_key, entry.getIndexComposite(),
-                        indexUpdate.getTimestamp() );
+                addDeleteToMutator( indexUpdate.getBatch(), ENTITY_INDEX, index_key, 
+                        entry.getIndexComposite(), indexUpdate.getTimestamp() );
 
                 if ( "location.coordinates".equals( entry.getPath() ) ) {
-                    EntityLocationRef loc = new EntityLocationRef( indexUpdate.getEntity(), entry.getTimestampUuid(),
-                            entry.getValue().toString() );
-                    batchRemoveLocationFromCollectionIndex( indexUpdate.getBatch(), indexBucketLocator, applicationId,
-                            index_name, loc );
+                    EntityLocationRef loc = new EntityLocationRef( indexUpdate.getEntity(), 
+                            entry.getTimestampUuid(), entry.getValue().toString() );
+                    batchRemoveLocationFromCollectionIndex( indexUpdate.getBatch(), 
+                            indexBucketLocator, applicationId, index_name, loc );
                 }
             }
             else {
@@ -1576,8 +1692,9 @@ public class CpRelationManager implements RelationManager {
             }
         }
 
-        if ( ( indexUpdate.getNewEntries().size() > 0 ) && ( !indexUpdate.isMultiValue() || ( indexUpdate.isMultiValue()
-                && !indexUpdate.isRemoveListEntry() ) ) ) {
+        if ( ( indexUpdate.getNewEntries().size() > 0 ) 
+                && ( !indexUpdate.isMultiValue() 
+                || ( indexUpdate.isMultiValue() && !indexUpdate.isRemoveListEntry() ) ) ) {
 
             for ( IndexUpdate.IndexEntry indexEntry : indexUpdate.getNewEntries() ) {
 
@@ -1589,15 +1706,21 @@ public class CpRelationManager implements RelationManager {
 
                 // int i = 0;
 
-                addInsertToMutator( indexUpdate.getBatch(), ENTITY_INDEX, index_key, indexEntry.getIndexComposite(),
-                        null, indexUpdate.getTimestamp() );
+                addInsertToMutator( indexUpdate.getBatch(), ENTITY_INDEX, index_key, 
+                        indexEntry.getIndexComposite(), null, indexUpdate.getTimestamp() );
 
                 if ( "location.coordinates".equals( indexEntry.getPath() ) ) {
-                    EntityLocationRef loc =
-                            new EntityLocationRef( indexUpdate.getEntity(), indexEntry.getTimestampUuid(),
-                                    indexEntry.getValue().toString() );
-                    batchStoreLocationInCollectionIndex( indexUpdate.getBatch(), indexBucketLocator, applicationId,
-                            index_name, indexedEntity.getUuid(), loc );
+                    EntityLocationRef loc = new EntityLocationRef( 
+                            indexUpdate.getEntity(), 
+                            indexEntry.getTimestampUuid(), 
+                            indexEntry.getValue().toString() );
+                    batchStoreLocationInCollectionIndex( 
+                            indexUpdate.getBatch(),
+                            indexBucketLocator,
+                            applicationId,
+                            index_name,
+                            indexedEntity.getUuid(),
+                            loc );
                 }
 
                 // i++;
@@ -1614,26 +1737,56 @@ public class CpRelationManager implements RelationManager {
     }
 
 
-    public IndexUpdate batchStartIndexUpdate( Mutator<ByteBuffer> batch, Entity entity, String entryName,
-                                              Object entryValue, UUID timestampUuid, boolean schemaHasProperty,
-                                              boolean isMultiValue, boolean removeListEntry, boolean fulltextIndexed )
+    public IndexUpdate batchStartIndexUpdate( 
+            Mutator<ByteBuffer> batch,
+            Entity entity,
+            String entryName,
+            Object entryValue,
+            UUID timestampUuid,
+            boolean schemaHasProperty,
+            boolean isMultiValue,
+            boolean removeListEntry,
+            boolean fulltextIndexed )
             throws Exception {
-        return batchStartIndexUpdate( batch, entity, entryName, entryValue, timestampUuid, schemaHasProperty,
-                isMultiValue, removeListEntry, fulltextIndexed, false );
+
+        return batchStartIndexUpdate( 
+                batch,
+                entity,
+                entryName,
+                entryValue,
+                timestampUuid,
+                schemaHasProperty,
+                isMultiValue,
+                removeListEntry,
+                fulltextIndexed,
+                false );
     }
 
 
     @Metered( group = "core", name = "RelationManager_batchStartIndexUpdate" )
-    public IndexUpdate batchStartIndexUpdate( Mutator<ByteBuffer> batch, Entity entity, String entryName,
-                                              Object entryValue, UUID timestampUuid, boolean schemaHasProperty,
-                                              boolean isMultiValue, boolean removeListEntry, boolean fulltextIndexed,
-                                              boolean skipRead ) throws Exception {
+    public IndexUpdate batchStartIndexUpdate( 
+            Mutator<ByteBuffer> batch,
+            Entity entity,
+            String entryName,
+            Object entryValue,
+            UUID timestampUuid,
+            boolean schemaHasProperty,
+            boolean isMultiValue,
+            boolean removeListEntry,
+            boolean fulltextIndexed,
+            boolean skipRead ) throws Exception {
 
         long timestamp = getTimestampInMicros( timestampUuid );
 
-        IndexUpdate indexUpdate =
-                new IndexUpdate( batch, entity, entryName, entryValue, schemaHasProperty, isMultiValue, removeListEntry,
-                        timestampUuid );
+        IndexUpdate indexUpdate = new IndexUpdate( 
+                batch,
+                entity,
+                entryName,
+                entryValue,
+                schemaHasProperty,
+                isMultiValue,
+                removeListEntry,
+                timestampUuid );
 
         // entryName = entryName.toLowerCase();
 
@@ -1644,17 +1797,30 @@ public class CpRelationManager implements RelationManager {
             List<HColumn<ByteBuffer, ByteBuffer>> entries = null;
 
             if ( isMultiValue && validIndexableValue( entryValue ) ) {
-                entries = cass.getColumns( cass.getApplicationKeyspace( applicationId ), ENTITY_INDEX_ENTRIES,
+                entries = cass.getColumns( 
+                    cass.getApplicationKeyspace( applicationId ), 
+                        ENTITY_INDEX_ENTRIES,
                         entity.getUuid(),
-                        new DynamicComposite( entryName, indexValueCode( entryValue ), toIndexableValue( entryValue ) ),
-                        setGreaterThanEqualityFlag( new DynamicComposite( entryName, indexValueCode( entryValue ),
-                                        toIndexableValue( entryValue ) ) ), INDEX_ENTRY_LIST_COUNT, false );
+                        new DynamicComposite( 
+                            entryName, 
+                            indexValueCode( entryValue ), 
+                            toIndexableValue( entryValue ) ),
+                        setGreaterThanEqualityFlag( 
+                            new DynamicComposite( 
+                                entryName, indexValueCode( entryValue ),
+                                toIndexableValue( entryValue ) ) ), 
+                        INDEX_ENTRY_LIST_COUNT, 
+                        false );
             }
             else {
-                entries = cass.getColumns( cass.getApplicationKeyspace( applicationId ), ENTITY_INDEX_ENTRIES,
-                        entity.getUuid(), new DynamicComposite( entryName ),
-                        setGreaterThanEqualityFlag( new DynamicComposite( entryName ) ), INDEX_ENTRY_LIST_COUNT,
-                        false );
+                entries = cass.getColumns( 
+                    cass.getApplicationKeyspace( applicationId ), 
+                    ENTITY_INDEX_ENTRIES, 
+                    entity.getUuid(), 
+                    new DynamicComposite( entryName ), 
+                    setGreaterThanEqualityFlag( new DynamicComposite( entryName ) ), 
+                    INDEX_ENTRY_LIST_COUNT, 
+                    false );
             }
 
             if ( logger.isDebugEnabled() ) {
@@ -1672,7 +1838,8 @@ public class CpRelationManager implements RelationManager {
                 // new format:
                 // composite(entryName,
                 // value_code,prev_value,prev_timestamp,prev_obj_path) = null
-                DynamicComposite composite = DynamicComposite.fromByteBuffer( entry.getName().duplicate() );
+                DynamicComposite composite = 
+                        DynamicComposite.fromByteBuffer( entry.getName().duplicate() );
                 prev_value = composite.get( 2 );
                 prev_timestamp = ( UUID ) composite.get( 3 );
                 if ( composite.size() > 4 ) {
@@ -1686,7 +1853,8 @@ public class CpRelationManager implements RelationManager {
                         entryPath = entryName + "." + prev_obj_path;
                     }
 
-                    indexUpdate.addPrevEntry( entryPath, prev_value, prev_timestamp, entry.getName().duplicate() );
+                    indexUpdate.addPrevEntry( 
+                            entryPath, prev_value, prev_timestamp, entry.getName().duplicate() );
 
                     // composite(property_value,connected_entity_id,entry_timestamp)
                     // addDeleteToMutator(batch, ENTITY_INDEX_ENTRIES,
@@ -1701,7 +1869,8 @@ public class CpRelationManager implements RelationManager {
 
         if ( !isMultiValue || ( isMultiValue && !removeListEntry ) ) {
 
-            List<Map.Entry<String, Object>> list = IndexUtils.getKeyValueList( entryName, entryValue, fulltextIndexed );
+            List<Map.Entry<String, Object>> list = 
+                    IndexUtils.getKeyValueList( entryName, entryValue, fulltextIndexed );
 
             if ( entryName.equalsIgnoreCase( "location" ) && ( entryValue instanceof Map ) ) {
                 @SuppressWarnings( "rawtypes" ) double latitude =
@@ -1715,14 +1884,15 @@ public class CpRelationManager implements RelationManager {
             for ( Map.Entry<String, Object> indexEntry : list ) {
 
                 if ( validIndexableValue( indexEntry.getValue() ) ) {
-                    indexUpdate.addNewEntry( indexEntry.getKey(), toIndexableValue( indexEntry.getValue() ) );
+                    indexUpdate.addNewEntry( 
+                            indexEntry.getKey(), toIndexableValue( indexEntry.getValue() ) );
                 }
             }
 
             if ( isMultiValue ) {
                 addInsertToMutator( batch, ENTITY_INDEX_ENTRIES, entity.getUuid(),
-                        asList( entryName, indexValueCode( entryValue ), toIndexableValue( entryValue ),
-                                indexUpdate.getTimestampUuid() ), null, timestamp );
+                    asList( entryName, indexValueCode( entryValue ), 
+                    toIndexableValue( entryValue ), indexUpdate.getTimestampUuid() ), null, timestamp );
             }
             else {
                 // int i = 0;
@@ -1740,7 +1910,8 @@ public class CpRelationManager implements RelationManager {
                     byte code = indexValueCode( indexEntry.getValue() );
                     Object val = toIndexableValue( indexEntry.getValue() );
                     addInsertToMutator( batch, ENTITY_INDEX_ENTRIES, entity.getUuid(),
-                            asList( entryName, code, val, indexUpdate.getTimestampUuid(), name ), null, timestamp );
+                            asList( entryName, code, val, indexUpdate.getTimestampUuid(), name ), 
+                            null, timestamp );
 
                     indexUpdate.addIndex( indexEntry.getKey() );
                 }
@@ -1844,10 +2015,17 @@ public class CpRelationManager implements RelationManager {
                 batchDeleteConnectionIndexEntries( indexUpdate, entry, connection, index_keys );
 
                 if ( "location.coordinates".equals( entry.getPath() ) ) {
-                    EntityLocationRef loc = new EntityLocationRef( indexUpdate.getEntity(), entry.getTimestampUuid(),
-                            entry.getValue().toString() );
-                    batchDeleteLocationInConnectionsIndex( indexUpdate.getBatch(), indexBucketLocator, applicationId,
-                            index_keys, entry.getPath(), loc );
+                    EntityLocationRef loc = new EntityLocationRef( 
+                        indexUpdate.getEntity(),
+                        entry.getTimestampUuid(),
+                        entry.getValue().toString() );
+                    batchDeleteLocationInConnectionsIndex( 
+                        indexUpdate.getBatch(),
+                        indexBucketLocator,
+                        applicationId,
+                        index_keys,
+                        entry.getPath(),
+                        loc );
                 }
             }
             else {
@@ -1855,19 +2033,25 @@ public class CpRelationManager implements RelationManager {
             }
         }
 
-        if ( ( indexUpdate.getNewEntries().size() > 0 ) && ( !indexUpdate.isMultiValue() || ( indexUpdate.isMultiValue()
-                && !indexUpdate.isRemoveListEntry() ) ) ) {
+        if ( ( indexUpdate.getNewEntries().size() > 0 ) 
+                && ( !indexUpdate.isMultiValue() || ( indexUpdate.isMultiValue() && !indexUpdate.isRemoveListEntry() ) ) ) {
 
             for ( IndexUpdate.IndexEntry indexEntry : indexUpdate.getNewEntries() ) {
 
                 batchAddConnectionIndexEntries( indexUpdate, indexEntry, connection, index_keys );
 
                 if ( "location.coordinates".equals( indexEntry.getPath() ) ) {
-                    EntityLocationRef loc =
-                            new EntityLocationRef( indexUpdate.getEntity(), indexEntry.getTimestampUuid(),
-                                    indexEntry.getValue().toString() );
-                    batchStoreLocationInConnectionsIndex( indexUpdate.getBatch(), indexBucketLocator, applicationId,
-                            index_keys, indexEntry.getPath(), loc );
+                    EntityLocationRef loc = new EntityLocationRef( 
+                        indexUpdate.getEntity(),
+                        indexEntry.getTimestampUuid(),
+                        indexEntry.getValue().toString() );
+                    batchStoreLocationInConnectionsIndex( 
+                        indexUpdate.getBatch(),
+                            indexBucketLocator,
+                            applicationId,
+                            index_keys,
+                            indexEntry.getPath(),
+                            loc );
                 }
             }
 
@@ -1897,10 +2081,11 @@ public class CpRelationManager implements RelationManager {
      *
      * @return connectionType The name of the edges to search
      */
-    private PagingResultsIterator getReversedConnectionsIterator( EntityRef targetEntity, String connectionType )
-            throws Exception {
+    private PagingResultsIterator getReversedConnectionsIterator( 
+            EntityRef targetEntity, String connectionType ) throws Exception {
 
-        return new PagingResultsIterator( getConnectingEntities( targetEntity, connectionType, null, Level.REFS ) );
+        return new PagingResultsIterator( 
+                getConnectingEntities( targetEntity, connectionType, null, Level.REFS ) );
     }
 
 
@@ -1912,8 +2097,11 @@ public class CpRelationManager implements RelationManager {
      * @param connectedEntityType The connected entity type, if not specified all types are returned
      * @param resultsLevel The results level to return
      */
-    private Results getConnectingEntities( EntityRef targetEntity, String connectionType, String connectedEntityType,
-                                           Level resultsLevel ) throws Exception {
+    private Results getConnectingEntities( 
+        EntityRef targetEntity,
+            String connectionType,
+            String connectedEntityType,
+            Level resultsLevel ) throws Exception {
 
         return getConnectingEntities( targetEntity, connectionType, connectedEntityType, resultsLevel, 0 );
     }
@@ -1927,16 +2115,21 @@ public class CpRelationManager implements RelationManager {
      * @param connectedEntityType The connected entity type, if not specified all types are returned
      * @param count result limit
      */
-    private Results getConnectingEntities( EntityRef targetEntity, String connectionType, String connectedEntityType,
-                                           Level level, int count ) throws Exception {
+    private Results getConnectingEntities( 
+            EntityRef targetEntity,
+            String connectionType,
+            String connectedEntityType,
+            Level level,
+            int count ) throws Exception {
 
         Query query = new Query();
         query.setResultsLevel( level );
         query.setLimit( count );
 
-        final ConnectionRefImpl connectionRef =
-                new ConnectionRefImpl( new SimpleEntityRef( connectedEntityType, null ), connectionType, targetEntity );
-        final ConnectionResultsLoaderFactory factory = new ConnectionResultsLoaderFactory( connectionRef );
+        final ConnectionRefImpl connectionRef = new ConnectionRefImpl( 
+                new SimpleEntityRef( connectedEntityType, null ), connectionType, targetEntity );
+        final ConnectionResultsLoaderFactory factory = 
+                new ConnectionResultsLoaderFactory( connectionRef );
 
         QueryProcessorImpl qp = new QueryProcessorImpl( query, null, em, factory );
         SearchConnectionVisitor visitor = new SearchConnectionVisitor( qp, connectionRef, false );
@@ -1946,9 +2139,11 @@ public class CpRelationManager implements RelationManager {
 
 
     @Metered( group = "core", name = "RelationManager_batchDeleteConnectionIndexEntries" )
-    public Mutator<ByteBuffer> batchDeleteConnectionIndexEntries( IndexUpdate indexUpdate, IndexUpdate.IndexEntry entry,
-                                                                  ConnectionRefImpl connection, UUID[] index_keys )
-            throws Exception {
+    public Mutator<ByteBuffer> batchDeleteConnectionIndexEntries( 
+            IndexUpdate indexUpdate,
+            IndexUpdate.IndexEntry entry,
+            ConnectionRefImpl connection,
+            UUID[] index_keys ) throws Exception {
 
         logger.debug( "batchDeleteConnectionIndexEntries" );
 
@@ -2059,15 +2254,16 @@ public class CpRelationManager implements RelationManager {
 
         private final ConnectionRefImpl connection;
 
-        /** True if we should search from source->target edges.  False if we should search from target<-source edges */
+        /** True if we should search from source->target edges.  
+         * False if we should search from target<-source edges */
         private final boolean outgoing;
 
 
         /**
          * @param queryProcessor The query processor to use
          * @param connection The connection reference
-         * @param outgoing The direction to search.  True if we should search from source->target edges.  False if we
-         * should search from target<-source edges
+         * @param outgoing The direction to search.  True if we should search from source->target 
+         * edges.  False if we should search from target<-source edges
          */
         public SearchConnectionVisitor( QueryProcessorImpl queryProcessor, ConnectionRefImpl connection,
                                         boolean outgoing ) {
@@ -2085,8 +2281,12 @@ public class CpRelationManager implements RelationManager {
         @Override
         protected IndexScanner secondaryIndexScan( QueryNode node, QuerySlice slice ) throws Exception {
 
-            UUID id = ConnectionRefImpl.getIndexId( ConnectionRefImpl.BY_CONNECTION_AND_ENTITY_TYPE, headEntity,
-                    connection.getConnectionType(), connection.getConnectedEntityType(), new ConnectedEntityRef[0] );
+            UUID id = ConnectionRefImpl.getIndexId( 
+                    ConnectionRefImpl.BY_CONNECTION_AND_ENTITY_TYPE,
+                    headEntity,
+                    connection.getConnectionType(),
+                    connection.getConnectedEntityType(),
+                    new ConnectedEntityRef[0] );
 
             Object key = key( id, INDEX_CONNECTIONS );
 
@@ -2120,10 +2320,13 @@ public class CpRelationManager implements RelationManager {
 
             queryProcessor.applyCursorAndSort( slice );
 
-            GeoIterator itr =
-                    new GeoIterator( new ConnectionGeoSearch( em, indexBucketLocator, cass, connection.getIndexId() ),
-                            query.getLimit(), slice, node.getPropertyName(),
-                            new Point( node.getLattitude(), node.getLongitude() ), node.getDistance() );
+            GeoIterator itr = new GeoIterator( 
+                new ConnectionGeoSearch( em, indexBucketLocator, cass, connection.getIndexId() ), 
+                query.getLimit(),
+                slice,
+                node.getPropertyName(),
+                new Point( node.getLattitude(), node.getLongitude() ),
+                node.getDistance() );
 
             results.push( itr );
         }
@@ -2181,12 +2384,20 @@ public class CpRelationManager implements RelationManager {
 
             //we need to iterate all connection types
             else {
-                connectionTypes = new ConnectionTypesIterator( cass, applicationId, entityIdToUse, outgoing, size );
+                connectionTypes = new ConnectionTypesIterator( 
+                        cass, applicationId, entityIdToUse, outgoing, size );
             }
 
-            IndexScanner connectionScanner =
-                    new ConnectedIndexScanner( cass, dictionaryType, applicationId, entityIdToUse, connectionTypes,
-                            start, slice.isReversed(), size, skipFirst );
+            IndexScanner connectionScanner = new ConnectedIndexScanner( 
+                    cass,
+                    dictionaryType,
+                    applicationId,
+                    entityIdToUse,
+                    connectionTypes,
+                    start,
+                    slice.isReversed(),
+                    size,
+                    skipFirst );
 
             this.results.push( new SliceIterator( slice, connectionScanner, connectionParser ) );
         }
@@ -2214,9 +2425,19 @@ public class CpRelationManager implements RelationManager {
 
         Object keyPrefix = key( indexKey, slice.getPropertyName() );
 
-        IndexScanner scanner = new IndexBucketScanner( cass, indexBucketLocator, ENTITY_INDEX, applicationId,
-                IndexBucketLocator.IndexType.CONNECTION, keyPrefix, range[0], range[1], slice.isReversed(), pageSize,
-                slice.hasCursor(), slice.getPropertyName() );
+        IndexScanner scanner = new IndexBucketScanner( 
+                cass,
+                indexBucketLocator,
+                ENTITY_INDEX,
+                applicationId,
+                IndexBucketLocator.IndexType.CONNECTION,
+                keyPrefix,
+                range[0],
+                range[1],
+                slice.isReversed(),
+                pageSize,
+                slice.hasCursor(),
+                slice.getPropertyName() );
 
         return scanner;
     }


[02/12] git commit: fixes to push test

Posted by sn...@apache.org.
fixes to push test


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/c9d6b7e1
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/c9d6b7e1
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/c9d6b7e1

Branch: refs/heads/two-dot-o-events
Commit: c9d6b7e1168125e7ecf3e85b67c82eb25d7b120d
Parents: fc3c42c
Author: amuramoto <am...@apigee.com>
Authored: Mon Oct 27 10:08:01 2014 -0700
Committer: amuramoto <am...@apigee.com>
Committed: Mon Oct 27 10:08:01 2014 -0700

----------------------------------------------------------------------
 stack/loadtests/README.md                       |  53 -----
 stack/loadtests/gatling/LICENSE                 | 202 -------------------
 stack/loadtests/gatling/conf/application.conf   |  21 --
 stack/loadtests/gatling/conf/gatling.conf       | 162 ---------------
 stack/loadtests/gatling/conf/logback.xml        |  35 ----
 stack/loadtests/gatling/conf/recorder.conf      |  51 -----
 .../gatling/lib/Saxon-HE-9.5.1-6-compressed.jar | Bin 3813075 -> 0 bytes
 .../gatling/lib/akka-actor_2.10-2.3.6.jar       | Bin 2583959 -> 0 bytes
 .../lib/async-http-client-1.9.0-BETA13.jar      | Bin 579954 -> 0 bytes
 stack/loadtests/gatling/lib/boon-0.26.jar       | Bin 1026950 -> 0 bytes
 .../loadtests/gatling/lib/commons-pool-1.6.jar  | Bin 111119 -> 0 bytes
 .../lib/compiler-interface-0.13.5-sources.jar   | Bin 30056 -> 0 bytes
 .../lib/concurrentlinkedhashmap-lru-1.4.jar     | Bin 116575 -> 0 bytes
 stack/loadtests/gatling/lib/config-1.2.1.jar    | Bin 219554 -> 0 bytes
 .../gatling/lib/fastring_2.10-0.2.4.jar         | Bin 98640 -> 0 bytes
 .../gatling/lib/gatling-app-2.0.0-RC5.jar       | Bin 73052 -> 0 bytes
 .../gatling/lib/gatling-charts-2.0.0-RC5.jar    | Bin 500609 -> 0 bytes
 .../lib/gatling-charts-highcharts-2.0.0-RC5.jar | Bin 214683 -> 0 bytes
 .../gatling/lib/gatling-core-2.0.0-RC5.jar      | Bin 1678475 -> 0 bytes
 .../gatling/lib/gatling-http-2.0.0-RC5.jar      | Bin 1222752 -> 0 bytes
 .../gatling/lib/gatling-jdbc-2.0.0-RC5.jar      | Bin 41648 -> 0 bytes
 .../gatling/lib/gatling-jms-2.0.0-RC5.jar       | Bin 174279 -> 0 bytes
 .../gatling/lib/gatling-metrics-2.0.0-RC5.jar   | Bin 72446 -> 0 bytes
 .../gatling/lib/gatling-recorder-2.0.0-RC5.jar  | Bin 815471 -> 0 bytes
 .../gatling/lib/gatling-redis-2.0.0-RC5.jar     | Bin 19970 -> 0 bytes
 .../gatling/lib/geronimo-jms_1.1_spec-1.1.1.jar | Bin 32359 -> 0 bytes
 .../gatling/lib/incremental-compiler-0.13.5.jar | Bin 2214694 -> 0 bytes
 .../gatling/lib/jackson-annotations-2.4.0.jar   | Bin 38605 -> 0 bytes
 .../gatling/lib/jackson-core-2.4.2.jar          | Bin 225316 -> 0 bytes
 .../gatling/lib/jackson-databind-2.4.2.jar      | Bin 1075759 -> 0 bytes
 stack/loadtests/gatling/lib/jodd-core-3.6.jar   | Bin 373882 -> 0 bytes
 .../loadtests/gatling/lib/jodd-lagarto-3.6.jar  | Bin 204738 -> 0 bytes
 stack/loadtests/gatling/lib/jodd-log-3.6.jar    | Bin 14547 -> 0 bytes
 .../gatling/lib/jsonpath_2.10-0.5.0.jar         | Bin 180090 -> 0 bytes
 stack/loadtests/gatling/lib/jzlib-1.1.3.jar     | Bin 71976 -> 0 bytes
 .../gatling/lib/logback-classic-1.1.2.jar       | Bin 270750 -> 0 bytes
 .../gatling/lib/logback-core-1.1.2.jar          | Bin 427729 -> 0 bytes
 .../loadtests/gatling/lib/netty-3.9.4.Final.jar | Bin 1310154 -> 0 bytes
 stack/loadtests/gatling/lib/opencsv-2.3.jar     | Bin 19827 -> 0 bytes
 .../gatling/lib/redisclient_2.10-2.13.jar       | Bin 712616 -> 0 bytes
 .../gatling/lib/sbt-interface-0.13.5.jar        | Bin 52012 -> 0 bytes
 stack/loadtests/gatling/lib/scala-compiler.jar  | Bin 14445780 -> 0 bytes
 stack/loadtests/gatling/lib/scala-library.jar   | Bin 7126372 -> 0 bytes
 stack/loadtests/gatling/lib/scala-reflect.jar   | Bin 3203471 -> 0 bytes
 .../gatling/lib/scala-swing-2.10.4.jar          | Bin 707298 -> 0 bytes
 .../lib/scalalogging-slf4j_2.10-1.1.0.jar       | Bin 79003 -> 0 bytes
 .../loadtests/gatling/lib/scopt_2.10-3.2.0.jar  | Bin 122918 -> 0 bytes
 stack/loadtests/gatling/lib/slf4j-api-1.7.7.jar | Bin 29257 -> 0 bytes
 stack/loadtests/gatling/lib/t-digest-3.0.jar    | Bin 49754 -> 0 bytes
 stack/loadtests/gatling/lib/threetenbp-1.0.jar  | Bin 507797 -> 0 bytes
 .../gatling/lib/uncommons-maths-1.2.3.jar       | Bin 49923 -> 0 bytes
 stack/loadtests/gatling/lib/zinc-0.3.5.3.jar    | Bin 392810 -> 0 bytes
 stack/loadtests/gatling/scripts/gatling-ug.sh   |  49 -----
 .../gatling/user-files/data/search.csv          |   3 -
 .../gatling/user-files/request-bodies/.keep     |   0
 stack/loadtests/loadtest_setup.sh               |  45 -----
 .../data-generators/EntityDataGenerator.scala   |  57 ------
 .../data-generators/FeederGenerator.scala       | 101 ----------
 .../scenarios/ApplicationScenarios.scala        |  45 -----
 .../scenarios/ConnectionScenarios.scala         |  30 ---
 .../usergrid/scenarios/DeviceScenarios.scala    |  65 ------
 .../usergrid/scenarios/GeoScenarios.scala       |  43 ----
 .../scenarios/NotificationScenarios.scala       |  71 -------
 .../usergrid/scenarios/NotifierScenarios.scala  |  65 ------
 .../scenarios/OrganizationScenarios.scala       |  42 ----
 .../usergrid/scenarios/TokenScenarios.scala     |  59 ------
 .../usergrid/scenarios/UserScenarios.scala      |  50 -----
 .../org/apache/usergrid/settings/Headers.scala  |  43 ----
 .../org/apache/usergrid/settings/Settings.scala |  54 -----
 .../org/apache/usergrid/settings/Utils.scala    |  87 --------
 .../simulations/GetEntitySimulation.scala       |  41 ----
 .../simulations/PostDevicesSimulation.scala     |  42 ----
 .../simulations/PostUsersSimulation.scala       |  47 -----
 .../PushTargetDeviceSimulation.scala            |  53 -----
 .../simulations/PushTargetUserSimulation.scala  |  68 -------
 75 files changed, 1684 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/README.md
----------------------------------------------------------------------
diff --git a/stack/loadtests/README.md b/stack/loadtests/README.md
deleted file mode 100644
index 0c1774a..0000000
--- a/stack/loadtests/README.md
+++ /dev/null
@@ -1,53 +0,0 @@
-To make it easy for you to load test your instance of Usergrid, we have bundled in the Gatling load test tool, along with some pre-built tests of different functionality. To get started, do the following:
-
-### Setting up Gatling
-1. Unzip loadtest.zip
-2. cd to the 'gatling' dir
-3. Run 'sh loadtest_setup.sh'. This will do the following:
-	- Add some handy options to gatling/bin/gatling.sh that will allow you to set certain test parameters using environment variables (more on this later)
-	- Run the PostUsersSimulation, which will load 5k users with geolocation data into a specified UG org/app. This is just to seed some data entities to make it easier to run some of the tests.
-4. Set the following environment variables:
-- GATLING_BASE_URL - Required. UG base url, e.g. http://api.usergrid.com/.
-- GATLING_ORG      - Required. UG organization name.
-- GATLING_APP      - Required. UG application name.
-
-- GATLING_NUMUSERS - Number of users in the simulation. Default is 100.
-- GATLING_DURATION - Duration of the simulation. Default is 300.
-- GATLING_RAMPTIME - Time period to inject the users over. Default is 0.
-- GATLING_THROTTLE - Requests per second the simulation will try to reach. Default is 50.
-
-- GATLING_NOTIFIER - Name of the notifier to use for PushNotificationSimulation.
-- GATLING_PROVIDER - Push notification provider that corresponds to the notifier, e.g. apple, google, etc.
-
-### Running load tests
-To run Gatling, do the following:
-1. Run 'gatling/bin/gatling.sh'
-2. Enter the number of the test you want to run from the list (see below for an explanation of each test)
-3. Optional. Set an identifier for the results of this run of the simulation
-4. Optional. Set a description for this run of the simulation
-
-### Viewing results
-Results of the test are output to the gatling/results directory. The output directory and the location of the generated report are shown once the test has successfully run.
-
-### Default tests
-The following default tests are available. Note that the GATLING_BASE_URL, GATLING_ORG, and GATLING_APP environment variables must be set before any tests can be run. Each test also requires certain additional env variables to be set (a hedged sketch of how a simulation might read these variables follows this file's diff).
-
-- PostUsersSimulation
-
-POSTs 5k entities with geolocation data to /users. Entities are named sequentially, i.e. user1, user2, etc.
-
-- GetEntitySimulation
-
-Performs simple GETs on the /users collection. You should run PostUsersSimulation or loadtest_setup.sh first to load data into the collection.
-
-- PostDevicesSimulation
-
-POSTs a user-specified number of entities to the /devices collection. This is useful if you want to load test push notifications.
-
-- PushTargetDeviceSimulation
-
-Creates users and devices, connects users with devices, then sends a push notification to all user devices. To run this, you will need to create a notifier, then set the GATLING_NOTIFIER environment variable to the name or UUID of the notifier. You'll also need to set GATLING_PROVIDER to match the provider in the notifier.
-
-- PushTargetUserSimulation
-
-Sends push notifications. To run this, you will need to do create a notifier, then set the GATLING_NOTIFIER environment variable to equal the name or UUID of the notifier. You'll also need to set GATLING_PROVIDER to match the provider in the notifier.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/LICENSE
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/LICENSE b/stack/loadtests/gatling/LICENSE
deleted file mode 100644
index a82aed2..0000000
--- a/stack/loadtests/gatling/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright 2012 eBusiness Information (Excilys Group)
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/conf/application.conf
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/conf/application.conf b/stack/loadtests/gatling/conf/application.conf
deleted file mode 100644
index 6bb275a..0000000
--- a/stack/loadtests/gatling/conf/application.conf
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#               http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-akka {
-  actor {
-    default-dispatcher {
-      throughput = 20
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/conf/gatling.conf
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/conf/gatling.conf b/stack/loadtests/gatling/conf/gatling.conf
deleted file mode 100755
index ea8f754..0000000
--- a/stack/loadtests/gatling/conf/gatling.conf
+++ /dev/null
@@ -1,162 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#               http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#########################
-# Gatling Configuration #
-#########################
-
-# This file contains all the settings configurable for Gatling with their default values
-
-gatling {
-  core {
-    #outputDirectoryBaseName = "" # The prefix for each simulation result folder (then suffixed by the report generation timestamp)
-    #runDescription = ""          # The description for this simulation run, displayed in each report
-    #encoding = "utf-8"           # Encoding to use throughout Gatling for file and string manipulation
-    #simulationClass = ""         # The FQCN of the simulation to run (when used in conjunction with noReports, the simulation for which assertions will be validated)
-    #disableCompiler = false      # When set to true, skip compiling and load an already compiled simulation (used in conjunction with simulationClass)
-    #mute = false                 # When set to true, don't ask for simulation name nor run description (currently only used by Gatling SBT plugin)
-
-    extract {
-      regex {
-        #cacheMaxCapacity = 200 # Cache size for the compiled regexes, set to 0 to disable caching
-      }
-      xpath {
-        #cacheMaxCapacity = 200 # Cache size for the compiled XPath queries,  set to 0 to disable caching
-      }
-      jsonPath {
-        #cacheMaxCapacity = 200 # Cache size for the compiled jsonPath queries, set to 0 to disable caching
-        #preferJackson = false  # When set to true, prefer Jackson over Boon for JSON-related operations
-        jackson {
-          #allowComments = false           # Allow comments in JSON files
-          #allowUnquotedFieldNames = false # Allow unquoted JSON fields names
-          #allowSingleQuotes = false       # Allow single quoted JSON field names
-        }
-
-      }
-      css {
-        #cacheMaxCapacity = 200 # Cache size for the compiled CSS selectors queries,  set to 0 to disable caching
-      }
-    }
-
-    timeOut {
-      #simulation = 8640000 # Absolute timeout, in seconds, of a simulation
-    }
-    directory {
-      #data = user-files/data                    # Folder where user's data (e.g. files used by Feeders) is located
-      #requestBodies = user-files/request-bodies # Folder where request bodies are located
-      simulations = ../../simulations      # Folder where the bundle's simulations are located
-      #reportsOnly = ""                          # If set, name of report folder to look for in order to generate its report
-      #binaries = ""                             # If set, name of the folder where compiles classes are located
-      #results = results                         # Name of the folder where all reports folder are located
-    }
-    zinc {
-      #jvmArgs = "-Xss10M" # JVM args passed to Zinc (in charge of compiling Gatling Simulations)
-    }
-  }
-  charting {
-    #noReports = false       # When set to true, don't generate HTML reports
-    #maxPlotPerSeries = 1000 # Number of points per graph in Gatling reports
-    #accuracy = 10           # Accuracy, in milliseconds, of the report's stats
-    indicators {
-      #lowerBound = 800      # Lower bound for the requests' response time to track in the reports and the console summary
-      #higherBound = 1200    # Higher bound for the requests' response time to track in the reports and the console summary
-      #percentile1 = 95      # Value for the first percentile to track in the reports, the console summary and GraphiteDataWriter
-      #percentile2 = 99      # Value for the second percentile to track in the reports, the console summary and GraphiteDataWriter
-    }
-  }
-  http {
-    #elFileBodiesCacheMaxCapacity = 200        # Cache size for request body EL templates, set to 0 to disable
-    #rawFileBodiesCacheMaxCapacity = 200       # Cache size for request body Raw templates, set to 0 to disable
-    #fetchedCssCacheMaxCapacity = 200          # Cache size for CSS parsed content, set to 0 to disable
-    #fetchedHtmlCacheMaxCapacity = 200         # Cache size for HTML parsed content, set to 0 to disable
-    #redirectPerUserCacheMaxCapacity = 200     # Per virtual user cache size for permanent redirects, set to 0 to disable
-    #expirePerUserCacheMaxCapacity = 200       # Per virtual user cache size for permanent 'Expire' headers, set to 0 to disable
-    #lastModifiedPerUserCacheMaxCapacity = 200 # Per virtual user cache size for permanent 'Last-Modified' headers, set to 0 to disable
-    #etagPerUserCacheMaxCapacity = 200         # Per virtual user cache size for permanent ETag headers, set to 0 to disable
-    #warmUpUrl = "http://goo.gl/xUrsE"         # The URL to use to warm-up the HTTP stack (blank means disabled)
-    ssl {
-      trustStore {
-        #type = ""      # Type of SSLContext's TrustManagers store
-        #file = ""      # Location of SSLContext's TrustManagers store
-        #password = ""  # Password for SSLContext's TrustManagers store
-        #algorithm = "" # Algorithm used by SSLContext's TrustManagers store
-      }
-      keyStore {
-        #type = ""      # Type of SSLContext's KeyManagers store
-        #file = ""      # Location of SSLContext's KeyManagers store
-        #password = ""  # Password for SSLContext's KeyManagers store
-        #algorithm = "" # Algorithm used by SSLContext's KeyManagers store
-      }
-    }
-    ahc {
-      #allowPoolingConnections = true             # Allow pooling HTTP connections (keep-alive header automatically added)
-      #allowPoolingSslConnections = true          # Allow pooling HTTPS connections (keep-alive header automatically added)
-      #compressionEnabled = true                  # Support gzipped responses
-      #connectionTimeout = 60000                  # Timeout when establishing a connection
-      #pooledConnectionIdleTimeout = 60000        # Timeout when a connection stays unused in the pool
-      #readTimeout = 60000                        # Timeout when a used connection stays idle
-      #connectionTTL = -1                         # Max duration a connection can stay open (-1 means no limit)
-      #ioThreadMultiplier = 2                     # Number of Netty worker threads per core
-      #maxConnectionsPerHost = -1                 # Max number of connections per host (-1 means no limit)
-      #maxConnections = -1                        # Max number of connections (-1 means no limit)
-      #maxRetry = 4                               # Number of times that a request should be tried again
-      #requestTimeout = 60000                     # Timeout of the requests
-      #useProxyProperties = false                 # When set to true, supports standard Proxy System properties
-      #webSocketTimeout = 60000                   # Timeout when a used websocket connection stays idle
-      #useRelativeURIsWithConnectProxies = true   # When set to true, use relative URIs when talking with an SSL proxy or a WebSocket proxy
-      #acceptAnyCertificate = true                # When set to true, doesn't validate SSL certificates
-      #httpClientCodecMaxInitialLineLength = 4096 # Maximum length of the initial line of the response (e.g. "HTTP/1.0 200 OK")
-      #httpClientCodecMaxHeaderSize = 8192        # Maximum size, in bytes, of each request's headers
-      #httpClientCodecMaxChunkSize = 8192         # Maximum length of the content of each chunk
-    }
-  }
-  data {
-    #writers = "console, file" # The list of DataWriters to which Gatling writes simulation data (currently supported : "console", "file", "graphite", "jdbc")
-    #reader = file             # The DataReader used by the charting engine for reading simulation results
-    console {
-      #light = false           # When set to true, displays a light version without detailed request stats
-    }
-    file {
-      #bufferSize = 8192       # FileDataWriter's internal data buffer size, in bytes
-    }
-    jdbc {
-      db {
-        #url = "jdbc:mysql://localhost:3306/temp" # The JDBC URL used by the JDBC DataWriter
-        #username = "root"                        # The database user used by the JDBC DataWriter
-        #password = "123123q"                     # The password for the specified user
-      }
-      #bufferSize = 20                            # The size for each batch of SQL inserts to send to the database
-      create {
-        #createRunRecordTable = "CREATE TABLE IF NOT EXISTS `RunRecords` ( `id` INT NOT NULL AUTO_INCREMENT , `runDate` DATETIME NULL , `simulationId` VARCHAR(45) NULL , `runDescription` VARCHAR(45) NULL , PRIMARY KEY (`id`) )"
-        #createRequestRecordTable = "CREATE TABLE IF NOT EXISTS `RequestRecords` (`id` int(11) NOT NULL AUTO_INCREMENT, `runId` int DEFAULT NULL, `scenario` varchar(45) DEFAULT NULL, `userId` VARCHAR(20) NULL, `name` varchar(50) DEFAULT NULL, `requestStartDate` bigint DEFAULT NULL, `requestEndDate` bigint DEFAULT NULL, `responseStartDate` bigint DEFAULT NULL, `responseEndDate` bigint DEFAULT NULL, `status` varchar(2) DEFAULT NULL, `message` varchar(4500) DEFAULT NULL, `responseTime` bigint DEFAULT NULL, PRIMARY KEY (`id`) )"
-        #createScenarioRecordTable = "CREATE TABLE IF NOT EXISTS `ScenarioRecords` (`id` int(11) NOT NULL AUTO_INCREMENT, `runId` int DEFAULT NULL, `scenarioName` varchar(45) DEFAULT NULL, `userId` VARCHAR(20) NULL, `event` varchar(50) DEFAULT NULL, `startDate` bigint DEFAULT NULL, `endDate` bigint DEFAULT NULL, PRIMARY KEY (`id`) )"
-        #createGroupRecordTable = "CREATE TABLE IF NOT EXISTS `GroupRecords` (`id` int(11) NOT NULL AUTO_INCREMENT, `runId` int DEFAULT NULL, `scenarioName` varchar(45) DEFAULT NULL, `userId` VARCHAR(20) NULL, `entryDate` bigint DEFAULT NULL, `exitDate` bigint DEFAULT NULL, `status` varchar(2) DEFAULT NULL, PRIMARY KEY (`id`) )"
-      }
-      insert {
-        #insertRunRecord = "INSERT INTO RunRecords (runDate, simulationId, runDescription) VALUES (?,?,?)"
-        #insertRequestRecord = "INSERT INTO RequestRecords (runId, scenario, userId, name, requestStartDate, requestEndDate, responseStartDate, responseEndDate, status, message, responseTime) VALUES (?,?,?,?,?,?,?,?,?,?,?)"
-        #insertScenarioRecord = "INSERT INTO ScenarioRecords (runId, scenarioName, userId, event, startDate, endDate) VALUES (?,?,?,?,?,?)"
-        #insertGroupRecord = "INSERT INTO GroupRecords (runId, scenarioName, userId, entryDate, exitDate, status) VALUES (?,?,?,?,?,?)"
-      }
-    }
-    graphite {
-      #light = false              # only send the all* stats
-      #host = "localhost"         # The host where the Carbon server is located
-      #port = 2003                # The port on which the Carbon server listens
-      #protocol = "tcp"           # The protocol used to send data to Carbon (currently supported : "tcp", "udp")
-      #rootPathPrefix = "gatling" # The common prefix of all metrics sent to Graphite
-      #bufferSize = 8192          # GraphiteDataWriter's internal data buffer size, in bytes
-    }
-  }
-}
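
The core.extract.jsonPath settings above tune the JSON extractor behind Gatling's jsonPath checks. A minimal sketch of the kind of check those settings govern, assuming a hypothetical request whose response body carries an entities array; the path, collection name, and saved key are illustrative, not taken from this repository:

    import io.gatling.core.Predef._
    import io.gatling.http.Predef._

    object JsonPathCheckSketch {
      // The jsonPath extractor (cached per core.extract.jsonPath.cacheMaxCapacity,
      // optionally backed by Jackson instead of Boon) pulls a value out of the
      // response body and stores it in the virtual user's session.
      val createAndCapture = exec(http("Create entity")
        .post("/things")
        .body(StringBody("{\"name\":\"${entityName}\"}"))
        .check(status.is(200))
        .check(jsonPath("$.entities[0].uuid").saveAs("thingUuid")))
    }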

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/conf/logback.xml
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/conf/logback.xml b/stack/loadtests/gatling/conf/logback.xml
deleted file mode 100644
index f859ad3..0000000
--- a/stack/loadtests/gatling/conf/logback.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<configuration>
-  <!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-        Unless required by applicable law or agreed to in writing, software
-        distributed under the License is distributed on an "AS IS" BASIS,
-        WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-        See the License for the specific language governing permissions and
-        limitations under the License.
-     -->
-	<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
-		<encoder>
-			<pattern>%d{HH:mm:ss.SSS} [%-5level] %logger{15} - %msg%n%rEx</pattern>
-			<immediateFlush>false</immediateFlush>
-		</encoder>
-	</appender>
-
-	<!-- Uncomment for logging ALL HTTP request and responses -->
-	<!-- 	<logger name="io.gatling.http" level="TRACE" /> -->
-	<!-- Uncomment for logging ONLY FAILED HTTP request and responses -->
-	 	<logger name="io.gatling.http" level="DEBUG" /> 
-
-	<root level="WARN">
-		<appender-ref ref="CONSOLE" />
-	</root>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/conf/recorder.conf
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/conf/recorder.conf b/stack/loadtests/gatling/conf/recorder.conf
deleted file mode 100644
index c496068..0000000
--- a/stack/loadtests/gatling/conf/recorder.conf
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#               http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-recorder {
-  core {
-    #encoding = "utf-8"               # The encoding used for reading/writing request bodies and the generated simulation
-    #outputFolder = ""                # The folder where the generated simulation will be written
-    #package = ""                     # The package's name of the generated simulation
-    #className = "RecordedSimulation" # The name of the generated Simulation class
-    #thresholdForPauseCreation = 100  # The minimum time, in milliseconds, that must pass between requests to trigger a pause creation
-    #saveConfig = false               # When set to true, the configuration from the Recorder GUI overwrites this configuration
-  }
-  filters {
-    #filterStrategy = "Disabled" # The selected resources filter strategy (currently supported : "Disabled", "BlackList", "WhiteList")
-    #whitelist = []              # The list of resource patterns that are part of the Recorder's whitelist
-    #blacklist = []              # The list of resource patterns that are part of the Recorder's blacklist
-  }
-  http {
-    #automaticReferer = true       # When set to false, write the referer + enable 'disableAutoReferer' in the generated simulation
-    #followRedirect = true         # When set to false, write redirect requests + enable 'disableFollowRedirect' in the generated simulation
-    #removeConditionalCache = true # When set to true, removes from the generated requests headers leading to request caching
-    #inferHtmlResources = true     # When set to true, add inferred resources + set 'inferHtmlResources' with the configured blacklist/whitelist in the generated simulation
-  }
-  proxy {
-    #port = 8000     # Local port used by Gatling's Proxy for HTTP/HTTPS
-    outgoing {
-      #host = ""     # The outgoing proxy's hostname
-      #username = "" # The username to use to connect to the outgoing proxy
-      #password = "" # The password corresponding to the user to use to connect to the outgoing proxy
-      #port = 0      # The HTTP port to use to connect to the outgoing proxy
-      #sslPort = 0   # If set, The HTTPS port to use to connect to the outgoing proxy
-    }
-  }
-  netty {
-    #maxInitialLineLength = 10000 # Maximum length of the initial line of the response (e.g. "HTTP/1.0 200 OK")
-    #maxHeaderSize = 20000        # Maximum size, in bytes, of each request's headers
-    #maxChunkSize = 8192          # Maximum length of the content of each chunk
-    #maxContentLength = 100000000 # Maximum length of the aggregated content of each response
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/Saxon-HE-9.5.1-6-compressed.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/Saxon-HE-9.5.1-6-compressed.jar b/stack/loadtests/gatling/lib/Saxon-HE-9.5.1-6-compressed.jar
deleted file mode 100644
index aed23e5..0000000
Binary files a/stack/loadtests/gatling/lib/Saxon-HE-9.5.1-6-compressed.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/akka-actor_2.10-2.3.6.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/akka-actor_2.10-2.3.6.jar b/stack/loadtests/gatling/lib/akka-actor_2.10-2.3.6.jar
deleted file mode 100644
index 4aa4afe..0000000
Binary files a/stack/loadtests/gatling/lib/akka-actor_2.10-2.3.6.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/async-http-client-1.9.0-BETA13.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/async-http-client-1.9.0-BETA13.jar b/stack/loadtests/gatling/lib/async-http-client-1.9.0-BETA13.jar
deleted file mode 100644
index 8865809..0000000
Binary files a/stack/loadtests/gatling/lib/async-http-client-1.9.0-BETA13.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/boon-0.26.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/boon-0.26.jar b/stack/loadtests/gatling/lib/boon-0.26.jar
deleted file mode 100644
index 99ae220..0000000
Binary files a/stack/loadtests/gatling/lib/boon-0.26.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/commons-pool-1.6.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/commons-pool-1.6.jar b/stack/loadtests/gatling/lib/commons-pool-1.6.jar
deleted file mode 100644
index 72ca75a..0000000
Binary files a/stack/loadtests/gatling/lib/commons-pool-1.6.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/compiler-interface-0.13.5-sources.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/compiler-interface-0.13.5-sources.jar b/stack/loadtests/gatling/lib/compiler-interface-0.13.5-sources.jar
deleted file mode 100644
index 7ea2783..0000000
Binary files a/stack/loadtests/gatling/lib/compiler-interface-0.13.5-sources.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/concurrentlinkedhashmap-lru-1.4.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/concurrentlinkedhashmap-lru-1.4.jar b/stack/loadtests/gatling/lib/concurrentlinkedhashmap-lru-1.4.jar
deleted file mode 100644
index 572b258..0000000
Binary files a/stack/loadtests/gatling/lib/concurrentlinkedhashmap-lru-1.4.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/config-1.2.1.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/config-1.2.1.jar b/stack/loadtests/gatling/lib/config-1.2.1.jar
deleted file mode 100644
index d2ed5a6..0000000
Binary files a/stack/loadtests/gatling/lib/config-1.2.1.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/fastring_2.10-0.2.4.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/fastring_2.10-0.2.4.jar b/stack/loadtests/gatling/lib/fastring_2.10-0.2.4.jar
deleted file mode 100644
index 8a81f03..0000000
Binary files a/stack/loadtests/gatling/lib/fastring_2.10-0.2.4.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/gatling-app-2.0.0-RC5.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/gatling-app-2.0.0-RC5.jar b/stack/loadtests/gatling/lib/gatling-app-2.0.0-RC5.jar
deleted file mode 100644
index bd2c6bc..0000000
Binary files a/stack/loadtests/gatling/lib/gatling-app-2.0.0-RC5.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/gatling-charts-2.0.0-RC5.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/gatling-charts-2.0.0-RC5.jar b/stack/loadtests/gatling/lib/gatling-charts-2.0.0-RC5.jar
deleted file mode 100644
index 3347eca..0000000
Binary files a/stack/loadtests/gatling/lib/gatling-charts-2.0.0-RC5.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/gatling-charts-highcharts-2.0.0-RC5.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/gatling-charts-highcharts-2.0.0-RC5.jar b/stack/loadtests/gatling/lib/gatling-charts-highcharts-2.0.0-RC5.jar
deleted file mode 100644
index 92e9c54..0000000
Binary files a/stack/loadtests/gatling/lib/gatling-charts-highcharts-2.0.0-RC5.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/gatling-core-2.0.0-RC5.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/gatling-core-2.0.0-RC5.jar b/stack/loadtests/gatling/lib/gatling-core-2.0.0-RC5.jar
deleted file mode 100644
index ad03856..0000000
Binary files a/stack/loadtests/gatling/lib/gatling-core-2.0.0-RC5.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/gatling-http-2.0.0-RC5.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/gatling-http-2.0.0-RC5.jar b/stack/loadtests/gatling/lib/gatling-http-2.0.0-RC5.jar
deleted file mode 100644
index 7954dd6..0000000
Binary files a/stack/loadtests/gatling/lib/gatling-http-2.0.0-RC5.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/gatling-jdbc-2.0.0-RC5.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/gatling-jdbc-2.0.0-RC5.jar b/stack/loadtests/gatling/lib/gatling-jdbc-2.0.0-RC5.jar
deleted file mode 100644
index bad010c..0000000
Binary files a/stack/loadtests/gatling/lib/gatling-jdbc-2.0.0-RC5.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/gatling-jms-2.0.0-RC5.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/gatling-jms-2.0.0-RC5.jar b/stack/loadtests/gatling/lib/gatling-jms-2.0.0-RC5.jar
deleted file mode 100644
index e5e510b..0000000
Binary files a/stack/loadtests/gatling/lib/gatling-jms-2.0.0-RC5.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/gatling-metrics-2.0.0-RC5.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/gatling-metrics-2.0.0-RC5.jar b/stack/loadtests/gatling/lib/gatling-metrics-2.0.0-RC5.jar
deleted file mode 100644
index 56f73fb..0000000
Binary files a/stack/loadtests/gatling/lib/gatling-metrics-2.0.0-RC5.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/gatling-recorder-2.0.0-RC5.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/gatling-recorder-2.0.0-RC5.jar b/stack/loadtests/gatling/lib/gatling-recorder-2.0.0-RC5.jar
deleted file mode 100644
index cb1b02d..0000000
Binary files a/stack/loadtests/gatling/lib/gatling-recorder-2.0.0-RC5.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/gatling-redis-2.0.0-RC5.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/gatling-redis-2.0.0-RC5.jar b/stack/loadtests/gatling/lib/gatling-redis-2.0.0-RC5.jar
deleted file mode 100644
index 4410540..0000000
Binary files a/stack/loadtests/gatling/lib/gatling-redis-2.0.0-RC5.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/geronimo-jms_1.1_spec-1.1.1.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/geronimo-jms_1.1_spec-1.1.1.jar b/stack/loadtests/gatling/lib/geronimo-jms_1.1_spec-1.1.1.jar
deleted file mode 100644
index 4f5e646..0000000
Binary files a/stack/loadtests/gatling/lib/geronimo-jms_1.1_spec-1.1.1.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/incremental-compiler-0.13.5.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/incremental-compiler-0.13.5.jar b/stack/loadtests/gatling/lib/incremental-compiler-0.13.5.jar
deleted file mode 100644
index 16e0f26..0000000
Binary files a/stack/loadtests/gatling/lib/incremental-compiler-0.13.5.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/jackson-annotations-2.4.0.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/jackson-annotations-2.4.0.jar b/stack/loadtests/gatling/lib/jackson-annotations-2.4.0.jar
deleted file mode 100644
index 0b55559..0000000
Binary files a/stack/loadtests/gatling/lib/jackson-annotations-2.4.0.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/jackson-core-2.4.2.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/jackson-core-2.4.2.jar b/stack/loadtests/gatling/lib/jackson-core-2.4.2.jar
deleted file mode 100644
index fad6f9b..0000000
Binary files a/stack/loadtests/gatling/lib/jackson-core-2.4.2.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/jackson-databind-2.4.2.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/jackson-databind-2.4.2.jar b/stack/loadtests/gatling/lib/jackson-databind-2.4.2.jar
deleted file mode 100644
index ea95c53..0000000
Binary files a/stack/loadtests/gatling/lib/jackson-databind-2.4.2.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/jodd-core-3.6.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/jodd-core-3.6.jar b/stack/loadtests/gatling/lib/jodd-core-3.6.jar
deleted file mode 100644
index 520caa9..0000000
Binary files a/stack/loadtests/gatling/lib/jodd-core-3.6.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/jodd-lagarto-3.6.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/jodd-lagarto-3.6.jar b/stack/loadtests/gatling/lib/jodd-lagarto-3.6.jar
deleted file mode 100644
index 1cf8b20..0000000
Binary files a/stack/loadtests/gatling/lib/jodd-lagarto-3.6.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/jodd-log-3.6.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/jodd-log-3.6.jar b/stack/loadtests/gatling/lib/jodd-log-3.6.jar
deleted file mode 100644
index a5eef88..0000000
Binary files a/stack/loadtests/gatling/lib/jodd-log-3.6.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/jsonpath_2.10-0.5.0.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/jsonpath_2.10-0.5.0.jar b/stack/loadtests/gatling/lib/jsonpath_2.10-0.5.0.jar
deleted file mode 100644
index 0804afc..0000000
Binary files a/stack/loadtests/gatling/lib/jsonpath_2.10-0.5.0.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/jzlib-1.1.3.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/jzlib-1.1.3.jar b/stack/loadtests/gatling/lib/jzlib-1.1.3.jar
deleted file mode 100644
index 2fa60b1..0000000
Binary files a/stack/loadtests/gatling/lib/jzlib-1.1.3.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/logback-classic-1.1.2.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/logback-classic-1.1.2.jar b/stack/loadtests/gatling/lib/logback-classic-1.1.2.jar
deleted file mode 100644
index 9230b2a..0000000
Binary files a/stack/loadtests/gatling/lib/logback-classic-1.1.2.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/logback-core-1.1.2.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/logback-core-1.1.2.jar b/stack/loadtests/gatling/lib/logback-core-1.1.2.jar
deleted file mode 100644
index 391da64..0000000
Binary files a/stack/loadtests/gatling/lib/logback-core-1.1.2.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/netty-3.9.4.Final.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/netty-3.9.4.Final.jar b/stack/loadtests/gatling/lib/netty-3.9.4.Final.jar
deleted file mode 100644
index 7ee6ba4..0000000
Binary files a/stack/loadtests/gatling/lib/netty-3.9.4.Final.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/opencsv-2.3.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/opencsv-2.3.jar b/stack/loadtests/gatling/lib/opencsv-2.3.jar
deleted file mode 100644
index 01f82ca..0000000
Binary files a/stack/loadtests/gatling/lib/opencsv-2.3.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/redisclient_2.10-2.13.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/redisclient_2.10-2.13.jar b/stack/loadtests/gatling/lib/redisclient_2.10-2.13.jar
deleted file mode 100644
index 38bb671..0000000
Binary files a/stack/loadtests/gatling/lib/redisclient_2.10-2.13.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/sbt-interface-0.13.5.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/sbt-interface-0.13.5.jar b/stack/loadtests/gatling/lib/sbt-interface-0.13.5.jar
deleted file mode 100644
index d95ec3a..0000000
Binary files a/stack/loadtests/gatling/lib/sbt-interface-0.13.5.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/scala-compiler.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/scala-compiler.jar b/stack/loadtests/gatling/lib/scala-compiler.jar
deleted file mode 100644
index 5e769de..0000000
Binary files a/stack/loadtests/gatling/lib/scala-compiler.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/scala-library.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/scala-library.jar b/stack/loadtests/gatling/lib/scala-library.jar
deleted file mode 100644
index 278a51e..0000000
Binary files a/stack/loadtests/gatling/lib/scala-library.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/scala-reflect.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/scala-reflect.jar b/stack/loadtests/gatling/lib/scala-reflect.jar
deleted file mode 100644
index 40e8156..0000000
Binary files a/stack/loadtests/gatling/lib/scala-reflect.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/scala-swing-2.10.4.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/scala-swing-2.10.4.jar b/stack/loadtests/gatling/lib/scala-swing-2.10.4.jar
deleted file mode 100644
index 9c43b92..0000000
Binary files a/stack/loadtests/gatling/lib/scala-swing-2.10.4.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/scalalogging-slf4j_2.10-1.1.0.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/scalalogging-slf4j_2.10-1.1.0.jar b/stack/loadtests/gatling/lib/scalalogging-slf4j_2.10-1.1.0.jar
deleted file mode 100644
index beaa38e..0000000
Binary files a/stack/loadtests/gatling/lib/scalalogging-slf4j_2.10-1.1.0.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/scopt_2.10-3.2.0.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/scopt_2.10-3.2.0.jar b/stack/loadtests/gatling/lib/scopt_2.10-3.2.0.jar
deleted file mode 100644
index 6fe0f76..0000000
Binary files a/stack/loadtests/gatling/lib/scopt_2.10-3.2.0.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/slf4j-api-1.7.7.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/slf4j-api-1.7.7.jar b/stack/loadtests/gatling/lib/slf4j-api-1.7.7.jar
deleted file mode 100644
index bebabd9..0000000
Binary files a/stack/loadtests/gatling/lib/slf4j-api-1.7.7.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/t-digest-3.0.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/t-digest-3.0.jar b/stack/loadtests/gatling/lib/t-digest-3.0.jar
deleted file mode 100644
index cfb29bc..0000000
Binary files a/stack/loadtests/gatling/lib/t-digest-3.0.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/threetenbp-1.0.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/threetenbp-1.0.jar b/stack/loadtests/gatling/lib/threetenbp-1.0.jar
deleted file mode 100644
index 020c227..0000000
Binary files a/stack/loadtests/gatling/lib/threetenbp-1.0.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/uncommons-maths-1.2.3.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/uncommons-maths-1.2.3.jar b/stack/loadtests/gatling/lib/uncommons-maths-1.2.3.jar
deleted file mode 100644
index 098d6ec..0000000
Binary files a/stack/loadtests/gatling/lib/uncommons-maths-1.2.3.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/lib/zinc-0.3.5.3.jar
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/lib/zinc-0.3.5.3.jar b/stack/loadtests/gatling/lib/zinc-0.3.5.3.jar
deleted file mode 100644
index bde5266..0000000
Binary files a/stack/loadtests/gatling/lib/zinc-0.3.5.3.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/scripts/gatling-ug.sh
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/scripts/gatling-ug.sh b/stack/loadtests/gatling/scripts/gatling-ug.sh
deleted file mode 100755
index 67f1392..0000000
--- a/stack/loadtests/gatling/scripts/gatling-ug.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/sh
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#               http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-die () {
-    echo >&2 "$@"
-    exit 1
-}
-
-[ "$#" -eq 5 ] || die "5 arguments required, $# provided.  Arguments are URL ORG APP NUM_USERS RAMP_TIME"
-
-OLDDIR=`pwd`
-BIN_DIR=`dirname $0`
-cd "${BIN_DIR}/.." && DEFAULT_GATLING_HOME=`pwd` && cd "${OLDDIR}"
-
-GATLING_HOME="${GATLING_HOME:=${DEFAULT_GATLING_HOME}}"
-GATLING_CONF="${GATLING_CONF:=$GATLING_HOME/conf}"
-URL="$1"
-ORG="$2"
-APP="$3"
-USERS="$4"
-RAMP="$5"
-
-#Shift off our first operation
-shift 5
-
-export GATLING_HOME GATLING_CONF
-
-echo "GATLING_HOME is set to ${GATLING_HOME}"
-
-curl -X POST "${URL}/usergrid/sandbox/notifiers" -d '{"name":"notifier82e05787a8c24361a2992c64436b6e6a","provider":"noop"}'
-
-JAVA_OPTS="-Dthrottle=3000 -Dduration=300 -Dorg=${ORG} -Dbaseurl=${URL} -Dnotifier=notifier82e05787a8c24361a2992c64436b6e6a -DnumEntities=10000 -DnumUsers=${USERS} -DrampTime=${RAMP} -Dapp=${APP} -server -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -Xms512M -Xmx512M -Xmn100M -XX:+HeapDumpOnOutOfMemoryError -XX:+AggressiveOpts -XX:+OptimizeStringConcat -XX:+UseFastAccessorMethods -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -Djava.net.preferIPv4Stack=true -Djava.net.preferIPv6Addresses=false ${JAVA_OPTS}"
-
-echo $JAVA_OPTS
-
-CLASSPATH="$GATLING_HOME/lib/*:$GATLING_CONF:$GATLING_HOME/user-files:${JAVA_CLASSPATH}"
-
-java $JAVA_OPTS -cp "$CLASSPATH" io.gatling.app.Gatling "$@"
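
The JAVA_OPTS line above hands the run parameters to the simulations as JVM system properties (throttle, duration, org, baseurl, notifier, numEntities, numUsers, rampTime, app). The Settings object the scenarios read those values from is not part of this diff; a rough sketch of how such an object could look, with the property names and several defaults taken from the scripts in this change and the rest assumed:

    package org.apache.usergrid

    // Sketch only: the real Settings object is defined elsewhere in the load test sources.
    object SettingsSketch {
      val org         = System.getProperty("org", "test-organization")
      val app         = System.getProperty("app", "test-app")
      val baseUrl     = System.getProperty("baseurl", "http://localhost:8080")
      val notifier    = System.getProperty("notifier", "notifier82e05787a8c24361a2992c64436b6e6a")
      val numUsers    = System.getProperty("numUsers", "100").toInt
      val rampTime    = System.getProperty("rampTime", "0").toInt
      val duration    = System.getProperty("duration", "300").toInt
      val throttle    = System.getProperty("throttle", "50").toInt
      val numEntities = System.getProperty("numEntities", "10000").toInt
    }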

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/user-files/data/search.csv
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/user-files/data/search.csv b/stack/loadtests/gatling/user-files/data/search.csv
deleted file mode 100644
index fdeab9e..0000000
--- a/stack/loadtests/gatling/user-files/data/search.csv
+++ /dev/null
@@ -1,3 +0,0 @@
-searchCriterion,searchComputerName
-Macbook,MacBook Pro
-eee,ASUS Eee PC 1005PE
\ No newline at end of file
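
search.csv is a plain Gatling CSV feeder: a header row naming the session keys, then one record per line. A sketch of how a file like this is usually wired into a scenario, assuming a hypothetical /computers collection queried by the searchCriterion column:

    import io.gatling.core.Predef._
    import io.gatling.http.Predef._

    object SearchFeederSketch {
      // Each virtual user takes the next record; ${searchCriterion} resolves to the CSV column.
      val searchComputers = scenario("Search computers")
        .feed(csv("search.csv").circular)
        .exec(http("Search by criterion")
          .get("/computers?ql=name%20contains%20'${searchCriterion}'")
          .check(status.is(200)))
    }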

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/gatling/user-files/request-bodies/.keep
----------------------------------------------------------------------
diff --git a/stack/loadtests/gatling/user-files/request-bodies/.keep b/stack/loadtests/gatling/user-files/request-bodies/.keep
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/loadtest_setup.sh
----------------------------------------------------------------------
diff --git a/stack/loadtests/loadtest_setup.sh b/stack/loadtests/loadtest_setup.sh
deleted file mode 100644
index 3c5cf58..0000000
--- a/stack/loadtests/loadtest_setup.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/bash -x
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-check=`grep "DnumUsers" gatling/bin/gatling.sh`
-if [[ $check == "" ]]
-then 
-sed -i.bak 's/JAVA_OPTS="/JAVA_OPTS="-Dthrottle=${GATLING_THROTTLE} -DnumUsers=${GATLING_NUMUSERS} -DrampTime=${GATLING_RAMPTIME} -Dduration=${GATLING_DURATION} -DnumEntities=${GATLING_NUMENTITIES} -Dbaseurl=${GATLING_BASE_URL} -Dorg=${GATLING_ORG} -Dapp=${GATLING_APP} -Dnotifier=${GATLING_NOTIFIER} -Dprovider=${GATLING_PROVIDER} /g' gatling/bin/gatling.sh
-fi
-GATLING_NUMUSERS=5000
-GATLING_RAMPTIME=300
-echo "Enter base url for target server, e.g. http://api.usergrid.com/ (note the trailing slash)"
-read GATLING_BASE_URL
-echo "Enter org name"
-read GATLING_ORG
-echo "Enter app name"
-read GATLING_APP
-echo "Running simulation to load 5k users with geolocation data into /users collection. This will take ~5 minutes."
-echo -e "2\n\n\n" | gatling/bin/gatling.sh
-echo "Finished loading data into /users collection"
-echo 'All done! To get started, set these environment variables:
-
-GATLING_BASE_URL - Required. UG base url, e.g. http://api.usergrid.com/.
-GATLING_ORG      - Required. UG organization name.
-GATLING_APP      - Required. UG application name.
-
-GATLING_NUMUSERS - Number of users in the simulation. Default is 100.
-GATLING_DURATION - Duration of the simulation. Default is 300.
-GATLING_RAMPTIME - Time period to inject the users over. Default is 0.
-GATLING_THROTTLE - Requests per second the simulation will try to reach. Default is 50.
-
-GATLING_NOTIFIER - Name of the notifier to use for PushNotificationSimulation.
-GATLING_PROVIDER - Push notification provider that corresponds to the notifier, e.g. apple, google, etc.'
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/simulations/test/scala/org/apache/usergrid/data-generators/EntityDataGenerator.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/simulations/test/scala/org/apache/usergrid/data-generators/EntityDataGenerator.scala b/stack/loadtests/simulations/test/scala/org/apache/usergrid/data-generators/EntityDataGenerator.scala
deleted file mode 100755
index b1a7a90..0000000
--- a/stack/loadtests/simulations/test/scala/org/apache/usergrid/data-generators/EntityDataGenerator.scala
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.usergrid
-
-import scala.collection.mutable.ArrayBuffer
-
-object EntityDataGenerator {
-
-  def generateBlockUserLists(numUsers: Int): Map[String, String] = {
-
-    var blocks: ArrayBuffer[String] = new ArrayBuffer[String]
-    var blockedBy: ArrayBuffer[String] = new ArrayBuffer[String]
-
-    for (numBlock <- 1 to Utils.generateRandomInt(1, 7)) {
-      blocks += "user".concat(Utils.generateRandomInt(1, numUsers).toString)
-    }
-
-    for (numBlockedBy <- 1 to Utils.generateRandomInt(1, 7)) {
-      blockedBy += "user".concat(Utils.generateRandomInt(1, numUsers).toString)
-    }
-
-    return Map("blocks" -> blocks.toArray.mkString(","), "blockedBy" -> blockedBy.toArray.mkString(","))
-
-  }
-
-  def generateUser(userId: Int): Map[String,String] = {
-
-    return Map("username" -> "user".concat(userId.toString),
-      "profileId" -> Utils.generateRandomInt(10000, 1000000).toString,
-      "displayName" -> Utils.generateRandomInt(10000, 1000000).toString,
-      "showAge" -> Utils.generateRandomInt(0, 1).toString,
-      "ethnicity" -> Utils.generateRandomInt(1, 15).toString,
-      "relationshipStatus" -> Utils.generateRandomInt(1, 4).toString,
-      "headline" -> "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.",
-      "aboutMe" -> "Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.",
-      "age" -> Utils.generateRandomInt(18, 65).toString,
-      "height" -> Utils.generateRandomInt(48, 84).toString,
-      "weight" -> Utils.generateRandomInt(120, 350).toString,
-      "seen" -> Utils.generateRandomInt(50, 100000).toString
-    )
-  }
-
-}
\ No newline at end of file
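
A quick sketch of what these generators produce, assuming EntityDataGenerator and Utils from this module are on the classpath; the user count is an arbitrary example value:

    package org.apache.usergrid

    object EntityDataGeneratorSketch {
      def main(args: Array[String]): Unit = {
        val numUsers = 1000 // assumed population size

        // One synthetic user profile merged with randomized blocks/blockedBy lists.
        val user: Map[String, String] =
          EntityDataGenerator.generateUser(1) ++ EntityDataGenerator.generateBlockUserLists(numUsers)

        user.foreach { case (field, value) => println(s"$field=$value") }
      }
    }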

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/simulations/test/scala/org/apache/usergrid/data-generators/FeederGenerator.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/simulations/test/scala/org/apache/usergrid/data-generators/FeederGenerator.scala b/stack/loadtests/simulations/test/scala/org/apache/usergrid/data-generators/FeederGenerator.scala
deleted file mode 100755
index ffe324c..0000000
--- a/stack/loadtests/simulations/test/scala/org/apache/usergrid/data-generators/FeederGenerator.scala
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.usergrid
-
-import io.gatling.core.Predef._
-import scala.collection.mutable.ArrayBuffer
-
-object FeederGenerator {
-
-  def generateUserWithGeolocationFeeder(numUsers: Int, radius: Double, centerLatitude: Double, centerLongitude: Double): Array[Map[String, String]] = {
-    var userArray: ArrayBuffer[Map[String, String]] = new ArrayBuffer[Map[String, String]]
-    for (userCount <- 1 to numUsers) {
-      var user: Map[String, String] = EntityDataGenerator.generateUser(userCount)
-      var geolocation: Map[String, String] = Utils.generateRandomGeolocation(radius, centerLatitude, centerLongitude)
-      var blockLists: Map[String, String] = EntityDataGenerator.generateBlockUserLists(numUsers)
-
-      user = user ++ geolocation ++ blockLists
-
-      userArray += user
-    }
-    return userArray.toArray
-  }
-
-  def generateGeolocationFeeder(radius: Double, centerLatitude: Double, centerLongitude: Double): Feeder[String] = {
-
-    val geolocationFeeder = new Feeder[String] {
-
-      // always return true as this feeder can be polled indefinitely
-      override def hasNext = true
-
-      override def next: Map[String, String] = {
-        var geolocation: Map[String, String] = Utils.generateRandomGeolocation(radius, centerLatitude, centerLongitude)
-        Map("latitude" -> geolocation("latitude"), "longitude" -> geolocation("longitude"))
-      }
-    }
-
-    return geolocationFeeder
-
-  }
-
-  def generateGeolocationWithQueryFeeder(radius: Double, centerLatitude: Double, centerLongitude: Double): Feeder[String] = {
-
-    val geolocationFeeder = new Feeder[String] {
-
-      // always return true as this feeder can be polled indefinitely
-      override def hasNext = true
-
-      override def next: Map[String, String] = {
-        var geolocation: Map[String, String] = Utils.generateRandomGeolocation(radius, centerLatitude, centerLongitude)
-        var queryParams = Utils.generateRandomQueryString
-        Map("latitude" -> geolocation("latitude"), "longitude" -> geolocation("longitude"), "queryParams" -> queryParams)
-      }
-    }
-
-    return geolocationFeeder
-
-  }
-
-  def generateUserConnectionFeeder(numUsers: Int): Feeder[String] = {
-
-    val userIdFeeder = new Feeder[String] {
-
-      // always return true as this feeder can be polled indefinitely
-      override def hasNext = true
-
-      override def next: Map[String, String] = {
-        Map("user1" -> "user".concat(Utils.generateRandomInt(1, numUsers).toString), "user2" -> "user".concat(Utils.generateRandomInt(1, numUsers).toString))
-      }
-    }
-
-    return userIdFeeder
-
-  }
-
-  def generateEntityNameFeeder(prefix: String, numEntities: Int): Array[Map[String, String]] = {
-
-    var nameArray: ArrayBuffer[Map[String, String]] = new ArrayBuffer[Map[String, String]]
-
-    for (entityCount <- 1 to numEntities) {
-      nameArray += Map("entityName" -> prefix.concat(entityCount.toString))
-    }
-
-    return nameArray.toArray
-
-  }
-
-}
\ No newline at end of file
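
These feeders plug straight into Gatling scenarios. A sketch of feeding user records with geolocation into a user-creation request; the user count, radius, and centre point are assumptions, and the request body only echoes a few of the generated fields:

    package org.apache.usergrid

    import io.gatling.core.Predef._
    import io.gatling.http.Predef._

    object GeoUserFeedSketch {
      // Array feeders support the usual strategies (queue, circular, random).
      val userFeeder = FeederGenerator
        .generateUserWithGeolocationFeeder(1000, 3.0, 37.3317, -122.0307)
        .circular

      val createGeoUsers = scenario("Create users with geolocation")
        .feed(userFeeder)
        .exec(http("Create user")
          .post("/users")
          .body(StringBody("{\"username\":\"${username}\",\"location\":{\"latitude\":\"${latitude}\",\"longitude\":\"${longitude}\"}}"))
          .check(status.is(200)))
    }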

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/ApplicationScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/ApplicationScenarios.scala b/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/ApplicationScenarios.scala
deleted file mode 100755
index f5f7901..0000000
--- a/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/ApplicationScenarios.scala
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.usergrid
-
-import io.gatling.core.Predef._
-import io.gatling.http.Predef._
-
-/**
- * Creates an application within an organization
- *
- *
- * Expects:
- *
- * authToken The auth token to use when creating the application
- * orgName The organization name
- *
- * Produces:
- *
- * appName The name of the created application
- */
-object ApplicationScenarios {
-
-  val createApplication = exec(http("Create Application")
-    .post("/management/organizations/${org}/applications")
-    .headers(Headers.jsonAuthorized)
-    .body(StringBody("{\"name\":\"" + Settings.app + "\"}"))
-    .check(status.is(200))
-
-    )
-
-}
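
createApplication assumes ${org} and an authorized token header are already available (Headers.jsonAuthorized is defined elsewhere in these load test sources). A sketch of where the step typically sits, with the org value pulled from a system property as an assumption and the preceding org/token steps omitted:

    package org.apache.usergrid

    import io.gatling.core.Predef._
    import io.gatling.http.Predef._

    object CreateApplicationSketch {
      val setupApplication = scenario("Set up application")
        // ${org} feeds the /management/organizations/${org}/applications path above.
        .exec(session => session.set("org", System.getProperty("org", "test-organization")))
        .exec(ApplicationScenarios.createApplication)
    }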

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/ConnectionScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/ConnectionScenarios.scala b/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/ConnectionScenarios.scala
deleted file mode 100755
index ba2449f..0000000
--- a/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/ConnectionScenarios.scala
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.usergrid
-
-import io.gatling.core.Predef._
-import io.gatling.http.Predef._
-
-object ConnectionScenarios {
-
-  val postConnection = exec(
-    http("POST connection")
-      .post("/users/${user1}/likes/users/${user2}")
-      .check(status.is(200))
-  )
-
-}
\ No newline at end of file
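
postConnection expects user1 and user2 in the session, which is exactly what FeederGenerator.generateUserConnectionFeeder provides. A sketch of the pairing; the user count is assumed to match however many users were created earlier:

    package org.apache.usergrid

    import io.gatling.core.Predef._
    import io.gatling.http.Predef._

    object ConnectionLoadSketch {
      val connectUsers = scenario("Create likes connections")
        .feed(FeederGenerator.generateUserConnectionFeeder(1000)) // assumed user count
        .exec(ConnectionScenarios.postConnection)
    }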

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/DeviceScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/DeviceScenarios.scala b/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/DeviceScenarios.scala
deleted file mode 100755
index d8d8af0..0000000
--- a/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/DeviceScenarios.scala
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.usergrid
-
-import io.gatling.core.Predef._
-import io.gatling.http.Predef._
-
-/**
- *
- * Creates a new device
- *
- * Expects:
- *
- * authToken The auth token to use when creating the device
- * orgName The name of the org
- * appName The name of the app
- * notifierName The name of the created notifier
- *
- * Produces:
- *
- * deviceName the name of the device created
- *
- */
-object DeviceScenarios {
-
-  /**
-   * Create a device
-   */
-  val postDeviceWithNotifier = exec(http("Create device with notifier")
-    .post("/devices")
-    .body(StringBody("{\"name\":\"${entityName}\"," +
-      "\"deviceModel\":\"Fake Device\"," +
-      " \"deviceOSVerion\":\"Negative Version\", " +
-      "\"${notifier}.notifier.id\":\"${entityName}\"}"))
-    .check(status.is(200)))
-
-  val postDeviceWithNotifier400ok = exec(http("Create device with notifier")
-    .post("/devices")
-    .body(StringBody("{\"name\":\"${entityName}\"," +
-    "\"deviceModel\":\"Fake Device\"," +
-    " \"deviceOSVerion\":\"Negative Version\", " +
-    "\"${notifier}.notifier.id\":\"${entityName}\"}"))
-    .check(status.in(200 to 400)))
-
-  /**
-   * TODO: Add a device to a user, which would expect a user in the session
-   */
-
-
-
-}
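
A sketch of driving postDeviceWithNotifier: device names come from the entity-name feeder, and the notifier session value is assumed to name a notifier created beforehand (for example by the curl call in gatling-ug.sh above). The device count and the fallback notifier name are placeholders:

    package org.apache.usergrid

    import io.gatling.core.Predef._
    import io.gatling.http.Predef._

    object DeviceLoadSketch {
      val registerDevices = scenario("Register devices")
        .feed(FeederGenerator.generateEntityNameFeeder("device", 4000).queue) // assumed device count
        .exec(session => session.set("notifier", System.getProperty("notifier", "noopNotifier")))
        .exec(DeviceScenarios.postDeviceWithNotifier)
    }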

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/GeoScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/GeoScenarios.scala b/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/GeoScenarios.scala
deleted file mode 100755
index 94bf0af..0000000
--- a/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/GeoScenarios.scala
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.usergrid
-
-import io.gatling.core.Predef._
-import io.gatling.http.Predef._
-
-object GeoScenarios {
-
-  val getGeolocation = exec(
-      http("GET geolocated user")
-        .get("/users?ql=location%20within%20" + Settings.geosearchRadius + "%20of%20${latitude},${longitude}")
-        .check(status.is(200))
-    )
-
-  val getGeolocationWithQuery = exec(
-      http("GET geolocated user with query")
-        .get("/users?ql=${queryParams}%20AND%20location%20within%20" + Settings.geosearchRadius + "%20of%20${latitude},${longitude}")
-        .check(status.is(200))
-    )
-
-  val updateGeolocation = exec(
-    http("PUT user location")
-      .put("/users/user" + Utils.generateRandomInt(1, Settings.numUsers))
-      .body(StringBody("{\"location\":{\"latitude\":\"${latitude}\",\"longitude\":\"${longitude}\"}}"))
-      .check(status.is(200))
-  )
-
-}
\ No newline at end of file
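
A sketch of exercising the geo queries: the geolocation-with-query feeder supplies latitude, longitude, and queryParams for each request, while Settings.geosearchRadius drives the "within" clause inside the scenario itself. The radius and centre point here are arbitrary example values:

    package org.apache.usergrid

    import io.gatling.core.Predef._
    import io.gatling.http.Predef._

    object GeoQuerySketch {
      val geoQueries = scenario("Geo queries")
        .feed(FeederGenerator.generateGeolocationWithQueryFeeder(3.0, 37.3317, -122.0307))
        .exec(GeoScenarios.getGeolocationWithQuery)
    }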

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/NotificationScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/NotificationScenarios.scala b/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/NotificationScenarios.scala
deleted file mode 100755
index 91d6754..0000000
--- a/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/NotificationScenarios.scala
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.usergrid
-
-import java.io.File
-import java.nio.file.{Paths, Files}
-
-import io.gatling.core.Predef._
-import io.gatling.http.Predef._
-import scala.concurrent.duration._
-
-import scala.io.Source
-
-/**
- *
- * Creates a new device
- *
- * Expects:
- *
- * authToken The auth token to use when creating the application
- * orgName The name of the org
- * appName The name of the app
- * notifierName The name of the created notifier
- * deviceName the name of the device created to send the notification to
- *
- * Produces:
- *
- * N/A
- *
- *
- */
-object NotificationScenarios {
-
-
-  /**
-   * send the notification now
-   */
-  val sendNotification = exec(http("Send Single Notification")
-      .post("/devices/${entityName}/notifications")
-      .body(StringBody("{\"payloads\":{\"${notifier}\":\"testmessage\"}}"))
-      .check(status.is(200))
-    )
-
-  val sendNotificationToUser= exec(http("Send Notification to All Devices")
-    .post("/users/${user}/notifications")
-    .body(StringBody("{\"payloads\":{\"${notifier}\":\"testmessage\"}}"))
-    .check(status.is(200))
-  )
-
-  /**
-   * TODO: Add posting to users, which would expect a user in the session
-   */
-
-
-
-
-}
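
The deleted sendNotification step only asserts a 200. As a sketch, the same request could also capture the created notification's UUID for a follow-up check; the $.entities[0].uuid path and the GET endpoint below are assumptions about the usual Usergrid response envelope, not something this commit defines:

import io.gatling.core.Predef._
import io.gatling.http.Predef._

object NotificationChecksSketch {

  // Send a notification and save the returned UUID into the session.
  val sendAndCapture = exec(http("Send Single Notification")
    .post("/devices/${entityName}/notifications")
    .body(StringBody("{\"payloads\":{\"${notifier}\":\"testmessage\"}}"))
    .check(status.is(200),
           jsonPath("$.entities[0].uuid").saveAs("notificationId")))

  // Hypothetical follow-up request using the captured id.
  val getNotification = exec(http("Get Notification")
    .get("/notifications/${notificationId}")
    .check(status.is(200)))
}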

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/c9d6b7e1/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/NotifierScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/NotifierScenarios.scala b/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/NotifierScenarios.scala
deleted file mode 100755
index 0c2fc0c..0000000
--- a/stack/loadtests/simulations/test/scala/org/apache/usergrid/scenarios/NotifierScenarios.scala
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.usergrid
-
-import io.gatling.core.Predef._
-import io.gatling.http.Predef._
-import scala.concurrent.duration._
-
-/**
- *
- * Creates a new no-op notifier
- *
- *
- * Expects:
- *
- * authToken The auth token to use when creating the application
- * orgName The name of the org
- * appName The name of the app
- *
- * Produces:
- *
- * notifierName The name of the created notifier
- *
- */
-object NotifierScenarios {
-  
-  val notifier = Settings.pushNotifier
-  val provider = Settings.pushProvider
-
-  /**
-   * Create a notifier
-   */
-  val createNotifier = exec(
-      session => {
-        session.set("notifier", notifier)
-        session.set("provider", provider)
-      }
-    )
-
-    .exec(http("Create Notifier")
-    .post("/notifiers")
-    .body(StringBody("{\"name\":\"${notifier}\",\"provider\":\"${provider}\"}"))
-    //remnants of trying to upload an apple certificate
-//    .param("name", "${notifierName}")
-//    .param("provider", "apple")
-//    .param("environment", "mock")
-//    .fileBody("p12Certificate", Map).fileBody(pkcs12Cert)
-    .check(status.is(200)))
-
-
-}
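
Note that Gatling's Session is immutable, so in the deleted createNotifier block the result of session.set("notifier", notifier) is discarded and only "provider" ends up in the session. A sketch of the chained form that keeps both values; the literal notifier/provider strings here are placeholders rather than the real Settings values:

import io.gatling.core.Predef._
import io.gatling.http.Predef._

object NotifierScenariosSketch {
  // Placeholder values; the real code read these from Settings.
  val notifier = "mockNotifier"
  val provider = "noop"

  val createNotifier = exec(session =>
      // set returns a new Session, so the calls must be chained
      session.set("notifier", notifier).set("provider", provider))
    .exec(http("Create Notifier")
      .post("/notifiers")
      .body(StringBody("{\"name\":\"${notifier}\",\"provider\":\"${provider}\"}"))
      .check(status.is(200)))
}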


[04/12] git commit: Update README.md

Posted by sn...@apache.org.
Update README.md

Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/7947baed
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/7947baed
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/7947baed

Branch: refs/heads/two-dot-o-events
Commit: 7947baed7b3d62b2ebfe321546d8377bde0f0e49
Parents: 6149bf1
Author: amuramoto <am...@users.noreply.github.com>
Authored: Mon Oct 27 10:10:19 2014 -0700
Committer: amuramoto <am...@users.noreply.github.com>
Committed: Mon Oct 27 10:10:19 2014 -0700

----------------------------------------------------------------------
 stack/loadtests/README.md | 52 ------------------------------------------
 1 file changed, 52 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/7947baed/stack/loadtests/README.md
----------------------------------------------------------------------
diff --git a/stack/loadtests/README.md b/stack/loadtests/README.md
index 0c1774a..8b13789 100644
--- a/stack/loadtests/README.md
+++ b/stack/loadtests/README.md
@@ -1,53 +1 @@
-To make it easy for you to load test your instance of Usergrid, we have bundled in the Gatling load test tool, along with some pre-built tests of different functionality. To get started, do the following:
 
-### Setting up Gatling
-1. Unzip loadtest.zip
-2. cd to the 'gatling' dir
-3. Run 'sh loadtest_setup.sh'. This will do the following:
-	- Add some handy options to gatling/bin/gatling.sh that will allow you to set certain test parameters using environment variables (more on this later)
-	- Run the PostUsersSimulation, which will load 5k users with geolocation data into a specified UG org/app. This is just to seed some data entities to make it easier to run some of the tests.
-4. Set the following environment variables:
-- GATLING_BASE_URL - Required. UG base url, e.g. http://api.usergrid.com/.
-- GATLING_ORG      - Required. UG organization name.
-- GATLING_APP      - Required. UG application name.
-
-- GATLING_NUMUSERS - Number of users in the simulation. Default is 100.
-- GATLING_DURATION - Duration of the simulation. Default is 300.
-- GATLING_RAMPTIME - Time period over which to inject the users. Default is 0.
-- GATLING_THROTTLE - Requests per second that the simulation will try to reach. Default is 50.
-
-- GATLING_NOTIFIER - Name of the notifier to use for PushNotificationSimulation.
-- GATLING_PROVIDER - Push notification provider that corresponds to the notifier, e.g. apple, google, etc.
-
-### Running load tests
-To run Gatling, do the following:
-1. Run 'gatling/bin/gatling.sh'
-2. Enter the number of the test you want to run from the list (see below for an explanation of each test)
-3. Optional. Set an identifier for the results of this run of the simulation
-4. Optional. Set a description for this run of the simulation
-
-### Viewing results
-Results of the test are output to the gatling/results directory. The output directory and the location of the generated report are shown once the test has successfully run.
-
-### Default tests
-The following default tests are available. Note that the GATLING_BASE_URL, GATLING_ORG, and GATLING_APP environment variables must be set before any tests can be run. Each test also requires certain additional env variables to be set.
-
-- PostUsersSimulation
-
-POSTs 5k entities with geolocation data to /users. Entities are named sequentially, i.e. user1, user2, etc.
-
-- GetEntitySimulation
-
-Performs simple GETs on the /users collection. You should run PostUsersSimulation or loadtest_setup.sh first to load data into the collection.
-
-- PostDevicesSimulation
-
-POSTs a user-specified number of entities to the /devices collection. This is useful if you want to load test push notifications.
-
-- PushTargetDeviceSimulation
-
-Creates users and devices, connects users with devices, then sends a push notification to all user devices. To run this, you will need to create a notifier, then set the GATLING_NOTIFIER environment variable to the name or UUID of the notifier. You'll also need to set GATLING_PROVIDER to match the provider in the notifier.
-
-- PushNotificationSimulation
-
-Sends push notifications. To run this, you will need to create a notifier, then set the GATLING_NOTIFIER environment variable to the name or UUID of the notifier. You'll also need to set GATLING_PROVIDER to match the provider in the notifier.
\ No newline at end of file
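
For reference, a sketch of how the GATLING_* variables and defaults documented in the README above could be read from the environment in Scala. This is not the project's actual Settings object, only an illustration of the documented defaults:

object GatlingEnvSketch {
  // Read an environment variable, falling back to a default when unset.
  private def env(name: String, default: String = ""): String =
    sys.env.getOrElse(name, default)

  val baseUrl  = env("GATLING_BASE_URL")             // required
  val org      = env("GATLING_ORG")                  // required
  val app      = env("GATLING_APP")                  // required
  val numUsers = env("GATLING_NUMUSERS", "100").toInt
  val duration = env("GATLING_DURATION", "300").toInt
  val rampTime = env("GATLING_RAMPTIME", "0").toInt
  val throttle = env("GATLING_THROTTLE", "50").toInt
  val notifier = env("GATLING_NOTIFIER")
  val provider = env("GATLING_PROVIDER")
}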


[08/12] git commit: fix rat issue; remove debug

Posted by sn...@apache.org.
fix rat issue; remove debug


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/960ce6ba
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/960ce6ba
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/960ce6ba

Branch: refs/heads/two-dot-o-events
Commit: 960ce6baecf747797c461c1a1a52a980772bfb45
Parents: 34a6d12
Author: Shawn Feldman <sf...@apache.org>
Authored: Mon Oct 27 17:37:41 2014 -0600
Committer: Shawn Feldman <sf...@apache.org>
Committed: Mon Oct 27 17:37:41 2014 -0600

----------------------------------------------------------------------
 .../scenarios/NotificationScenarios.scala        |  2 +-
 stack/loadtests/src/test/resources/logback.xml   | 19 ++++++++++++++++++-
 2 files changed, 19 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/960ce6ba/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/NotificationScenarios.scala
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/NotificationScenarios.scala b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/NotificationScenarios.scala
index ab4d813..2ac8995 100755
--- a/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/NotificationScenarios.scala
+++ b/stack/loadtests/src/main/scala/org/apache/usergrid/scenarios/NotificationScenarios.scala
@@ -60,7 +60,7 @@ object NotificationScenarios {
 
   val sendNotificationToUser= exec(http("Send Notification to All Devices")
     .post("/users/${userId}/notifications")
-    .body(StringBody("{\"debug\":\"true\",\"payloads\":{\"" + notifier + "\":\"testmessage\"}}"))
+    .body(StringBody("{\"payloads\":{\"" + notifier + "\":\"testmessage\"}}"))
     .check(status.is(200))
   )
 

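Unlike the deleted scenario files earlier in this thread, this version concatenates the Scala value notifier into the body rather than using a ${notifier} Gatling EL placeholder, so the payload is fixed when the scenario is built. With an assumed value it evaluates as follows (illustration only, not taken from the commit):

val notifier = "mockNotifier"
val body = "{\"payloads\":{\"" + notifier + "\":\"testmessage\"}}"
// body == {"payloads":{"mockNotifier":"testmessage"}}
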
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/960ce6ba/stack/loadtests/src/test/resources/logback.xml
----------------------------------------------------------------------
diff --git a/stack/loadtests/src/test/resources/logback.xml b/stack/loadtests/src/test/resources/logback.xml
index f112f98..1d512d6 100644
--- a/stack/loadtests/src/test/resources/logback.xml
+++ b/stack/loadtests/src/test/resources/logback.xml
@@ -1,6 +1,23 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <configuration>
-
+<!--
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
 	<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
 		<encoder>
 			<pattern>%d{HH:mm:ss.SSS} [%-5level] %logger{15} - %msg%n%rEx</pattern>