Posted to commits@usergrid.apache.org by mr...@apache.org on 2016/08/01 16:54:01 UTC

[26/50] [abbrv] usergrid git commit: Initial checkin for Python Utilities and SDK

http://git-wip-us.apache.org/repos/asf/usergrid/blob/32f9e55d/utils/usergrid-util-python/usergrid_tools/general/queue_monitor.py
----------------------------------------------------------------------
diff --git a/utils/usergrid-util-python/usergrid_tools/general/queue_monitor.py b/utils/usergrid-util-python/usergrid_tools/general/queue_monitor.py
new file mode 100644
index 0000000..37594d1
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/general/queue_monitor.py
@@ -0,0 +1,119 @@
+import argparse
+import json
+import datetime
+import os
+import time
+import sys
+
+import boto
+from boto import sqs
+
+### This monitors an SQS queue and measures the delta message count between polling intervals to infer the amount of time
+### remaining to fully drain the queue
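+###
+### An example queue_monitor.json (hypothetical values; the keys under 'sqs' are
+### passed as keyword arguments to boto.sqs.connect_to_region):
+###
+### {
+###   "sqs": {
+###     "region_name": "us-east-1",
+###     "aws_access_key_id": "<aws_access_key_id>",
+###     "aws_secret_access_key": "<aws_secret_access_key>"
+###   }
+### }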
+
+__author__ = 'Jeff West @ ApigeeCorporation'
+
+
+def total_seconds(td):
+    return (td.microseconds + (td.seconds + td.days * 24.0 * 3600) * 10.0 ** 6) / 10.0 ** 6
+
+
+def total_milliseconds(td):
+    return (td.microseconds + td.seconds * 1000000) / 1000
+
+
+def get_time_remaining(count, rate):
+    if rate == 0:
+        return 'NaN'
+
+    seconds = count * 1.0 / rate
+
+    m, s = divmod(seconds, 60)
+    h, m = divmod(m, 60)
+
+    return "%d:%02d:%02d" % (h, m, s)
+
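+# Example: get_time_remaining(3600, 5.0) returns '0:12:00' (3600 messages at 5 messages/sec)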
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Usergrid - SQS Queue Monitor')
+
+    parser.add_argument('-c', '--config',
+                        help='Path to a JSON config file containing SQS connection settings',
+                        type=str,
+                        default='%s/.usergrid/queue_monitor.json' % os.getenv("HOME"))
+
+    parser.add_argument('-q', '--queue_name',
+                        help='The name of the SQS queue to monitor',
+                        default='entities',
+                        type=str)
+
+    my_args = parser.parse_args(sys.argv[1:])
+
+    print str(my_args)
+
+    return vars(my_args)
+
+
+def main():
+
+    args = parse_args()
+
+    queue_name = args.get('queue_name')
+
+    print 'queue_name=%s' % queue_name
+
+    start_time = datetime.datetime.utcnow()
+    first_start_time = start_time
+
+    print "first start: %s" % first_start_time
+
+    with open(args.get('config'), 'r') as f:
+        config = json.load(f)
+
+    sqs_config = config.get('sqs')
+    last_time = datetime.datetime.utcnow()
+
+    sqs_conn = boto.sqs.connect_to_region(**sqs_config)
+
+    queue = sqs_conn.get_queue(queue_name)
+
+    last_size = queue.count()
+    first_size = last_size
+
+    print 'Starting Size: %s' % last_size
+
+    sleep = 10
+    time.sleep(sleep)
+    rate_sum = 0
+    rate_count = 0
+
+    while True:
+        size = queue.count()
+        time_stop = datetime.datetime.utcnow()
+
+        time_delta = total_seconds(time_stop - last_time)
+        agg_time_delta = total_seconds(time_stop - first_start_time)
+        agg_size_delta = first_size - size
+        agg_messages_rate = 1.0 * agg_size_delta / agg_time_delta
+
+        size_delta = last_size - size
+        messages_rate = 1.0 * size_delta / time_delta
+        rate_sum += messages_rate
+        rate_count += 1
+
+        print '%s | %s | Size: %s | Processed: %s | Last: %s | Avg: %s | Count: %s | agg rate: %s | Remaining: %s' % (
+            datetime.datetime.utcnow(),
+            queue_name,
+            size, size_delta, round(messages_rate, 2),
+            round(rate_sum / rate_count, 2), rate_count,
+            round(agg_messages_rate, 2),
+            get_time_remaining(size, agg_messages_rate))
+
+        last_size = size
+        last_time = time_stop
+
+        time.sleep(sleep)
+
+
+if __name__ == '__main__':
+    main()

http://git-wip-us.apache.org/repos/asf/usergrid/blob/32f9e55d/utils/usergrid-util-python/usergrid_tools/general/url_tester.py
----------------------------------------------------------------------
diff --git a/utils/usergrid-util-python/usergrid_tools/general/url_tester.py b/utils/usergrid-util-python/usergrid_tools/general/url_tester.py
new file mode 100644
index 0000000..62755df
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/general/url_tester.py
@@ -0,0 +1,87 @@
+import datetime
+import time
+
+import numpy
+import requests
+
+## This will call a URL over and over to check the latency of the call
+
+def total_milliseconds(td):
+    return (td.microseconds + td.seconds * 1000000) / 1000
+
+url_template = "{protocol}://{host}:{port}/{org}/{app}/{collection}?ql={ql}&client_id={client_id}&client_secret={client_secret}"
+
+environments = {
+
+    'local': {
+        'protocol': 'http',
+        'host': 'localhost',
+        'port': 8080,
+        'org': 'myOrg',
+        'app': 'myApp',
+        'collection': 'myEntities',
+        'ql': 'select *',
+        'client_id': '<<client_id>>',
+        'client_secret': '<<client_secret>>'
+    }
+}
+
+ENV = 'local'
+
+data = environments.get(ENV)
+if data is None:
+    print 'did not find an environment entry for %s' % ENV
+    exit(1)
+
+x = 0
+
+SLEEP = .5
+count_under_one = 0.0
+count_over = 0.0
+percent_under_one = 100.0
+total_time = 0
+
+print url_template.format(**data)
+
+response_times = []
+
+while True:
+    x += 1
+    target_url = url_template.format(**data)
+
+    r = requests.get(url=target_url)
+
+    response_time = total_milliseconds(r.elapsed)
+    total_time += response_time
+
+    # print '%s / %s' % (r.elapsed, total_milliseconds(r.elapsed))
+
+    the_date = datetime.datetime.utcnow()
+
+    if r.status_code != 200:
+        print '%s | %s: %s in %s |  %s' % (the_date, x, r.status_code, response_time, r.text)
+    else:
+        response_times.append(response_time)
+
+        if response_time < 2000:
+            count_under_one += 1
+        elif response_time > 10000:
+            count_over += 1
+
+        percent_under_one = round(100 * (count_under_one / x), 2)
+        percent_over = round(100 * (count_over / x), 2)
+
+        # print '%s | %s: %s in %s | Count: %s | Avg: %s | under 2s: %s / %s%% | over 10s: %s / %s%%' % (
+        # the_date, x, r.status_code, response_time, len(r.json().get('entities')), (total_time / x), count_under_one,
+        # percent_under_one, count_over, percent_over)
+
+        print '%s | %s: %s in %s | Count: %s | Avg: %s | 99th: %s | 90th: %s | 75th: %s | 50th: %s | 25th: %s' % (
+            the_date, x, r.status_code, response_time, r.json().get('count'), (total_time / x),
+            numpy.percentile(response_times, 99),
+            numpy.percentile(response_times, 90),
+            numpy.percentile(response_times, 75),
+            numpy.percentile(response_times, 50),
+            numpy.percentile(response_times, 25))
+
+    time.sleep(SLEEP)

http://git-wip-us.apache.org/repos/asf/usergrid/blob/32f9e55d/utils/usergrid-util-python/usergrid_tools/general/user_confirm_activate.py
----------------------------------------------------------------------
diff --git a/utils/usergrid-util-python/usergrid_tools/general/user_confirm_activate.py b/utils/usergrid-util-python/usergrid_tools/general/user_confirm_activate.py
new file mode 100644
index 0000000..9b1484e
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/general/user_confirm_activate.py
@@ -0,0 +1,29 @@
+import json
+
+import requests
+
+### This will make the API calls to activate and confirm an array of users
+
+users = [
+    'user1@example.com',
+    'user2@example.com'
+]
+
+TOKEN = 'ABC123'
+URL = "http://localhost:8080/management/users/%s"
+
+s = requests.Session()
+s.headers.update({'authorization': 'Bearer %s' % TOKEN})
+
+for user in users:
+
+    r = s.put(URL % user, data=json.dumps({"activated": True}))
+    print 'Activated %s: %s' % (user, r.status_code)
+
+    if r.status_code != 200:
+        print r.text
+        continue
+
+    r = s.put(URL % user, data=json.dumps({"confirmed": True}))
+
+    print 'Confirmed %s: %s' % (user, r.status_code)

http://git-wip-us.apache.org/repos/asf/usergrid/blob/32f9e55d/utils/usergrid-util-python/usergrid_tools/general/user_creator.py
----------------------------------------------------------------------
diff --git a/utils/usergrid-util-python/usergrid_tools/general/user_creator.py b/utils/usergrid-util-python/usergrid_tools/general/user_creator.py
new file mode 100644
index 0000000..d21183d
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/general/user_creator.py
@@ -0,0 +1,49 @@
+import json
+import requests
+
+### This will create an array of org-level management users
+
+users = [
+    'me@example.com'
+]
+
+for user in users:
+
+    post_body = {
+        "username": user,
+        "name": user,
+        "email": user,
+        "password": "test12345"
+    }
+
+    print json.dumps(post_body)
+
+    r = requests.post('http://localhost:8080/management/organizations/asdf/users',
+                      headers={
+                          'Authorization': 'Bearer SADFSDF',
+                          'Content-Type': 'application/json'
+                      },
+                      data=json.dumps(post_body))
+
+    print r.status_code
+
+    print '%s: created (POST) [%s]: %s' % (user, r.status_code, r.text)
+
+    #
+    # r = requests.put('http://localhost:8080/management/users/%s' % user,
+    #                  headers={
+    #                      'Authorization': 'Bearer YWMtFlVrhK8nEeW-AhmxdmpAVAAAAVIYTHxTNSUxpQyUWZQ2LsZxcXSdNtO_lWo',
+    #                      'Content-Type': 'application/json'
+    #                  },
+    #                  data=json.dumps('{"confirmed": true}'))
+    #
+    # print '%s: confirmed: %s' % (user, r.status_code)
+    #
+    # r = requests.put('http://localhost:8080/management/users/%s' % user,
+    #                  headers={
+    #                      'Authorization': 'Bearer YWMtFlVrhK8nEeW-AhmxdmpAVAAAAVIYTHxTNSUxpQyUWZQ2LsZxcXSdNtO_lWo',
+    #                      'Content-Type': 'application/json'
+    #                  },
+    #                  data=json.dumps('{"activated": true}'))
+    #
+    # print '%s: activated: %s' % (user, r.status_code)

http://git-wip-us.apache.org/repos/asf/usergrid/blob/32f9e55d/utils/usergrid-util-python/usergrid_tools/groups/__init__.py
----------------------------------------------------------------------
diff --git a/utils/usergrid-util-python/usergrid_tools/groups/__init__.py b/utils/usergrid-util-python/usergrid_tools/groups/__init__.py
new file mode 100644
index 0000000..e69de29

http://git-wip-us.apache.org/repos/asf/usergrid/blob/32f9e55d/utils/usergrid-util-python/usergrid_tools/groups/big_group_creater.py
----------------------------------------------------------------------
diff --git a/utils/usergrid-util-python/usergrid_tools/groups/big_group_creater.py b/utils/usergrid-util-python/usergrid_tools/groups/big_group_creater.py
new file mode 100644
index 0000000..268b7f8
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/groups/big_group_creater.py
@@ -0,0 +1,86 @@
+import json
+import traceback
+from multiprocessing import Pool
+
+import datetime
+import urllib3
+
+import requests
+
+group_name = 'precisely-10k'
+users = 10000
+username_template = 'precisely-10k-%s'
+
+url_data = {
+    "api_url": "https://usergrid-e2e-prod.e2e.apigee.net/appservices-2-1/",
+    "org": "tempgrey",
+    "app": "sandbox",
+    "client_id": "",
+    "client_secret": "",
+
+}
+
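+# NOTE: this second url_data definition overrides the one above; keep whichever
+# environment you intend to target and remove or comment out the other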
+url_data = {
+    "api_url": "http://baas-ug002sr.apigee.net/",
+    "org": "apigee-vfmplus",
+    "app": "sandbox",
+    "client_id": "",
+    "client_secret": "",
+
+}
+
+collection_url_template = "{api_url}/{org}/{app}/{collection}"
+add_user_url_template = "{api_url}/{org}/{app}/groups/{group_name}/users/{uuid}"
+
+
+def create_group(name):
+    url = collection_url_template.format(collection='groups', **url_data)
+    print url
+    r = requests.post(url, data=json.dumps({"path": name, "name": name}))
+
+    if r.status_code not in [200, 400]:
+        print r.text
+        exit()
+
+
+def create_user(username):
+    url = collection_url_template.format(collection='users', **url_data)
+    r = requests.post(url, data=json.dumps({"username": username}))
+
+    if r.status_code not in [200, 400]:
+        print r.text
+        exit()
+
+    print 'Created user %s' % username
+
+
+def map_user(username):
+    try:
+        url = add_user_url_template.format(group_name=group_name, uuid=username, **url_data)
+        r = requests.post(url, data=json.dumps({"username": username}))
+
+        if r.status_code != 200:
+            print r.text
+            exit()
+
+        print 'Mapped user %s' % username
+    except:
+        print traceback.format_exc()
+
+
+user_names = [username_template % i for i in xrange(0, users)]
+
+pool = Pool(64)
+
+start = datetime.datetime.utcnow()
+pool.map(create_user, user_names)
+
+create_group(group_name)
+
+pool.map(map_user, user_names)
+
+finish = datetime.datetime.utcnow()
+
+td = finish - start
+
+print td

http://git-wip-us.apache.org/repos/asf/usergrid/blob/32f9e55d/utils/usergrid-util-python/usergrid_tools/indexing/README.md
----------------------------------------------------------------------
diff --git a/utils/usergrid-util-python/usergrid_tools/indexing/README.md b/utils/usergrid-util-python/usergrid_tools/indexing/README.md
new file mode 100644
index 0000000..e938c28
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/indexing/README.md
@@ -0,0 +1,22 @@
+# Usergrid Indexing Latency Tester
+
+
+# Overview
+
+Indexing of data (to Elasticsearch) in Usergrid is done asynchronously, while persistence (to Cassandra) is done synchronously within the context of an API call.  This means that you can immediately get your data back by UUID, but if you query with `GET /org/app/collection?ql=select * where field='value'` the data is not instantly visible because it has not yet been indexed.  The typical delay is ~25ms.
+
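+A minimal sketch of the difference, assuming a local instance, a hypothetical `myOrg`/`myApp`/`things` collection and no authentication:
+
+```python
+import json
+import requests
+
+base_url = 'http://localhost:8080/myOrg/myApp/things'
+
+# persistence is synchronous: the new entity is immediately readable by UUID
+r = requests.post(base_url, data=json.dumps({'field': 'value'}))
+uuid = r.json()['entities'][0]['uuid']
+print requests.get('%s/%s' % (base_url, uuid)).status_code  # 200 right away
+
+# indexing is asynchronous: the same entity may not appear in query results yet
+q = requests.get(base_url + "?ql=select * where field='value'")
+print len(q.json().get('entities', []))  # may be 0 until indexing completes
+```
+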
+The purpose of this tool is to test the indexing latency within Usergrid.
+
+```
+$ usergrid_index_test -h
+
+usage: usergrid_index_test [-h] -o ORG -a APP --base_url BASE_URL
+
+Usergrid Indexing Latency Test
+
+optional arguments:
+  -h, --help           show this help message and exit
+  -o ORG, --org ORG    Name of the org to perform the test in
+  -a APP, --app APP    Name of the app to perform the test in
+  --base_url BASE_URL  The URL of the Usergrid Instance
+```
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/usergrid/blob/32f9e55d/utils/usergrid-util-python/usergrid_tools/indexing/__init__.py
----------------------------------------------------------------------
diff --git a/utils/usergrid-util-python/usergrid_tools/indexing/__init__.py b/utils/usergrid-util-python/usergrid_tools/indexing/__init__.py
new file mode 100644
index 0000000..e69de29

http://git-wip-us.apache.org/repos/asf/usergrid/blob/32f9e55d/utils/usergrid-util-python/usergrid_tools/indexing/batch_index_test.py
----------------------------------------------------------------------
diff --git a/utils/usergrid-util-python/usergrid_tools/indexing/batch_index_test.py b/utils/usergrid-util-python/usergrid_tools/indexing/batch_index_test.py
new file mode 100644
index 0000000..6c910dd
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/indexing/batch_index_test.py
@@ -0,0 +1,340 @@
+# -*- coding: utf-8 -*-
+import json
+import logging
+import traceback
+from multiprocessing import Pool
+import datetime
+import socket
+
+import argparse
+import requests
+import time
+from logging.handlers import RotatingFileHandler
+
+import sys
+
+entity_template = {
+    "id": "replaced",
+    "dataType": "entitlements",
+    "mockData": [
+        {"importDate": "2015-08-25T23:33:57.124Z", "rowsImported": 2},
+        {"role": "line-owner", "route": "/master", "element": "element1", "entitlementId": "entitlement4",
+         "property": "show"},
+        {"role": "line-owner", "route": "/master", "element": "element2", "entitlementId": "entitlement8",
+         "property": "hide"}
+    ],
+    "nullArray1": [None],
+    "nullArray2": [None, None],
+    "nullArray3": [None, None],
+    "nest1": {
+        "nest2": {
+            "nest3": [None, None, 'foo']
+        }
+    }
+}
+
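+# NOTE: this second entity_template definition overrides the one above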
+entity_template = {
+    "type": "customerstatuses",
+    "created": 1454769737888,
+    "modified": 1454781811473,
+    "address": {
+        "zip": "35873",
+        "city": "m�laga",
+        "street": "3430 calle de bravo murillo",
+        "state": "melilla"
+    },
+    "DOB": "787264244",
+    "email": "bego�a.caballero29@example.com",
+    "firstName": "Bego�a",
+    "lastName": "Caballero",
+    "lastSeenDateTime": 1447737158857,
+    "locationStatus": "Entrance",
+    "loyaltyAccountNumber": "1234",
+    "loyaltyLevel": "basic",
+    "phone": "966-450-469",
+    "profilePictureUrl": "http://api.randomuser.me/portraits/thumb/women/61.jpg",
+    "status": "Entrance",
+    "storeId": 12121
+}
+
+url_template = '{api_url}/{org}/{app}/{collection}'
+token_url_template = '{api_url}/{org}/{app}/token'
+
+config = {}
+
+session = requests.Session()
+
+logger = logging.getLogger('UsergridBatchIndexTest')
+
+
+def init_logging(stdout_enabled=True):
+    root_logger = logging.getLogger()
+    log_file_name = './usergrid_index_test.log'
+    log_formatter = logging.Formatter(fmt='%(asctime)s | %(name)s | %(processName)s | %(levelname)s | %(message)s',
+                                      datefmt='%m/%d/%Y %I:%M:%S %p')
+
+    rotating_file = logging.handlers.RotatingFileHandler(filename=log_file_name,
+                                                         mode='a',
+                                                         maxBytes=2048576000,
+                                                         backupCount=10)
+    rotating_file.setFormatter(log_formatter)
+    rotating_file.setLevel(logging.INFO)
+
+    root_logger.addHandler(rotating_file)
+    root_logger.setLevel(logging.INFO)
+
+    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARN)
+    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARN)
+
+    if stdout_enabled:
+        stdout_logger = logging.StreamHandler(sys.stdout)
+        stdout_logger.setFormatter(log_formatter)
+        stdout_logger.setLevel(logging.INFO)
+        root_logger.addHandler(stdout_logger)
+
+
+def create_entity(work_item):
+    global config
+    try:
+        url = work_item[0]
+        entity = work_item[1]
+
+        # entity['name'] = datetime.datetime.now().strftime('name-%yx%mx%dx%Hx%Mx%S')
+
+        logger.info('creating entity [%s] at URL [%s]' % (entity.get('id'), url))
+
+        r = session.post(url, data=json.dumps(entity))
+
+        if r.status_code != 200:
+            logger.error('HTTP %s: %s' % (r.status_code, r.text))
+            print 'HTTP %s: %s' % (r.status_code, r.text)
+            return
+
+        entities = r.json().get('entities', [])
+        uuid = entities[0].get('uuid')
+
+        logger.info('Created entity UUID=[%s] at URL [%s]' % (uuid, url))
+
+        return uuid, entity
+
+    except Exception, e:
+        print traceback.format_exc()
+
+
+def test_multiple(number_of_entities):
+    global config
+
+    start = datetime.datetime.now()
+
+    logger.info('Creating %s entities w/ url=%s' % (number_of_entities, config['url']))
+    created_map = {}
+
+    work_items = []
+
+    for x in xrange(1, number_of_entities + 1):
+        entity = entity_template.copy()
+        entity['id'] = str(x)
+        work_items.append((config['url'], entity))
+
+    responses = processes.map(create_entity, work_items)
+
+    for res in responses:
+        if res is not None:
+            created_map[res[0]] = res[1]
+
+    stop = datetime.datetime.now()
+
+    logger.info('Created [%s] entities in %s' % (number_of_entities, (stop - start)))
+
+    return created_map
+
+
+def wait_for_indexing(created_map, q_url, sleep_time=0.0):
+    logger.info('Waiting for indexing of [%s] entities...' % len(created_map))
+
+    count_missing = 100
+    start_time = datetime.datetime.now()
+
+    while count_missing > 0:
+
+        entity_map = {}
+        r = session.get(q_url)
+        res = r.json()
+        entities = res.get('entities', [])
+
+        now_time = datetime.datetime.now()
+        elapsed = now_time - start_time
+
+        logger.info('Found [%s] of [%s] entities ([%s] missing) after [%s] at url: %s' % (
+            len(entities), len(created_map), (len(created_map) - len(entities)), elapsed, q_url))
+
+        count_missing = 0
+
+        for entity in entities:
+            entity_map[entity.get('uuid')] = entity
+
+        for uuid, created_entity in created_map.iteritems():
+            if uuid not in entity_map:
+                count_missing += 1
+                logger.info('Missing uuid=[%s] Id=[%s] total missing=[%s]' % (
+                    uuid, created_entity.get('id'), count_missing))
+
+        if count_missing > 0:
+            logger.info('Waiting for indexing, count_missing=[%s] Total time [%s] Sleeping for [%s]s' % (
+                count_missing, elapsed, sleep_time))
+
+            time.sleep(sleep_time)
+
+    stop_time = datetime.datetime.now()
+    logger.info('All entities found after %s' % (stop_time - start_time))
+
+
+def clear(clear_url):
+    logger.info('deleting.... ' + clear_url)
+
+    r = session.delete(clear_url)
+
+    if r.status_code != 200:
+        logger.info('error deleting url=' + clear_url)
+        logger.info(json.dumps(r.json()))
+
+    else:
+        res = r.json()
+        len_entities = len(res.get('entities', []))
+
+        if len_entities > 0:
+            clear(clear_url)
+
+
+def test_cleared(q_url):
+    r = session.get(q_url)
+
+    if r.status_code != 200:
+        logger.info(json.dumps(r.json()))
+    else:
+        res = r.json()
+
+        if len(res.get('entities', [])) != 0:
+            logger.info('DID NOT CLEAR')
+
+
+processes = Pool(32)
+
+
+def test_url(q_url, sleep_time=0.25):
+    test_var = False
+
+    while not test_var:
+        r = session.get(q_url)
+
+        if r.status_code == 200:
+
+            if len(r.json().get('entities', [])) >= 1:
+                test_var = True
+        else:
+            logger.info('non 200')
+
+        if test_var:
+            logger.info('Test of URL [%s] passed' % q_url)
+        else:
+            logger.info('Test of URL [%s] did not pass yet, retrying...' % q_url)
+            time.sleep(sleep_time)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Usergrid Indexing Latency Test')
+
+    parser.add_argument('-o', '--org',
+                        help='Name of the org to perform the test in',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('-a', '--app',
+                        help='Name of the app to perform the test in',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('--base_url',
+                        help='The URL of the Usergrid Instance',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('--client_id',
+                        help='The Client ID to get a token, if needed',
+                        type=str,
+                        required=False)
+
+    parser.add_argument('--client_secret',
+                        help='The Client Secret to get a token, if needed',
+                        type=str,
+                        required=False)
+
+    my_args = parser.parse_args(sys.argv[1:])
+
+    return vars(my_args)
+
+
+def init():
+    global config
+
+    url_data = {
+        'api_url': config.get('base_url'),
+        'org': config.get('org'),
+        'app': config.get('app'),
+        'collection': '%s-%s' % (socket.gethostname(), datetime.datetime.now().strftime('index-test-%yx%mx%dx%Hx%Mx%S'))
+    }
+
+    config['url'] = url_template.format(**url_data)
+    config['token_url'] = token_url_template.format(**url_data)
+
+
+def main():
+    global config
+
+    # processes = Pool(32)
+
+    config = parse_args()
+
+    init()
+
+    init_logging()
+
+    if config.get('client_id') is not None and config.get('client_secret') is not None:
+        token_request = {
+            'grant_type': 'client_credentials',
+            'client_id': config.get('client_id'),
+            'client_secret': config.get('client_secret')
+        }
+
+        r = session.post(config.get('token_url'), json.dumps(token_request))
+
+        if r.status_code == 200:
+            access_token = r.json().get('access_token')
+            session.headers.update({'Authorization': 'Bearer %s' % access_token})
+        else:
+            logger.critical('unable to get token: %s' % r.text)
+            exit(1)
+
+    try:
+        created_map = test_multiple(999)
+
+        q_url = config.get('url') + "?ql=select * where dataType='entitlements'&limit=1000"
+
+        wait_for_indexing(created_map=created_map,
+                          q_url=q_url,
+                          sleep_time=1)
+
+        delete_q_url = config.get('url') + "?ql=select * where dataType='entitlements'&limit=1000"
+
+        clear(clear_url=delete_q_url)
+
+    except KeyboardInterrupt:
+        processes.terminate()
+
+    processes.terminate()
+
+
+main()

http://git-wip-us.apache.org/repos/asf/usergrid/blob/32f9e55d/utils/usergrid-util-python/usergrid_tools/indexing/entity_index_test.py
----------------------------------------------------------------------
diff --git a/utils/usergrid-util-python/usergrid_tools/indexing/entity_index_test.py b/utils/usergrid-util-python/usergrid_tools/indexing/entity_index_test.py
new file mode 100644
index 0000000..6f193a6
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/indexing/entity_index_test.py
@@ -0,0 +1,317 @@
+# -*- coding: utf-8 -*-
+import json
+import logging
+from multiprocessing import Pool
+import datetime
+
+import argparse
+import requests
+import time
+from logging.handlers import RotatingFileHandler
+
+import sys
+
+entity_template = {
+    "id": "replaced",
+    "dataType": "entitlements",
+    "mockData": [
+        {"importDate": "2015-08-25T23:33:57.124Z", "rowsImported": 2},
+        {"role": "line-owner", "route": "/master", "element": "element1", "entitlementId": "entitlement4",
+         "property": "show"},
+        {"role": "line-owner", "route": "/master", "element": "element2", "entitlementId": "entitlement8",
+         "property": "hide"}
+    ],
+    "nullArray1": [None],
+    "nullArray2": [None, None],
+    "nullArray3": [None, None],
+    "nest1": {
+        "nest2": {
+            "nest3": [None, None, 'foo']
+        }
+    }
+}
+
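+# NOTE: this second entity_template definition overrides the one above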
+entity_template = {
+    "type": "customerstatuses",
+    "name": "1234",
+    "created": 1454769737888,
+    "modified": 1454781811473,
+    "address": {
+        "zip": "35873",
+        "city": "m�laga",
+        "street": "3430 calle de bravo murillo",
+        "state": "melilla"
+    },
+    "DOB": "787264244",
+    "email": "bego�a.caballero29@example.com",
+    "firstName": "Bego�a",
+    "lastName": "Caballero",
+    "lastSeenDateTime": 1447737158857,
+    "locationStatus": "Entrance",
+    "loyaltyAccountNumber": "1234",
+    "loyaltyLevel": "basic",
+    "phone": "966-450-469",
+    "profilePictureUrl": "http://api.randomuser.me/portraits/thumb/women/61.jpg",
+    "status": "Entrance",
+    "storeId": 12121
+}
+
+collection_url_template = '{api_url}/{org}/{app}/{collection}'
+query_url_template = '{api_url}/{org}/{app}/{collection}?ql=select * where tag=\'{tag}\''
+entity_url_template = '{api_url}/{org}/{app}/{collection}/{entity_id}'
+token_url_template = '{api_url}/{org}/{app}/token'
+
+config = {}
+
+session = requests.Session()
+
+logger = logging.getLogger('UsergridEntityIndexTest')
+
+
+def init_logging(stdout_enabled=True):
+    root_logger = logging.getLogger()
+    log_file_name = './usergrid_index_test.log'
+    log_formatter = logging.Formatter(fmt='%(asctime)s | %(name)s | %(processName)s | %(levelname)s | %(message)s',
+                                      datefmt='%m/%d/%Y %I:%M:%S %p')
+
+    rotating_file = logging.handlers.RotatingFileHandler(filename=log_file_name,
+                                                         mode='a',
+                                                         maxBytes=2048576000,
+                                                         backupCount=10)
+    rotating_file.setFormatter(log_formatter)
+    rotating_file.setLevel(logging.INFO)
+
+    root_logger.addHandler(rotating_file)
+    root_logger.setLevel(logging.INFO)
+
+    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARN)
+    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARN)
+
+    if stdout_enabled:
+        stdout_logger = logging.StreamHandler(sys.stdout)
+        stdout_logger.setFormatter(log_formatter)
+        stdout_logger.setLevel(logging.INFO)
+        root_logger.addHandler(stdout_logger)
+
+
+def test_multiple(number_of_entities, processes):
+    global config
+
+    start = datetime.datetime.now()
+
+    logger.info('Creating %s entities w/ url=%s' % (number_of_entities, config['url']))
+    created_map = {}
+
+    work_items = []
+
+    for x in xrange(1, number_of_entities + 1):
+        entity = entity_template.copy()
+        entity['id'] = str(x)
+        work_items.append((config['url'], entity))
+
+    responses = processes.map(create_entity, work_items)
+
+    for res in responses:
+        if res is not None:
+            created_map[res[0]] = res[1]
+
+    stop = datetime.datetime.now()
+
+    logger.info('Created [%s] entities in %s' % (number_of_entities, (stop - start)))
+
+    return created_map
+
+
+def clear(clear_url):
+    logger.info('deleting.... ' + clear_url)
+
+    r = session.delete(clear_url)
+
+    if r.status_code != 200:
+        logger.info('error deleting url=' + clear_url)
+        logger.info(json.dumps(r.json()))
+
+    else:
+        res = r.json()
+        len_entities = len(res.get('entities', []))
+
+        if len_entities > 0:
+            clear(clear_url)
+
+
+def test_cleared(q_url):
+    r = session.get(q_url)
+
+    if r.status_code != 200:
+        logger.info(json.dumps(r.json()))
+    else:
+        res = r.json()
+
+        if len(res.get('entities', [])) != 0:
+            logger.info('DID NOT CLEAR')
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Usergrid Indexing Latency Test')
+
+    parser.add_argument('-o', '--org',
+                        help='Name of the org to perform the test in',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('-a', '--app',
+                        help='Name of the app to perform the test in',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('--base_url',
+                        help='The URL of the Usergrid Instance',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('--client_id',
+                        help='The Client ID to get a token, if needed',
+                        type=str,
+                        required=False)
+
+    parser.add_argument('--client_secret',
+                        help='The Client Secret to get a token, if needed',
+                        type=str,
+                        required=False)
+
+    my_args = parser.parse_args(sys.argv[1:])
+
+    return vars(my_args)
+
+
+def init():
+    global config
+
+    url_data = {
+        'api_url': config.get('base_url'),
+        'org': config.get('org'),
+        'app': config.get('app'),
+        'collection': datetime.datetime.now().strftime('index-test-%yx%mx%dx%Hx%Mx%S')
+    }
+
+    config['url_data'] = url_data
+    config['token_url'] = token_url_template.format(**url_data)
+
+
+def create_entity(name, tag):
+    create_me = entity_template.copy()
+    create_me['name'] = name
+    create_me['tag'] = tag
+
+    data = config.get('url_data')
+    url = collection_url_template.format(**data)
+
+    r = session.post(url, data=json.dumps(create_me))
+
+    if r.status_code != 200:
+        logger.critical('unable to create entity: %s' % r.text)
+        return None
+    else:
+        return r.json().get('entities')[0]
+
+
+def update_entity(entity_id, tag):
+    data = {'tag': tag}
+    url = entity_url_template.format(entity_id=entity_id, **config.get('url_data'))
+    r = session.put(url, data=json.dumps(data))
+
+    if r.status_code != 200:
+        logger.critical('unable to update entity!')
+        return False
+    else:
+        return True
+
+
+def wait_for_index(entity_id, tag, wait_time=.25):
+    start = datetime.datetime.now()
+
+    url = query_url_template.format(tag=tag, **config.get('url_data'))
+
+    logger.info('GET %s' % url)
+
+    entities = []
+    elapsed = 0
+
+    while len(entities) <= 0:
+        r = session.get(url)
+
+        if r.status_code != 200:
+            logger.critical('Unable to query, url=[%s]: %s' % (url, r.text))
+            return False
+        else:
+            res = r.json()
+            entities = res.get('entities', [])
+            last_time = datetime.datetime.now()
+            elapsed = last_time - start
+
+            if len(entities) <= 0:
+                logger.info(
+                        'Tag [%s] not applied to [%s] after [%s].  Waiting [%s]...' % (tag, entity_id, elapsed, wait_time))
+                time.sleep(wait_time)
+
+    logger.info('++Tag applied after [%s]!' % elapsed)
+
+
+def test_entity_update():
+    start_tag = datetime.datetime.now().strftime('tag-%yx%mx%dx%Hx%Mx%S')
+    name = datetime.datetime.now().strftime('name-%yx%mx%dx%Hx%Mx%S')
+    entity = create_entity(name, start_tag)
+
+    if entity is None:
+        logger.critical('Entity not created, cannot continue')
+        return
+
+    uuid = entity.get('uuid')
+
+    for x in xrange(0, 10):
+        tag = datetime.datetime.now().strftime('tag-%yx%mx%dx%Hx%Mx%S')
+        logger.info('Testing tag [%s] on entity [%s]' % (tag, name))
+        updated = update_entity(name, tag)
+        if updated: wait_for_index(name, tag)
+
+    for x in xrange(0, 10):
+        tag = datetime.datetime.now().strftime('tag-%yx%mx%dx%Hx%Mx%S')
+        logger.info('Testing tag [%s] on entity [%s]' % (tag, uuid))
+        updated = update_entity(uuid, tag)
+        if updated: wait_for_index(uuid, tag)
+
+
+def main():
+    global config
+
+    processes = Pool(32)
+
+    config = parse_args()
+
+    init()
+
+    init_logging()
+
+    if config.get('client_id') is not None and config.get('client_secret') is not None:
+        token_request = {
+            'grant_type': 'client_credentials',
+            'client_id': config.get('client_id'),
+            'client_secret': config.get('client_secret')
+        }
+
+        r = session.post(config.get('token_url'), json.dumps(token_request))
+
+        if r.status_code == 200:
+            access_token = r.json().get('access_token')
+            session.headers.update({'Authorization': 'Bearer %s' % access_token})
+        else:
+            logger.critical('unable to get token: %s' % r.text)
+            exit(1)
+
+    try:
+        test_entity_update()
+
+    except KeyboardInterrupt:
+        processes.terminate()
+
+
+main()

http://git-wip-us.apache.org/repos/asf/usergrid/blob/32f9e55d/utils/usergrid-util-python/usergrid_tools/iterators/README.md
----------------------------------------------------------------------
diff --git a/utils/usergrid-util-python/usergrid_tools/iterators/README.md b/utils/usergrid-util-python/usergrid_tools/iterators/README.md
new file mode 100644
index 0000000..cf61d4c
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/iterators/README.md
@@ -0,0 +1,8 @@
+simple_iterator
+---------------
+Basis for iterating a collection, or all pages of a query, and doing something with the data, such as counting or modifying entities.
+
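+A minimal sketch of the pattern (assuming the `usergrid` SDK is installed; the URL is a placeholder):
+
+```python
+from usergrid import UsergridQueryIterator
+
+for entity in UsergridQueryIterator('http://localhost:8080/myOrg/myApp/things?ql=select *'):
+    print entity.get('uuid')
+```
+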
+
+usergrid_cross_region_iterator
+---------------
+Used to iterate data in one region and check that it exists in another region.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/usergrid/blob/32f9e55d/utils/usergrid-util-python/usergrid_tools/iterators/__init__.py
----------------------------------------------------------------------
diff --git a/utils/usergrid-util-python/usergrid_tools/iterators/__init__.py b/utils/usergrid-util-python/usergrid_tools/iterators/__init__.py
new file mode 100644
index 0000000..e69de29

http://git-wip-us.apache.org/repos/asf/usergrid/blob/32f9e55d/utils/usergrid-util-python/usergrid_tools/iterators/simple_iterator.py
----------------------------------------------------------------------
diff --git a/utils/usergrid-util-python/usergrid_tools/iterators/simple_iterator.py b/utils/usergrid-util-python/usergrid_tools/iterators/simple_iterator.py
new file mode 100644
index 0000000..60cc4a0
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/iterators/simple_iterator.py
@@ -0,0 +1,79 @@
+import logging
+import sys
+import uuid
+from logging.handlers import RotatingFileHandler
+
+import datetime
+from usergrid import UsergridQueryIterator
+
+execution_id = str(uuid.uuid4())
+
+
+def init_logging(stdout_enabled=True):
+    root_logger = logging.getLogger()
+    root_logger.setLevel(logging.INFO)
+
+    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.ERROR)
+    logging.getLogger('boto').setLevel(logging.ERROR)
+    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARN)
+
+    log_formatter = logging.Formatter(
+            fmt='%(asctime)s | ' + execution_id + ' | %(name)s | %(levelname)s | %(message)s',
+            datefmt='%m/%d/%Y %I:%M:%S %p')
+
+    stdout_logger = logging.StreamHandler(sys.stdout)
+    stdout_logger.setFormatter(log_formatter)
+    stdout_logger.setLevel(logging.CRITICAL)
+    root_logger.addHandler(stdout_logger)
+
+    if stdout_enabled:
+        stdout_logger.setLevel(logging.INFO)
+
+    # base log file
+
+    log_dir = './'
+    log_file_name = '%s/usergrid_iterator.log' % log_dir
+
+    # ConcurrentLogHandler
+    rotating_file = RotatingFileHandler(filename=log_file_name,
+                                        mode='a',
+                                        maxBytes=404857600,
+                                        backupCount=0)
+    rotating_file.setFormatter(log_formatter)
+    rotating_file.setLevel(logging.INFO)
+
+    root_logger.addHandler(rotating_file)
+
+
+def main():
+    init_logging()
+
+    logger = logging.getLogger('SimpleIterator')
+
+    if len(sys.argv) <= 1:
+        logger.critical('usage: usergrid_iterator {url}')
+        exit(1)
+
+    url = sys.argv[1]
+    logger.info('Beginning to iterate URL: %s' % url)
+
+    q = UsergridQueryIterator(url)
+
+    counter = 0
+
+    start = datetime.datetime.utcnow()
+    try:
+        for e in q:
+            counter += 1
+            logger.info('Entity # [%s]: name=[%s] uuid=[%s] created=[%s] modified=[%s]' % (counter, e.get('name'), e.get('uuid'), e.get('created'), e.get('modified')))
+
+    except KeyboardInterrupt:
+        logger.critical('KEYBOARD INTERRUPT')
+        pass
+
+    finish = datetime.datetime.utcnow()
+
+    logger.info('final entity count is [%s] in [%s] for query [%s]' % (counter, (finish-start), url))
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/usergrid/blob/32f9e55d/utils/usergrid-util-python/usergrid_tools/iterators/usergrid_cross_region_iterator.py
----------------------------------------------------------------------
diff --git a/utils/usergrid-util-python/usergrid_tools/iterators/usergrid_cross_region_iterator.py b/utils/usergrid-util-python/usergrid_tools/iterators/usergrid_cross_region_iterator.py
new file mode 100644
index 0000000..35933a2
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/iterators/usergrid_cross_region_iterator.py
@@ -0,0 +1,409 @@
+from usergrid import UsergridQuery
+
+__author__ = 'Jeff West @ ApigeeCorporation'
+
+from Queue import Empty
+import argparse
+import json
+import time
+import logging
+import sys
+from multiprocessing import Process, JoinableQueue
+import datetime
+import requests
+import traceback
+from logging.handlers import RotatingFileHandler
+import urllib3
+import urllib3.contrib.pyopenssl
+
+urllib3.disable_warnings()
+urllib3.contrib.pyopenssl.inject_into_urllib3()
+
+
+# This was used to force a sync of C* across the regions.  The idea is to query entities from
+# a region where they exist using QL.  Then, iterate over the results and do a GET by UUID
+# in the region where the entities are 'missing'.
+#
+# In order for this to be successful the readcl in the "GET by UUID" region or target region
+# must be set to 'ALL' - this will force a repair across the cluster.
+#
+# It is recommended to have the target tomcat out of the ELB for a customer.  Ideally,
+# you should spin up another Tomcat, leaving 2+ in the ELB for a given customer.
+
+
+logger = logging.getLogger('UsergridCrossRegionRepair')
+
+token_url_template = "{api_url}/management/token"
+org_management_url_template = "{api_url}/management/organizations/{org}/applications?access_token={access_token}"
+org_url_template = "{api_url}/{org}?access_token={access_token}"
+app_url_template = "{api_url}/{org}/{app}?access_token={access_token}"
+collection_url_template = "{api_url}/{org}/{app}/{collection}?access_token={access_token}"
+collection_query_url_template = "{api_url}/{org}/{app}/{collection}?ql={ql}&access_token={access_token}&limit={limit}"
+get_entity_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}?access_token={access_token}&connections=none"
+put_entity_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}?access_token={access_token}"
+
+# config can be loaded from a file
+config = {}
+
+# config = {
+#     "regions": {
+#         "us_west": {
+#             "api_url": "http://rut040wo:8080"
+#         },
+#         "us_east": {
+#             "api_url": "http://rut154ea:8080"
+#         },
+#         "eu_west": {
+#             "api_url": "http://localhost:8080"
+#         }
+#     },
+#     "management_region_id": "us_west",
+#     "query_region_id": "us_west",
+#     "get_region_ids": [
+#         "us_east"
+#     ]
+# }
+
+session_map = {}
+
+
+def init_logging(stdout_enabled=True):
+    root_logger = logging.getLogger()
+    log_file_name = './cross-region-repair.log'
+    log_formatter = logging.Formatter(fmt='%(asctime)s | %(name)s | %(processName)s | %(levelname)s | %(message)s',
+                                      datefmt='%m/%d/%Y %I:%M:%S %p')
+
+    rotating_file = logging.handlers.RotatingFileHandler(filename=log_file_name,
+                                                         mode='a',
+                                                         maxBytes=204857600,
+                                                         backupCount=10)
+    rotating_file.setFormatter(log_formatter)
+    rotating_file.setLevel(logging.INFO)
+
+    root_logger.addHandler(rotating_file)
+    root_logger.setLevel(logging.INFO)
+
+    logging.getLogger('boto').setLevel(logging.ERROR)
+    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARN)
+    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARN)
+
+    if stdout_enabled:
+        stdout_logger = logging.StreamHandler(sys.stdout)
+        stdout_logger.setFormatter(log_formatter)
+        stdout_logger.setLevel(logging.INFO)
+        root_logger.addHandler(stdout_logger)
+
+
+class Worker(Process):
+    def __init__(self, queue, handler_function):
+        super(Worker, self).__init__()
+        logger.warning('Creating worker!')
+        self.queue = queue
+        self.handler_function = handler_function
+
+    def run(self):
+
+        logger.info('starting run()...')
+        keep_going = True
+
+        count_processed = 0
+        count_error = 0
+        empty_count = 0
+
+        while keep_going:
+            try:
+                org, app, collection, entity = self.queue.get(timeout=600)
+                empty_count = 0  # reset after receiving work
+                logger.debug('Task: org=[%s] app=[%s] collection=[%s] entity=[%s]' % (org, app, collection, entity))
+
+                if self.handler_function is not None:
+                    processed = self.handler_function(org=org,
+                                                      app=app,
+                                                      collection=collection,
+                                                      entity=entity,
+                                                      counter=count_processed)
+
+                    if processed:
+                        count_processed += 1
+                        logger.info('Processed count=[%s] SUCCESS uuid/name = %s / %s' % (
+                            count_processed, entity.get('uuid'), entity.get('name')))
+                    else:
+                        count_error += 1
+                        logger.error('Processed count=[%s] ERROR uuid/name = %s / %s' % (
+                            count_error, entity.get('uuid'), entity.get('name')))
+
+                self.queue.task_done()
+
+            except KeyboardInterrupt, e:
+                raise e
+
+            except Empty:
+                logger.warning('EMPTY!')
+                empty_count += 1
+                if empty_count > 30:
+                    keep_going = False
+
+        logger.warning('WORKER DONE!')
+
+
+def wait_for(threads, sleep_time=3):  # seconds between liveness checks
+    count_alive = 1
+
+    while count_alive > 0:
+        count_alive = 0
+
+        for t in threads:
+
+            if t.is_alive():
+                count_alive += 1
+
+        if count_alive > 0:
+            logger.warning('Waiting for [%s] processes to finish' % count_alive)
+            time.sleep(sleep_time)
+
+
+def parse_args():
+    DEFAULT_WORKERS = 16
+    DEFAULT_TOKEN_TTL = 25200000
+
+    parser = argparse.ArgumentParser(description='Usergrid Cross-Region Repair Script')
+
+    parser.add_argument('-o', '--org',
+                        help='The org to iterate',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('-a', '--app',
+                        help='The app(s) to iterate; may be specified multiple times',
+                        action='append',
+                        default=[])
+
+    parser.add_argument('-c', '--collection',
+                        help='The collection(s) to iterate; may be specified multiple times',
+                        action='append',
+                        default=[])
+
+    parser.add_argument('-p', '--password',
+                        help='The Password for the token request',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('-u', '--username',
+                        help='The Username for the token request',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('-w', '--workers',
+                        help='The number of worker processes',
+                        type=int,
+                        default=DEFAULT_WORKERS)
+
+    parser.add_argument('--ttl',
+                        help='The TTL for the token request',
+                        type=int,
+                        default=DEFAULT_TOKEN_TTL)
+
+    parser.add_argument('-l', '--limit',
+                        help='The global limit for QL requests',
+                        type=int,
+                        default=DEFAULT_WORKERS * 3)
+
+    parser.add_argument('-f', '--config',
+                        help='The file from which to load the configuration',
+                        type=str)
+
+    my_args = parser.parse_args(sys.argv[1:])
+
+    return vars(my_args)
+
+
+def get_by_UUID(org, app, collection, entity, counter, attempts=0):
+    response = False
+
+    if attempts >= 10:
+        return False
+
+    for region_id in config.get('get_region_ids', []):
+        url_data = config.get('regions', {}).get(region_id)
+
+        url = get_entity_url_template.format(collection=collection,
+                                             app=app,
+                                             uuid=entity.get('uuid'),
+                                             org=org,
+                                             access_token=config['access_token'],
+                                             **url_data)
+
+        logger.info('GET [%s]: %s' % ('...', url))
+
+        session = session_map[region_id]
+
+        while not response:
+
+            try:
+                r = session.get(url)
+
+                if r.status_code != 200:
+                    logger.error('GET [%s] (%s): %s' % (r.status_code, r.elapsed, url))
+                    logger.warning('Sleeping for 5 on connection retry...')
+                    time.sleep(5)
+
+                    return get_by_UUID(org, app, collection, entity, counter, attempts=attempts + 1)
+
+                else:
+                    logger.info('GET [%s] (%s): %s' % (r.status_code, r.elapsed, url))
+                    response = True
+
+                if counter % 10 == 0:
+                    logger.info('COUNTER=[%s] time=[%s] GET [%s]: %s' % (counter,
+                                                                         r.elapsed,
+                                                                         r.status_code,
+                                                                         url))
+            except:
+                logger.error(traceback.format_exc())
+                logger.error('EXCEPTION on GET [...] (...): %s' % url)
+                response = False
+                logger.warning('Sleeping for 5 on connection retry...')
+                time.sleep(5)
+
+    return response
+
+
+def init(args):
+    global config
+
+    if args.get('config') is not None:
+        config_filename = args.get('config')
+
+        logger.warning('Using config file: %s' % config_filename)
+
+        try:
+            with open(config_filename, 'r') as f:
+                parsed_config = json.load(f)
+                logger.warning('Updating config with: %s' % parsed_config)
+                config.update(parsed_config)
+        except:
+            print traceback.format_exc()
+
+    for region_id, region_data in config.get('regions', {}).iteritems():
+        session_map[region_id] = requests.Session()
+
+
+def main():
+    global config
+
+    args = parse_args()
+    init(args)
+
+    management_region_id = config.get('management_region_id', '')
+    management_region = config.get('regions', {}).get(management_region_id)
+
+    query_region_id = config.get('query_region_id', '')
+    query_region = config.get('regions', {}).get(query_region_id)
+
+    start = datetime.datetime.now()
+
+    queue = JoinableQueue()
+
+    logger.warning('Starting workers...')
+    init_logging()
+
+    token_request = {
+        'grant_type': 'password',
+        'username': args.get('username'),
+        'ttl': args.get('ttl')
+    }
+
+    url = token_url_template.format(**management_region)
+
+    logger.info('getting token with url=[%s] data=[%s]' % (url, token_request))
+
+    token_request['password'] = args.get('password')
+
+    r = requests.post(url, data=json.dumps(token_request))
+
+    if r.status_code != 200:
+        logger.critical('did not get access token! response: %s' % r.json())
+        exit(-1)
+
+    logger.info(r.json())
+
+    config['access_token'] = r.json().get('access_token')
+
+    org_mgmt_url = org_management_url_template.format(org=args.get('org'),
+                                                      access_token=config['access_token'],
+                                                      **management_region)
+    logger.info(org_mgmt_url)
+
+    session = session_map[management_region_id]
+
+    r = session.get(org_mgmt_url)
+    logger.info(r.json())
+    logger.info('starting [%s] workers...' % args.get('workers'))
+    workers = [Worker(queue, get_by_UUID) for x in xrange(args.get('workers'))]
+    [w.start() for w in workers]
+
+    try:
+        org_app_data = r.json().get('data')
+
+        logger.info(org_app_data)
+
+        apps_to_process = args.get('app', [])
+        collections_to_process = args.get('collection', [])
+
+        for org_app, app_uuid in org_app_data.iteritems():
+            parts = org_app.split('/')
+            app = parts[1]
+
+            if len(apps_to_process) > 0 and app not in apps_to_process:
+                logger.info('Skipping app/uuid: %s/%s' % (org_app, app_uuid))
+                continue
+
+            logger.info('app UUID: %s' % app_uuid)
+
+            url = app_url_template.format(app=app,
+                                          org=args.get('org'),
+                                          access_token=config['access_token'],
+                                          **management_region)
+
+            logger.info('GET [...]: %s' % url)
+            session = session_map[management_region_id]
+            r = session.get(url)
+
+            for collection_name in r.json().get('entities', [{}])[0].get('metadata', {}).get('collections', {}):
+
+                if collection_name in ['events']:
+                    continue
+
+                elif len(collections_to_process) > 0 and collection_name not in collections_to_process:
+                    logger.info('skipping collection=%s' % collection_name)
+                    continue
+
+                logger.info('processing collection=%s' % collection_name)
+
+                url = collection_query_url_template.format(ql='select * order by created asc',
+                                                           collection=collection_name,
+                                                           org=args['org'],
+                                                           app=app,
+                                                           limit=args['limit'],
+                                                           access_token=config['access_token'],
+                                                           **query_region)
+
+                q = UsergridQuery(url)
+                counter = 0
+
+                for x, e in enumerate(q):
+                    counter += 1
+                    queue.put((args['org'], app, collection_name, e))
+
+                logger.info('collection=%s, count=%s' % (collection_name, counter))
+
+    except KeyboardInterrupt:
+        [w.terminate() for w in workers]
+
+    logger.warning('Waiting for workers to finish...')
+    wait_for(workers)
+
+    finish = datetime.datetime.now()
+    logger.warning('Done!  Took: %s ' % (finish - start))
+
+
+main()

http://git-wip-us.apache.org/repos/asf/usergrid/blob/32f9e55d/utils/usergrid-util-python/usergrid_tools/library_check.py
----------------------------------------------------------------------
diff --git a/utils/usergrid-util-python/usergrid_tools/library_check.py b/utils/usergrid-util-python/usergrid_tools/library_check.py
new file mode 100644
index 0000000..f326987
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/library_check.py
@@ -0,0 +1,23 @@
+import traceback
+
+url_data = {
+    "api_url": "https://usergrid-e2e-prod.e2e.apigee.net/appservices-2-1/",
+    "org": "",
+    "app": "",
+    "client_id": "",
+    "client_secret": "",
+
+}
+
+collection_url_template = "{api_url}/{org}/{app}/{collection}"
+
+try:
+    from usergrid import UsergridQueryIterator
+
+    q = UsergridQueryIterator('')
+
+    print 'Check OK'
+
+except Exception, e:
+    print traceback.format_exc()
+    print 'Check Failed'

http://git-wip-us.apache.org/repos/asf/usergrid/blob/32f9e55d/utils/usergrid-util-python/usergrid_tools/migration/README.md
----------------------------------------------------------------------
diff --git a/utils/usergrid-util-python/usergrid_tools/migration/README.md b/utils/usergrid-util-python/usergrid_tools/migration/README.md
new file mode 100644
index 0000000..921b0a7
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/migration/README.md
@@ -0,0 +1,234 @@
+# Usergrid Data Migrator
+
+## Prerequisites
+* Python 2 (not Python 3)
+
+* Install the Usergrid Python SDK: https://github.com/jwest-apigee/usergrid-python
+
+With Pip (requires python-pip to be installed): `pip install usergrid`
+
+* Install Usergrid Tools
+
+With Pip (requires python-pip to be installed): `pip install usergrid-tools`
+
+
+## Overview
+The purpose of this document is to provide an overview of the Python script in this directory which allows you to migrate data, connections and users from one Usergrid platform / org / app to another.  This can be used in the upgrade process from Usergrid 1.0 to 2.x, since there is no direct upgrade path.
+
+This script functions by taking source and target endpoint configurations (with credentials) and a set of command-line parameters to read data from one Usergrid instance and write to another.  It is written in Python and requires Python 2.7.6+.
+
+There are multiple processes at work in the migration to speed the process up.  There is a main thread which reads entities from the API and then publishes the entities with metadata into a Python Queue which has multiple worker processes listening for work.  The number of worker threads is configurable by command line parameters.
+
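+A minimal sketch of this producer/consumer pattern, with illustrative data and a placeholder for the per-entity migration step (the real script's logic is more involved):
+
+```
+from multiprocessing import Process, Queue
+
+
+def worker(queue):
+    # each worker consumes (org, app, collection, entity) tuples until it
+    # receives a poison pill (None)
+    while True:
+        task = queue.get()
+        if task is None:
+            break
+        org, app, collection_name, entity = task
+        # a real worker would write the entity to the target endpoint here
+        print 'migrating %s/%s/%s/%s' % (org, app, collection_name, entity.get('uuid'))
+
+
+if __name__ == '__main__':
+    queue = Queue(maxsize=10000)
+    workers = [Process(target=worker, args=(queue,)) for x in xrange(4)]
+
+    for w in workers:
+        w.start()
+
+    # the main process reads pages from the source API and publishes work
+    for entity in [{'uuid': '123'}, {'uuid': '456'}]:
+        queue.put(('myorg', 'myapp', 'pets', entity))
+
+    for w in workers:
+        queue.put(None)  # one poison pill per worker
+
+    for w in workers:
+        w.join()
+```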
+
+# Process to Migrate Data and Graph (Connections)
+Usergrid is a graph database and allows for connections between entities.  In order for a connection to be made, both the source entity and the target entity must exist.  Therefore, in order to migrate connections it is advisable to first migrate all the data and then all the connections associated with that data.
+
+# Concepts
+As with any migration process there is a source and a target.  The source and target have the following parameters:
+
+* API URL: The HTTP[S] URL where the platform can be reached
+* Org: You must specify one org at a time to migrate using this script
+* App: You can optionally specify one or more applications to migrate.  If you specify zero applications then all applications will be migrated
+* Collection: You can optionally specify one or more collections to migrate.  If you specify zero collections then all collections will be migrated
+* QL: You can specify a Query Language predicate to be used.  If none is specified, 'select *' will be used, which will migrate all data within a given collection
+* Graph: traverses the graph edges (which necessarily must exist) instead of querying the index.  This is an alternative to using a query, which relies on indexing.
+
+# Graph Loops
+
+When iterating a graph it is possible to get stuck in a loop.  For example:
+
+```
+A --follows--> B
+B --likes--> C
+C --loves--> A
+```
+
+There are two options (which can be combined) to prevent getting stuck in a loop:
+* The `graph_depth` option - this limits the graph depth which will be traversed from a given entity.
+* Marking nodes and edges as 'visited'.  This requires a place to store that state; see 'Using Redis' in the next section.  A sketch of both approaches follows.
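+
+A minimal sketch of depth-limited traversal with visited-marking; `get_connected_entities` is a hypothetical stub standing in for a call to the connections API:
+
+```
+def get_connected_entities(entity):
+    # hypothetical stub: a real implementation would follow outbound edges
+    return entity.get('connections', [])
+
+
+def visit(entity, depth, max_depth, visited):
+    if depth > max_depth:
+        return  # the graph_depth limit has been reached
+
+    key = entity['uuid']
+
+    if key in visited:
+        return  # already seen: this is what breaks the A -> B -> C -> A loop
+
+    visited.add(key)
+
+    for connected in get_connected_entities(entity):
+        visit(connected, depth + 1, max_depth, visited)
+
+
+a = {'uuid': 'A', 'connections': []}
+b = {'uuid': 'B', 'connections': [a]}
+a['connections'].append(b)  # create a loop: A -> B -> A
+visit(a, 0, 3, set())
+```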
+
+# Using Redis 
+
+Redis can be used for the following:
+
+* Keeping track of the modified date for each entity.  On subsequent runs of the script, entities which were not modified since the recorded date will not be copied.
+* Keeping track of visited nodes when migrating a graph.  This is done with a TTL so that a job can be resumed; since edges have no modified date, there is no way to know whether new edges exist, so once the TTL expires the nodes will be visited again.
+* Keeping track of the URLs of the connections which have been created between entities.  These entries have no TTL, so subsequent runs will not re-create connections already recorded in Redis.
+
+If using Redis, version 2.8+ is required because TTLs are set with the 'ex' parameter, as in the sketch below.
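+
+A minimal sketch of how such markers might be stored with the redis-py client; the key names and timestamps are illustrative, not necessarily those used by the script:
+
+```
+import redis
+
+r = redis.StrictRedis(host='localhost', port=6379)
+
+# visited-node marker, expiring after the visit cache TTL (requires Redis 2.8+)
+r.set('visited:myorg/myapp/pets/123', 1, ex=3600)
+
+# modified-date marker with no TTL: skip the entity if it has not changed
+last_modified = r.get('modified:myorg/myapp/pets/123')
+if last_modified is None or long(last_modified) < 1469000000000:
+    r.set('modified:myorg/myapp/pets/123', 1469000000000)
+```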
+
+
+# Mapping
+Using this script it is not necessary to keep the same org, application and/or collection names at the target as at the source.  For example, you could migrate from /myOrg/myApp/myCollection to /org123/app456/collections789, as in the sketch below.
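+
+A sketch of how such colon-separated mappings (as used by the `--map_app`, `--map_collection` and `--map_org` parameters described below) might be parsed; illustrative only, not the script's exact implementation:
+
+```
+# e.g. --map_app app1:application_1 --map_app app2:application_2
+app_mapping = {}
+
+for mapping in ['app1:application_1', 'app2:application_2']:
+    parts = mapping.split(':')
+    if len(parts) == 2:
+        app_mapping[parts[0]] = parts[1]
+
+source_app = 'app1'
+# if no mapping is defined, the source name is kept at the target
+target_app = app_mapping.get(source_app, source_app)
+print 'writing data from app [%s] to app [%s]' % (source_app, target_app)
+```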
+
+
+# Configuration Files
+Example source/target configuration files:
+
+```
+{
+  "endpoint": {
+    "api_url": "https://api.usergrid.com"
+  },
+  "credentials": {
+    "myOrg1": {
+      "client_id": "YXA6lpg9sEaaEeONxG0g3Uz44Q",
+      "client_secret": "ZdF66u2h3Hc7csOcsEtgewmxalB1Ygg"
+    },
+    "myOrg2": {
+      "client_id": "ZXf63p239sDaaEeONSG0g3Uz44Z",
+      "client_secret": "ZdF66u2h3Hc7csOcsEtgewmxajsadfj32"
+    }
+  }
+}
+```
+* api_url: the API URL used to access/write data
+* credentials: one entry per org, with the org name (case-sensitive) as the key:
+  * client_id - the org-level Client ID.  This can be retrieved from the BaaS/Usergrid Portal.
+  * client_secret - the org-level Client Secret.  This can be retrieved from the BaaS/Usergrid Portal.
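+
+A sketch of how such a file might be read to build a token request for one org; `/management/token` is the standard Usergrid org-level token endpoint, and the file name is illustrative:
+
+```
+import json
+
+with open('mySourceConfig.json', 'r') as f:
+    config = json.load(f)
+
+org = 'myOrg1'
+creds = config['credentials'][org]  # the org name key is case-sensitive
+
+token_url = '%s/management/token' % config['endpoint']['api_url']
+token_request = {
+    'grant_type': 'client_credentials',
+    'client_id': creds['client_id'],
+    'client_secret': creds['client_secret']
+}
+print '%s %s' % (token_url, json.dumps(token_request))
+```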
+
+# Command Line Parameters
+
+```
+Usergrid Org/App Data Migrator
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --log_dir LOG_DIR     path to the place where logs will be written
+  --log_level LOG_LEVEL
+                        log level - DEBUG, INFO, WARN, ERROR, CRITICAL
+  -o ORG, --org ORG     Name of the org to migrate
+  -a APP, --app APP     Name of one or more apps to include, specify none to
+                        include all apps
+  -e INCLUDE_EDGE, --include_edge INCLUDE_EDGE
+                        Name of one or more edges/connection types to INCLUDE,
+                        specify none to include all edges
+  --exclude_edge EXCLUDE_EDGE
+                        Name of one or more edges/connection types to EXCLUDE,
+                        specify none to include all edges
+  --exclude_collection EXCLUDE_COLLECTION
+                        Name of one or more collections to EXCLUDE, specify
+                        none to include all collections
+  -c COLLECTION, --collection COLLECTION
+                        Name of one or more collections to include, specify
+                        none to include all collections
+  --use_name_for_collection USE_NAME_FOR_COLLECTION
+                        Name of one or more collections to use [name] instead
+                        of [uuid] for creating entities and edges
+  -m {data,none,reput,credentials,graph}, --migrate {data,none,reput,credentials,graph}
+                        Specifies what to migrate: data, graph (connections),
+                        reput, credentials or none (just iterate the
+                        apps/collections)
+  -s SOURCE_CONFIG, --source_config SOURCE_CONFIG
+                        The path to the source endpoint/org configuration file
+  -d TARGET_CONFIG, --target_config TARGET_CONFIG
+                        The path to the target endpoint/org configuration file
+  --limit LIMIT         The number of entities to return per query request
+  -w ENTITY_WORKERS, --entity_workers ENTITY_WORKERS
+                        The number of worker processes to do the migration
+  --visit_cache_ttl VISIT_CACHE_TTL
+                        The TTL of the cache of visiting nodes in the graph
+                        for connections
+  --error_retry_sleep ERROR_RETRY_SLEEP
+                        The number of seconds to wait before retrying after an
+                        error
+  --page_sleep_time PAGE_SLEEP_TIME
+                        The number of seconds to wait between retrieving pages
+                        from the UsergridQueryIterator
+  --entity_sleep_time ENTITY_SLEEP_TIME
+                        The number of seconds to wait between processing
+                        entities
+  --collection_workers COLLECTION_WORKERS
+                        The number of worker processes used to process
+                        collections
+  --queue_size_max QUEUE_SIZE_MAX
+                        The max size of entities to allow in the queue
+  --graph_depth GRAPH_DEPTH
+                        The graph depth to traverse to copy
+  --queue_watermark_high QUEUE_WATERMARK_HIGH
+                        The point at which publishing to the queue will PAUSE
+                        until it is at or below low watermark
+  --min_modified MIN_MODIFIED
+                        Break when encountering a modified date before this,
+                        per collection
+  --max_modified MAX_MODIFIED
+                        Break when encountering a modified date after this,
+                        per collection
+  --queue_watermark_low QUEUE_WATERMARK_LOW
+                        The point at which publishing to the queue will RESUME
+                        after it has reached the high watermark
+  --ql QL               The QL to use in the filter for reading data from
+                        collections
+  --skip_cache_read     Skip reading the cache (modified timestamps and graph
+                        edges)
+  --skip_cache_write    Skip updating the cache with modified timestamps of
+                        entities and graph edges
+  --create_apps         Create apps at the target if they do not exist
+  --nohup               specifies not to use stdout for logging
+  --graph               Use GRAPH instead of Query
+  --su_username SU_USERNAME
+                        Superuser username
+  --su_password SU_PASSWORD
+                        Superuser Password
+  --inbound_connections
+                        Include inbound connections when migrating the graph
+  --map_app MAP_APP     Multiple allowed: A colon-separated string such as
+                        'apples:oranges' which indicates to put data from the
+                        app named 'apples' from the source endpoint into an
+                        app named 'oranges' in the target endpoint
+  --map_collection MAP_COLLECTION
+                        One or more colon-separated strings such as 'cats:dogs'
+                        which indicates to put data from collections named
+                        'cats' from the source endpoint into a collection
+                        named 'dogs' in the target endpoint, applicable
+                        globally to all apps
+  --map_org MAP_ORG     One or more colon-separated strings such as 'red:blue'
+                        which indicates to put data from the org named 'red'
+                        from the source endpoint into an org named 'blue' in
+                        the target endpoint
+```
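+
+For example, the `--queue_watermark_high` / `--queue_watermark_low` options suggest publishing logic roughly like the following sketch (assumed names and thresholds, not the script's exact code):
+
+```
+import time
+from multiprocessing import Queue
+
+
+def publish(queue, item, watermark_high=25000, watermark_low=5000):
+    # PAUSE publishing when the queue reaches the high watermark and
+    # RESUME once the workers have drained it to the low watermark
+    if queue.qsize() >= watermark_high:
+        while queue.qsize() > watermark_low:
+            time.sleep(1)
+
+    queue.put(item)
+
+
+queue = Queue()
+publish(queue, ('myorg', 'myapp', 'pets', {'uuid': '123'}))
+```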
+
+## Example Command Line
+
+Use the following command to migrate DATA AND GRAPH (entities plus the edges/connections between them).  If there are no graph edges (connections) then using `-m graph` is not necessary.  This will copy all data from all apps in the org 'myorg', creating apps in the target org if they do not already exist.  Note that `--create_apps` is required if the apps in the target org have not been created.
+
+```
+$ usergrid_data_migrator -o myorg -m graph -w 4 -s mySourceConfig.json -d myTargetConfiguration.json  --create_apps
+```
+
+Use the following command to migrate DATA ONLY (no graph edges or connections between entities).  This will copy all data from all apps in the org 'myorg', creating apps in the target org if they do not already exist.  Note that `--create_apps` is required if the apps in the target org have not been created.
+
+```
+$ usergrid_data_migrator -o myorg -m data -w 4 -s mySourceConfig.json -d myTargetConfiguration.json --create_apps
+```
+
+Use the following command to migrate CREDENTIALS for application-level users.  Note that `usergrid.sysadmin.login.allowed=true` must be set in the `usergrid-deployment.properties` file on both the source and target Tomcat nodes.
+
+```
+$ usergrid_data_migrator -o myorg -m credentials -w 4 -s mySourceConfig.json -d myTargetConfiguration.json --create_apps --su_username foo --su_password bar
+```
+
+This command:
+
+```
+$ usergrid_data_migrator -o myorg -a app1 -a app2 -m data -w 4 --map_app app1:appplication_1 --map_app app2:application_2 --map_collection pets:animals --map_org myorg:my_new_org -s mySourceConfig.json -d myTargetConfiguration.json
+```
+will do the following: 
+
+* migrate the apps named 'app1' and 'app2' in the org named 'myorg' from the API endpoint defined in 'mySourceConfig.json' to the API endpoint defined in 'myTargetConfiguration.json'
+* In the process:
+  * data from 'myorg' will be migrated to the org named 'my_new_org'
+  * data from 'app1' will be migrated to the app named 'application_1'
+  * data from 'app2' will be migrated to the app named 'application_2'
+  * all collections named 'pets' will be mapped at the destination to 'animals'
+
+
+# FAQ
+
+### Does the process keep the same UUIDs?
+
+* Yes - with this script the same UUIDs are kept from the source at the destination.  The exception is when you map data from one collection to another under the same org hierarchy.
+
+### Does the process keep the ordering of connections by time?
+
+* Yes - the ordering of connections by time is maintained by the process.

http://git-wip-us.apache.org/repos/asf/usergrid/blob/32f9e55d/utils/usergrid-util-python/usergrid_tools/migration/__init__.py
----------------------------------------------------------------------
diff --git a/utils/usergrid-util-python/usergrid_tools/migration/__init__.py b/utils/usergrid-util-python/usergrid_tools/migration/__init__.py
new file mode 100644
index 0000000..f09d5b5
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/migration/__init__.py
@@ -0,0 +1,2 @@
+import usergrid_data_migrator
+import usergrid_data_exporter
\ No newline at end of file